lollms-client 0.27.0__tar.gz → 0.27.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lollms-client might be problematic. Click here for more details.

Files changed (100) hide show
  1. {lollms_client-0.27.0 → lollms_client-0.27.2}/PKG-INFO +1 -1
  2. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/__init__.py +1 -1
  3. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_core.py +26 -4
  4. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_discussion.py +72 -2
  5. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_llm_binding.py +3 -2
  6. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client.egg-info/PKG-INFO +1 -1
  7. {lollms_client-0.27.0 → lollms_client-0.27.2}/LICENSE +0 -0
  8. {lollms_client-0.27.0 → lollms_client-0.27.2}/README.md +0 -0
  9. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/article_summary/article_summary.py +0 -0
  10. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/console_discussion/console_app.py +0 -0
  11. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/console_discussion.py +0 -0
  12. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/deep_analyze/deep_analyse.py +0 -0
  13. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
  14. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/function_calling_with_local_custom_mcp.py +0 -0
  15. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/generate_a_benchmark_for_safe_store.py +0 -0
  16. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/generate_and_speak/generate_and_speak.py +0 -0
  17. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/generate_game_sfx/generate_game_fx.py +0 -0
  18. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/generate_text_with_multihop_rag_example.py +0 -0
  19. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/gradio_chat_app.py +0 -0
  20. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/gradio_lollms_chat.py +0 -0
  21. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/internet_search_with_rag.py +0 -0
  22. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/lollms_discussions_test.py +0 -0
  23. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/mcp_examples/external_mcp.py +0 -0
  24. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/mcp_examples/local_mcp.py +0 -0
  25. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/mcp_examples/openai_mcp.py +0 -0
  26. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/mcp_examples/run_remote_mcp_example_v2.py +0 -0
  27. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/mcp_examples/run_standard_mcp_example.py +0 -0
  28. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/simple_text_gen_test.py +0 -0
  29. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/simple_text_gen_with_image_test.py +0 -0
  30. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/test_local_models/local_chat.py +0 -0
  31. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/text_2_audio.py +0 -0
  32. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/text_2_image.py +0 -0
  33. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/text_2_image_diffusers.py +0 -0
  34. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/text_and_image_2_audio.py +0 -0
  35. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/text_gen.py +0 -0
  36. {lollms_client-0.27.0 → lollms_client-0.27.2}/examples/text_gen_system_prompt.py +0 -0
  37. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/__init__.py +0 -0
  38. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/azure_openai/__init__.py +0 -0
  39. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/claude/__init__.py +0 -0
  40. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/gemini/__init__.py +0 -0
  41. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/grok/__init__.py +0 -0
  42. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/groq/__init__.py +0 -0
  43. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +0 -0
  44. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/litellm/__init__.py +0 -0
  45. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  46. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/lollms/__init__.py +0 -0
  47. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/mistral/__init__.py +0 -0
  48. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/ollama/__init__.py +0 -0
  49. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/open_router/__init__.py +0 -0
  50. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/openai/__init__.py +0 -0
  51. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  52. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  53. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  54. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  55. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  56. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_config.py +0 -0
  57. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_js_analyzer.py +0 -0
  58. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_mcp_binding.py +0 -0
  59. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_personality.py +0 -0
  60. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_python_analyzer.py +0 -0
  61. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_stt_binding.py +0 -0
  62. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_tti_binding.py +0 -0
  63. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_ttm_binding.py +0 -0
  64. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_tts_binding.py +0 -0
  65. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_ttv_binding.py +0 -0
  66. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_types.py +0 -0
  67. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/lollms_utilities.py +0 -0
  68. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
  69. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
  70. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
  71. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
  72. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
  73. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
  74. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
  75. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/stt_bindings/__init__.py +0 -0
  76. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  77. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  78. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  79. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/tti_bindings/__init__.py +0 -0
  80. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/tti_bindings/dalle/__init__.py +0 -0
  81. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
  82. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/tti_bindings/gemini/__init__.py +0 -0
  83. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  84. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/ttm_bindings/__init__.py +0 -0
  85. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  86. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/ttm_bindings/bark/__init__.py +0 -0
  87. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  88. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/tts_bindings/__init__.py +0 -0
  89. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/tts_bindings/bark/__init__.py +0 -0
  90. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  91. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  92. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  93. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/ttv_bindings/__init__.py +0 -0
  94. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  95. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client.egg-info/SOURCES.txt +0 -0
  96. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client.egg-info/dependency_links.txt +0 -0
  97. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client.egg-info/requires.txt +0 -0
  98. {lollms_client-0.27.0 → lollms_client-0.27.2}/lollms_client.egg-info/top_level.txt +0 -0
  99. {lollms_client-0.27.0 → lollms_client-0.27.2}/pyproject.toml +0 -0
  100. {lollms_client-0.27.0 → lollms_client-0.27.2}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lollms_client
3
- Version: 0.27.0
3
+ Version: 0.27.2
4
4
  Summary: A client library for LoLLMs generate endpoint
5
5
  Author-email: ParisNeo <parisneoai@gmail.com>
6
6
  License: Apache Software License
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
8
8
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
9
9
  from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
10
10
 
11
- __version__ = "0.27.0" # Updated version
11
+ __version__ = "0.27.2" # Updated version
12
12
 
13
13
  # Optionally, you could define __all__ if you want to be explicit about exports
14
14
  __all__ = [
@@ -280,6 +280,13 @@ class LollmsClient():
280
280
  available = self.binding_manager.get_available_bindings()
281
281
  raise ValueError(f"Failed to update LLM binding: {binding_name}. Available: {available}")
282
282
 
283
+ def get_ctx_size(self, model_name=None):
284
+ if self.binding:
285
+ ctx_size = self.binding.get_ctx_size(model_name)
286
+ return ctx_size if ctx_size else self.default_ctx_size
287
+ else:
288
+ return None
289
+
283
290
  def update_tts_binding(self, binding_name: str, config: Optional[Dict[str, Any]] = None):
284
291
  """Update the TTS binding with a new configuration."""
285
292
  self.tts = self.tts_binding_manager.create_binding(
@@ -1584,6 +1591,11 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
1584
1591
  "description": """Generates and stores code into a buffer to be used by another tool. You can put the uuid of the generated code into the fields that require long code among the tools. If no tool requires code as input do not use put_code_in_buffer. put_code_in_buffer do not execute the code nor does it audit it.""",
1585
1592
  "input_schema": {"type": "object", "properties": {"prompt": {"type": "string", "description": "A detailed natural language description of the code's purpose and requirements."}, "language": {"type": "string", "description": "The programming language of the generated code. By default it uses python."}}, "required": ["prompt"]}
1586
1593
  })
1594
+ available_tools.append({
1595
+ "name": "view_generated_code",
1596
+ "description": """Views the code that was generated and stored to the buffer. You need to have a valid uuid of the generated code.""",
1597
+ "input_schema": {"type": "object", "properties": {"code_id": {"type": "string", "description": "The case sensitive uuid of the generated code."}}, "required": ["code_id"]}
1598
+ })
1587
1599
  # Add the new refactor_scratchpad tool definition
1588
1600
  available_tools.append({
1589
1601
  "name": "refactor_scratchpad",
@@ -1595,7 +1607,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
1595
1607
  formatted_tools_list += "\n**request_clarification**:\nUse if the user's request is ambiguous and you can not infer a clear idea of his intent. this tool has no parameters."
1596
1608
  formatted_tools_list += "\n**final_answer**:\nUse when you are ready to respond to the user. this tool has no parameters."
1597
1609
 
1598
- if discovery_step_id: log_event("**Discovering tools**",MSG_TYPE.MSG_TYPE_STEP_END, event_id=discovery_step_id)
1610
+ if discovery_step_id: log_event(f"**Discovering tools** found {len(available_tools)} tools",MSG_TYPE.MSG_TYPE_STEP_END, event_id=discovery_step_id)
1599
1611
 
1600
1612
  # --- 2. Dynamic Reasoning Loop ---
1601
1613
  for i in range(max_reasoning_steps):
@@ -1658,7 +1670,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
1658
1670
 
1659
1671
 
1660
1672
  current_scratchpad += f"\n\n### Step {i+1}: Thought\n{thought}"
1661
- log_event(f"**Thought**:\n{thought}", MSG_TYPE.MSG_TYPE_THOUGHT_CONTENT)
1673
+ log_event(f"{thought}", MSG_TYPE.MSG_TYPE_THOUGHT_CONTENT)
1662
1674
 
1663
1675
  if not tool_name:
1664
1676
  # Handle error...
@@ -1691,10 +1703,20 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
1691
1703
  tool_calls_this_turn.append({"name": "put_code_in_buffer", "params": tool_params, "result": tool_result})
1692
1704
  observation_text = f"```json\n{json.dumps(tool_result, indent=2)}\n```"
1693
1705
  current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
1694
- log_event(f"**Observation**:Code generated with ID: {code_uuid}", MSG_TYPE.MSG_TYPE_OBSERVATION)
1706
+ log_event(f"Code generated with ID: {code_uuid}", MSG_TYPE.MSG_TYPE_OBSERVATION)
1695
1707
  if code_gen_id: log_event(f"Generating code...", MSG_TYPE.MSG_TYPE_TOOL_CALL, metadata={"id": code_gen_id, "result": tool_result})
1696
1708
  if reasoning_step_id: log_event(f"**Reasoning Step {i+1}/{max_reasoning_steps}**", MSG_TYPE.MSG_TYPE_STEP_END, event_id= reasoning_step_id)
1697
1709
  continue # Go to the next reasoning step immediately
1710
+ if tool_name == 'view_generated_code':
1711
+ code_id = tool_params.get("code_id")
1712
+ if code_id:
1713
+ tool_result = {"status": "success", "code_id": code_id, "generated_code":generated_code_store[code_id]}
1714
+ else:
1715
+ tool_result = {"status": "error", "code_id": code_id, "error":"Missing or invalid code_id"}
1716
+ observation_text = f"```json\n{json.dumps(tool_result, indent=2)}\n```"
1717
+ current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
1718
+ log_event(f"Result from `{tool_name}`:\n```\n{generated_code_store[code_id]}\n```\n", MSG_TYPE.MSG_TYPE_TOOL_CALL, metadata={"id": code_gen_id, "result": tool_result})
1719
+ continue
1698
1720
  if tool_name == 'refactor_scratchpad':
1699
1721
  scratchpad_cleaning_prompt = f"""Enhance this scratchpad content to be more organized and comprehensive. Keep relevant experience information and remove any useless redundancies. Try to log learned things from the context so that you won't make the same mistakes again. Do not remove the main objective information or any crucial information that may be useful for the next iterations. Answer directly with the new scratchpad content without any comments.
1700
1722
  --- YOUR INTERNAL SCRATCHPAD (Work History & Analysis) ---
@@ -1755,7 +1777,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
1755
1777
 
1756
1778
  tool_calls_this_turn.append({"name": tool_name, "params": tool_params, "result": tool_result})
1757
1779
  current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
1758
- log_event(f"**Observation**: Result from `{tool_name}`:\n{dict_to_markdown(sanitized_result)}", MSG_TYPE.MSG_TYPE_OBSERVATION)
1780
+ log_event(f"Result from `{tool_name}`:\n{dict_to_markdown(sanitized_result)}", MSG_TYPE.MSG_TYPE_OBSERVATION)
1759
1781
 
1760
1782
  if reasoning_step_id: log_event(f"**Reasoning Step {i+1}/{max_reasoning_steps}**", MSG_TYPE.MSG_TYPE_STEP_END, event_id = reasoning_step_id)
1761
1783
  except Exception as ex:
@@ -333,7 +333,12 @@ class LollmsMessage:
333
333
  def __repr__(self) -> str:
334
334
  """Provides a developer-friendly representation of the message."""
335
335
  return f"<LollmsMessage id={self.id} sender='{self.sender}'>"
336
-
336
+
337
+ def set_metadata_item(self, itemname:str, item_value, discussion):
338
+ new_metadata = (self.metadata or {}).copy()
339
+ new_metadata[itemname] = item_value
340
+ self.metadata = new_metadata
341
+ discussion.commit()
337
342
 
338
343
  class LollmsDiscussion:
339
344
  """Represents and manages a single discussion.
@@ -367,6 +372,7 @@ class LollmsDiscussion:
367
372
  object.__setattr__(self, 'autosave', autosave)
368
373
  object.__setattr__(self, 'max_context_size', max_context_size)
369
374
  object.__setattr__(self, 'scratchpad', "")
375
+ object.__setattr__(self, 'images', [])
370
376
 
371
377
  # Internal state
372
378
  object.__setattr__(self, '_session', None)
@@ -1043,6 +1049,61 @@ class LollmsDiscussion:
1043
1049
  self.touch()
1044
1050
  print(f"[INFO] Discussion auto-pruned. {len(messages_to_prune)} messages summarized. History preserved.")
1045
1051
 
1052
+ def count_discussion_tokens(self, format_type: str, branch_tip_id: Optional[str] = None) -> int:
1053
+ """Counts the number of tokens in the exported discussion content.
1054
+
1055
+ This method exports the discussion in the specified format and then uses
1056
+ the lollmsClient's tokenizer to count the tokens in the resulting text.
1057
+
1058
+ Args:
1059
+ format_type: The target format (e.g., "lollms_text", "openai_chat").
1060
+ branch_tip_id: The ID of the message to use as the end of the context.
1061
+ Defaults to the active branch ID.
1062
+
1063
+ Returns:
1064
+ The total number of tokens.
1065
+ """
1066
+ exported_content = self.export(format_type, branch_tip_id)
1067
+
1068
+ text_to_count = ""
1069
+ if isinstance(exported_content, str):
1070
+ text_to_count = exported_content
1071
+ elif isinstance(exported_content, list):
1072
+ # Handle list of dicts (OpenAI/Ollama format)
1073
+ full_content = []
1074
+ for message in exported_content:
1075
+ content = message.get("content")
1076
+ if isinstance(content, str):
1077
+ full_content.append(content)
1078
+ elif isinstance(content, list): # Handle OpenAI content parts
1079
+ for part in content:
1080
+ if part.get("type") == "text":
1081
+ full_content.append(part.get("text", ""))
1082
+ text_to_count = "\n".join(full_content)
1083
+
1084
+ return self.lollmsClient.count_tokens(text_to_count)
1085
+
1086
+ def get_context_status(self, branch_tip_id: Optional[str] = None) -> Dict[str, Optional[int]]:
1087
+ """Returns the current token count and the maximum context size.
1088
+
1089
+ This provides a snapshot of the context usage, taking into account
1090
+ any non-destructive pruning that has occurred. The token count is
1091
+ based on the "lollms_text" export format, which is the format used
1092
+ for pruning calculations.
1093
+
1094
+ Args:
1095
+ branch_tip_id: The ID of the message branch to measure. Defaults
1096
+ to the active branch.
1097
+
1098
+ Returns:
1099
+ A dictionary with 'current_tokens' and 'max_tokens'.
1100
+ """
1101
+ current_tokens = self.count_discussion_tokens("lollms_text", branch_tip_id)
1102
+ return {
1103
+ "current_tokens": current_tokens,
1104
+ "max_tokens": self.max_context_size
1105
+ }
1106
+
1046
1107
  def switch_to_branch(self, branch_id):
1047
1108
  self.active_branch_id = branch_id
1048
1109
 
@@ -1060,8 +1121,17 @@ class LollmsDiscussion:
1060
1121
  }"""
1061
1122
  infos = self.lollmsClient.generate_code(prompt = prompt, template = template)
1062
1123
  discussion_title = robust_json_parser(infos)["title"]
1063
- self.metadata['title'] = discussion_title
1124
+ new_metadata = (self.metadata or {}).copy()
1125
+ new_metadata['title'] = discussion_title
1126
+
1127
+ self.metadata = new_metadata
1064
1128
  self.commit()
1065
1129
  return discussion_title
1066
1130
  except Exception as ex:
1067
1131
  trace_exception(ex)
1132
+
1133
+ def set_metadata_item(self, itemname:str, item_value):
1134
+ new_metadata = (self.metadata or {}).copy()
1135
+ new_metadata[itemname] = item_value
1136
+ self.metadata = new_metadata
1137
+ self.commit()
@@ -115,8 +115,9 @@ class LollmsLLMBinding(ABC):
115
115
  """
116
116
  pass
117
117
 
118
- def get_ctx_size(self, model_name):
119
- return 32000
118
+ def get_ctx_size(self, model_name=None):
119
+ # if model_name is none use current model name
120
+ return None
120
121
 
121
122
 
122
123
  @abstractmethod
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lollms_client
3
- Version: 0.27.0
3
+ Version: 0.27.2
4
4
  Summary: A client library for LoLLMs generate endpoint
5
5
  Author-email: ParisNeo <parisneoai@gmail.com>
6
6
  License: Apache Software License
File without changes
File without changes
File without changes