lollms-client 0.27.1__py3-none-any.whl → 0.27.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of lollms-client might be problematic.

lollms_client/__init__.py CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
 
-__version__ = "0.27.1" # Updated version
+__version__ = "0.27.2" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [

lollms_client/lollms_core.py CHANGED
@@ -280,6 +280,13 @@ class LollmsClient():
         available = self.binding_manager.get_available_bindings()
         raise ValueError(f"Failed to update LLM binding: {binding_name}. Available: {available}")
 
+    def get_ctx_size(self, model_name=None):
+        if self.binding:
+            ctx_size = self.binding.get_ctx_size(model_name)
+            return ctx_size if ctx_size else self.default_ctx_size
+        else:
+            return None
+
     def update_tts_binding(self, binding_name: str, config: Optional[Dict[str, Any]] = None):
         """Update the TTS binding with a new configuration."""
         self.tts = self.tts_binding_manager.create_binding(
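
The new client-level get_ctx_size prefers the binding's reported context size, falls back to the client's default_ctx_size when the binding returns a falsy value, and returns None only when no LLM binding is loaded. A minimal usage sketch, assuming a constructed client (the binding and model names here are placeholders, not taken from the diff):

    from lollms_client import LollmsClient

    # Hypothetical setup; binding and model names are placeholders.
    lc = LollmsClient(binding_name="ollama", model_name="mistral")

    # Binding-reported size if available, else default_ctx_size;
    # None only when no LLM binding is loaded at all.
    ctx = lc.get_ctx_size()
    print(f"Effective context size: {ctx}")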
@@ -373,10 +380,6 @@ class LollmsClient():
         pass
 
     # --- Core LLM Binding Methods ---
-    def get_ctx_size(self, model_name):
-        return self.binding.get_ctx_size(model_name)
-
-
     def tokenize(self, text: str) -> list:
         """
         Tokenize text using the active LLM binding.
@@ -1588,6 +1591,11 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
             "description": """Generates and stores code into a buffer to be used by another tool. You can put the uuid of the generated code into the fields that require long code among the tools. If no tool requires code as input do not use put_code_in_buffer. put_code_in_buffer do not execute the code nor does it audit it.""",
             "input_schema": {"type": "object", "properties": {"prompt": {"type": "string", "description": "A detailed natural language description of the code's purpose and requirements."}, "language": {"type": "string", "description": "The programming language of the generated code. By default it uses python."}}, "required": ["prompt"]}
         })
+        available_tools.append({
+            "name": "view_generated_code",
+            "description": """Views the code that was generated and stored to the buffer. You need to have a valid uuid of the generated code.""",
+            "input_schema": {"type": "object", "properties": {"code_id": {"type": "string", "description": "The case sensitive uuid of the generated code."}}, "required": ["uuid"]}
+        })
         # Add the new refactor_scratchpad tool definition
         available_tools.append({
             "name": "refactor_scratchpad",
@@ -1599,7 +1607,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
         formatted_tools_list += "\n**request_clarification**:\nUse if the user's request is ambiguous and you can not infer a clear idea of his intent. this tool has no parameters."
         formatted_tools_list += "\n**final_answer**:\nUse when you are ready to respond to the user. this tool has no parameters."
 
-        if discovery_step_id: log_event("**Discovering tools**",MSG_TYPE.MSG_TYPE_STEP_END, event_id=discovery_step_id)
+        if discovery_step_id: log_event(f"**Discovering tools** found {len(available_tools)} tools",MSG_TYPE.MSG_TYPE_STEP_END, event_id=discovery_step_id)
 
         # --- 2. Dynamic Reasoning Loop ---
         for i in range(max_reasoning_steps):
@@ -1699,6 +1707,16 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
                     if code_gen_id: log_event(f"Generating code...", MSG_TYPE.MSG_TYPE_TOOL_CALL, metadata={"id": code_gen_id, "result": tool_result})
                     if reasoning_step_id: log_event(f"**Reasoning Step {i+1}/{max_reasoning_steps}**", MSG_TYPE.MSG_TYPE_STEP_END, event_id= reasoning_step_id)
                     continue # Go to the next reasoning step immediately
+                if tool_name == 'view_generated_code':
+                    code_id = tool_params.get("code_id")
+                    if code_id:
+                        tool_result = {"status": "success", "code_id": code_id, "generated_code":generated_code_store[code_uuid]}
+                    else:
+                        tool_result = {"status": "error", "code_id": code_id, "error":"Unknown uuid"}
+                    observation_text = f"```json\n{json.dumps(tool_result, indent=2)}\n```"
+                    current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
+                    log_event(f"Result from `{tool_name}`:\n```\n{generated_code_store[code_uuid]}\n```\n", MSG_TYPE.MSG_TYPE_TOOL_CALL, metadata={"id": code_gen_id, "result": tool_result})
+                    continue
                 if tool_name == 'refactor_scratchpad':
                     scratchpad_cleaning_prompt = f"""Enhance this scratchpad content to be more organized and comprehensive. Keep relevant experience information and remove any useless redundancies. Try to log learned things from the context so that you won't make the same mistakes again. Do not remove the main objective information or any crucial information that may be useful for the next iterations. Answer directly with the new scratchpad content without any comments.
 --- YOUR INTERNAL SCRATCHPAD (Work History & Analysis) ---
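
The handler above ships with two visible bugs: it reads code_id from tool_params but then indexes generated_code_store with code_uuid (a variable set only in the put_code_in_buffer branch), and it never checks that the id actually exists in the store, so an unknown id raises a KeyError instead of producing the error result. A minimal corrected sketch, not the released code:

    # Hypothetical fix: look up the id the tool actually received and
    # guard against ids that are not in the buffer.
    if tool_name == 'view_generated_code':
        code_id = tool_params.get("code_id")
        if code_id and code_id in generated_code_store:
            tool_result = {"status": "success", "code_id": code_id,
                           "generated_code": generated_code_store[code_id]}
        else:
            tool_result = {"status": "error", "code_id": code_id,
                           "error": "Unknown uuid"}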

lollms_client/lollms_discussion.py CHANGED
@@ -372,6 +372,7 @@ class LollmsDiscussion:
         object.__setattr__(self, 'autosave', autosave)
         object.__setattr__(self, 'max_context_size', max_context_size)
         object.__setattr__(self, 'scratchpad', "")
+        object.__setattr__(self, 'images', [])
 
         # Internal state
         object.__setattr__(self, '_session', None)
@@ -1048,6 +1049,61 @@ class LollmsDiscussion:
         self.touch()
         print(f"[INFO] Discussion auto-pruned. {len(messages_to_prune)} messages summarized. History preserved.")
 
+    def count_discussion_tokens(self, format_type: str, branch_tip_id: Optional[str] = None) -> int:
+        """Counts the number of tokens in the exported discussion content.
+
+        This method exports the discussion in the specified format and then uses
+        the lollmsClient's tokenizer to count the tokens in the resulting text.
+
+        Args:
+            format_type: The target format (e.g., "lollms_text", "openai_chat").
+            branch_tip_id: The ID of the message to use as the end of the context.
+                Defaults to the active branch ID.
+
+        Returns:
+            The total number of tokens.
+        """
+        exported_content = self.export(format_type, branch_tip_id)
+
+        text_to_count = ""
+        if isinstance(exported_content, str):
+            text_to_count = exported_content
+        elif isinstance(exported_content, list):
+            # Handle list of dicts (OpenAI/Ollama format)
+            full_content = []
+            for message in exported_content:
+                content = message.get("content")
+                if isinstance(content, str):
+                    full_content.append(content)
+                elif isinstance(content, list): # Handle OpenAI content parts
+                    for part in content:
+                        if part.get("type") == "text":
+                            full_content.append(part.get("text", ""))
+            text_to_count = "\n".join(full_content)
+
+        return self.lollmsClient.count_tokens(text_to_count)
+
+    def get_context_status(self, branch_tip_id: Optional[str] = None) -> Dict[str, Optional[int]]:
+        """Returns the current token count and the maximum context size.
+
+        This provides a snapshot of the context usage, taking into account
+        any non-destructive pruning that has occurred. The token count is
+        based on the "lollms_text" export format, which is the format used
+        for pruning calculations.
+
+        Args:
+            branch_tip_id: The ID of the message branch to measure. Defaults
+                to the active branch.
+
+        Returns:
+            A dictionary with 'current_tokens' and 'max_tokens'.
+        """
+        current_tokens = self.count_discussion_tokens("lollms_text", branch_tip_id)
+        return {
+            "current_tokens": current_tokens,
+            "max_tokens": self.max_context_size
+        }
+
     def switch_to_branch(self, branch_id):
         self.active_branch_id = branch_id
 
@@ -1078,4 +1134,4 @@ class LollmsDiscussion:
         new_metadata = (self.metadata or {}).copy()
         new_metadata[itemname] = item_value
         self.metadata = new_metadata
-        self.commit()
+        self.commit()
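
A minimal usage sketch for the count_discussion_tokens/get_context_status helpers added above, assuming a LollmsDiscussion already wired to a LollmsClient and holding some messages (the variable names are illustrative):

    # Hypothetical usage of the new context-inspection helpers.
    status = discussion.get_context_status()
    used = status["current_tokens"]
    budget = status["max_tokens"]  # None when max_context_size is unset
    if budget is not None:
        print(f"Context: {used}/{budget} tokens ({used / budget:.0%} used)")
    else:
        print(f"Context: {used} tokens (no limit configured)")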

lollms_client/lollms_llm_binding.py CHANGED
@@ -115,8 +115,9 @@ class LollmsLLMBinding(ABC):
         """
         pass
 
-    def get_ctx_size(self, model_name):
-        return 32000
+    def get_ctx_size(self, model_name=None):
+        # if model_name is none use current model name
+        return None
 
 
     @abstractmethod
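
The base class now returns None ("unknown") instead of a hard-coded 32000, which is what lets LollmsClient.get_ctx_size fall back to default_ctx_size. A hedged sketch of how a concrete binding might override it; the class, model names, and sizes are invented for illustration, and it assumes the binding keeps its current model in self.model_name:

    # Illustrative subclass only; not part of the package.
    class MyBinding(LollmsLLMBinding):
        CTX_SIZES = {"small-model": 8192, "big-model": 131072}

        def get_ctx_size(self, model_name=None):
            # Fall back to the binding's current model when none is given;
            # return None (unknown) for unrecognized models so the client
            # can apply its own default.
            name = model_name or self.model_name
            return self.CTX_SIZES.get(name)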

lollms_client-0.27.2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.27.1
+Version: 0.27.2
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License

lollms_client-0.27.2.dist-info/RECORD CHANGED
@@ -26,12 +26,12 @@ examples/mcp_examples/openai_mcp.py,sha256=7IEnPGPXZgYZyiES_VaUbQ6viQjenpcUxGiHE
 examples/mcp_examples/run_remote_mcp_example_v2.py,sha256=bbNn93NO_lKcFzfIsdvJJijGx2ePFTYfknofqZxMuRM,14626
 examples/mcp_examples/run_standard_mcp_example.py,sha256=GSZpaACPf3mDPsjA8esBQVUsIi7owI39ca5avsmvCxA,9419
 examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
-lollms_client/__init__.py,sha256=sUL95Hk1acM0CHbqz-eQZDE39r9R7dkV4Q6uK18_5IE,1147
+lollms_client/__init__.py,sha256=FNukbXeOwWGP1i58B6V4_LtjA1QumGvROeGjoxK2wBs,1147
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=x1RF4EZfEqawBtU7iJKMV3JiwwlYK_8AbwZJehHj4kc,163851
-lollms_client/lollms_discussion.py,sha256=aGjjAuRUaPaZvVh8sRWUrSYH8pEGa6j9iM9mTGIqdHQ,49633
+lollms_client/lollms_core.py,sha256=SuU5HpBHXkG9GGpSMRHiRA-qSDBFNEDis3V9ZaV_Tvw,165323
+lollms_client/lollms_discussion.py,sha256=tvANNvpTkUr4L6GKowosIyfV7l3SA6cXnzElt36e2s8,52133
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
-lollms_client/lollms_llm_binding.py,sha256=vtX158AWHSiUUDh7UU6BmyomhB3IXBTrxvGIXcADZfA,12391
+lollms_client/lollms_llm_binding.py,sha256=eBRbiTLsaB-g-UwB2JQTdzTQ6qFvDZk14viDdcxy-ck,12451
 lollms_client/lollms_mcp_binding.py,sha256=0rK9HQCBEGryNc8ApBmtOlhKE1Yfn7X7xIQssXxS2Zc,8933
 lollms_client/lollms_personality.py,sha256=dILUI5DZdzJ3NDDQiIsK2UptVF-jZK3XYXZ2bpXP_ew,8035
 lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
@@ -88,8 +88,8 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
 lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-0.27.1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-lollms_client-0.27.1.dist-info/METADATA,sha256=4QW25XghKUQNEX3y_RFrE9me4SP_efyvet4Z-76h4rc,25778
-lollms_client-0.27.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lollms_client-0.27.1.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
-lollms_client-0.27.1.dist-info/RECORD,,
+lollms_client-0.27.2.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-0.27.2.dist-info/METADATA,sha256=PLlEliNSNJfH4IkjgrxdrTaNW2QEoEWuF91u2zCszbI,25778
+lollms_client-0.27.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-0.27.2.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
+lollms_client-0.27.2.dist-info/RECORD,,
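
The hash fields above follow the wheel RECORD convention (PEP 376/PEP 427): a SHA-256 digest, urlsafe-base64 encoded with the trailing "=" padding stripped, followed by the file size in bytes. A minimal sketch for re-checking one entry against an unpacked wheel (the path is whatever directory you extracted to):

    import base64
    import hashlib
    from pathlib import Path

    def record_hash(path: str) -> tuple[str, int]:
        """Return the (sha256=..., size) pair the way RECORD encodes it."""
        data = Path(path).read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
        return "sha256=" + digest.decode().rstrip("="), len(data)

    # Compare against the lollms_client/__init__.py line above.
    print(record_hash("lollms_client/__init__.py"))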