lollms-client 0.27.0__py3-none-any.whl → 0.27.2__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Potentially problematic release. This version of lollms-client might be problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/lollms_core.py +26 -4
- lollms_client/lollms_discussion.py +72 -2
- lollms_client/lollms_llm_binding.py +3 -2
- {lollms_client-0.27.0.dist-info → lollms_client-0.27.2.dist-info}/METADATA +1 -1
- {lollms_client-0.27.0.dist-info → lollms_client-0.27.2.dist-info}/RECORD +9 -9
- {lollms_client-0.27.0.dist-info → lollms_client-0.27.2.dist-info}/WHEEL +0 -0
- {lollms_client-0.27.0.dist-info → lollms_client-0.27.2.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-0.27.0.dist-info → lollms_client-0.27.2.dist-info}/top_level.txt +0 -0
lollms_client/__init__.py
CHANGED

@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
 
-__version__ = "0.27.0"
+__version__ = "0.27.2" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
lollms_client/lollms_core.py
CHANGED

@@ -280,6 +280,13 @@ class LollmsClient():
             available = self.binding_manager.get_available_bindings()
             raise ValueError(f"Failed to update LLM binding: {binding_name}. Available: {available}")
 
+    def get_ctx_size(self, model_name=None):
+        if self.binding:
+            ctx_size = self.binding.get_ctx_size(model_name)
+            return ctx_size if ctx_size else self.default_ctx_size
+        else:
+            return None
+
     def update_tts_binding(self, binding_name: str, config: Optional[Dict[str, Any]] = None):
         """Update the TTS binding with a new configuration."""
         self.tts = self.tts_binding_manager.create_binding(
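The new `get_ctx_size` helper asks the active binding for the model's context size and falls back to the client's `default_ctx_size` when the binding reports nothing. A minimal usage sketch (the client instance `lc` and the model name are illustrative, not part of this diff):

```python
# Sketch only: assumes `lc` is an already-constructed LollmsClient.
ctx = lc.get_ctx_size()  # context size for the currently selected model
if ctx is None:
    # Per the diff, None means no LLM binding is attached to the client.
    print("No LLM binding configured")
else:
    print(f"Context window: {ctx} tokens")

# A specific model can also be queried; the name here is a placeholder.
other_ctx = lc.get_ctx_size("some-model-name")
```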
@@ -1584,6 +1591,11 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
             "description": """Generates and stores code into a buffer to be used by another tool. You can put the uuid of the generated code into the fields that require long code among the tools. If no tool requires code as input do not use put_code_in_buffer. put_code_in_buffer do not execute the code nor does it audit it.""",
             "input_schema": {"type": "object", "properties": {"prompt": {"type": "string", "description": "A detailed natural language description of the code's purpose and requirements."}, "language": {"type": "string", "description": "The programming language of the generated code. By default it uses python."}}, "required": ["prompt"]}
         })
+        available_tools.append({
+            "name": "view_generated_code",
+            "description": """Views the code that was generated and stored to the buffer. You need to have a valid uuid of the generated code.""",
+            "input_schema": {"type": "object", "properties": {"code_id": {"type": "string", "description": "The case sensitive uuid of the generated code."}}, "required": ["uuid"]}
+        })
         # Add the new refactor_scratchpad tool definition
         available_tools.append({
             "name": "refactor_scratchpad",
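A reasoning model selecting this tool would emit a call carrying the buffer uuid. A hypothetical payload (the outer dict shape is an assumption for illustration; only the `code_id` parameter comes from the schema, whose `required` list incidentally names `uuid` rather than `code_id`):

```python
# Hypothetical tool call; the uuid value is a placeholder, not a real
# buffer entry produced by put_code_in_buffer.
tool_call = {
    "tool_name": "view_generated_code",
    "tool_params": {"code_id": "3f2a9c1e-8b47-4d6a-9e0c-1f2a3b4c5d6e"},
}
```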
@@ -1595,7 +1607,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
         formatted_tools_list += "\n**request_clarification**:\nUse if the user's request is ambiguous and you can not infer a clear idea of his intent. this tool has no parameters."
         formatted_tools_list += "\n**final_answer**:\nUse when you are ready to respond to the user. this tool has no parameters."
 
-        if discovery_step_id: log_event("**Discovering tools**",MSG_TYPE.MSG_TYPE_STEP_END, event_id=discovery_step_id)
+        if discovery_step_id: log_event(f"**Discovering tools** found {len(available_tools)} tools",MSG_TYPE.MSG_TYPE_STEP_END, event_id=discovery_step_id)
 
         # --- 2. Dynamic Reasoning Loop ---
         for i in range(max_reasoning_steps):

@@ -1658,7 +1670,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
 
 
             current_scratchpad += f"\n\n### Step {i+1}: Thought\n{thought}"
-            log_event(f"
+            log_event(f"{thought}", MSG_TYPE.MSG_TYPE_THOUGHT_CONTENT)
 
             if not tool_name:
                 # Handle error...
@@ -1691,10 +1703,20 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
                 tool_calls_this_turn.append({"name": "put_code_in_buffer", "params": tool_params, "result": tool_result})
                 observation_text = f"```json\n{json.dumps(tool_result, indent=2)}\n```"
                 current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
-                log_event(f"
+                log_event(f"Code generated with ID: {code_uuid}", MSG_TYPE.MSG_TYPE_OBSERVATION)
                 if code_gen_id: log_event(f"Generating code...", MSG_TYPE.MSG_TYPE_TOOL_CALL, metadata={"id": code_gen_id, "result": tool_result})
                 if reasoning_step_id: log_event(f"**Reasoning Step {i+1}/{max_reasoning_steps}**", MSG_TYPE.MSG_TYPE_STEP_END, event_id= reasoning_step_id)
                 continue # Go to the next reasoning step immediately
+            if tool_name == 'view_generated_code':
+                code_id = tool_params.get("code_id")
+                if code_id:
+                    tool_result = {"status": "success", "code_id": code_id, "generated_code":generated_code_store[code_uuid]}
+                else:
+                    tool_result = {"status": "error", "code_id": code_id, "error":"Unknown uuid"}
+                observation_text = f"```json\n{json.dumps(tool_result, indent=2)}\n```"
+                current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
+                log_event(f"Result from `{tool_name}`:\n```\n{generated_code_store[code_uuid]}\n```\n", MSG_TYPE.MSG_TYPE_TOOL_CALL, metadata={"id": code_gen_id, "result": tool_result})
+                continue
             if tool_name == 'refactor_scratchpad':
                 scratchpad_cleaning_prompt = f"""Enhance this scratchpad content to be more organized and comprehensive. Keep relevant experience information and remove any useless redundancies. Try to log learned things from the context so that you won't make the same mistakes again. Do not remove the main objective information or any crucial information that may be useful for the next iterations. Answer directly with the new scratchpad content without any comments.
 
 --- YOUR INTERNAL SCRATCHPAD (Work History & Analysis) ---
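The handler above serves the buffer from `generated_code_store`, though it indexes the success path with `code_uuid` from the earlier `put_code_in_buffer` branch rather than the supplied `code_id`. A self-contained sketch of the intended buffer round-trip (the helper functions are illustrative, not the library's API; only the store name and result shape are taken from the diff):

```python
import uuid

# Assumed shape from the diff: a dict mapping uuid strings to code text.
generated_code_store = {}

def put_code_in_buffer(code: str) -> str:
    """Store generated code and return its buffer id."""
    code_id = str(uuid.uuid4())
    generated_code_store[code_id] = code
    return code_id

def view_generated_code(code_id: str) -> dict:
    """Mirror the handler's result shape for a buffer lookup."""
    if code_id in generated_code_store:
        return {"status": "success", "code_id": code_id,
                "generated_code": generated_code_store[code_id]}
    return {"status": "error", "code_id": code_id, "error": "Unknown uuid"}
```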
@@ -1755,7 +1777,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
 
                 tool_calls_this_turn.append({"name": tool_name, "params": tool_params, "result": tool_result})
                 current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
-                log_event(f"
+                log_event(f"Result from `{tool_name}`:\n{dict_to_markdown(sanitized_result)}", MSG_TYPE.MSG_TYPE_OBSERVATION)
 
                 if reasoning_step_id: log_event(f"**Reasoning Step {i+1}/{max_reasoning_steps}**", MSG_TYPE.MSG_TYPE_STEP_END, event_id = reasoning_step_id)
             except Exception as ex:
lollms_client/lollms_discussion.py
CHANGED

@@ -333,7 +333,12 @@ class LollmsMessage:
     def __repr__(self) -> str:
         """Provides a developer-friendly representation of the message."""
         return f"<LollmsMessage id={self.id} sender='{self.sender}'>"
-
+
+    def set_metadata_item(self, itemname:str, item_value, discussion):
+        new_metadata = (self.metadata or {}).copy()
+        new_metadata[itemname] = item_value
+        self.metadata = new_metadata
+        discussion.commit()
 
 class LollmsDiscussion:
     """Represents and manages a single discussion.
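A one-line usage sketch for the new message-level helper (assuming `msg` is a LollmsMessage belonging to `discussion`; the key and value are illustrative). The copy-update-reassign pattern replaces the metadata dict wholesale rather than mutating it in place, presumably so the persistence layer sees the attribute assignment:

```python
# Updates one metadata key and commits the parent discussion in one call.
msg.set_metadata_item("user_rating", 5, discussion)
```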
@@ -367,6 +372,7 @@ class LollmsDiscussion:
         object.__setattr__(self, 'autosave', autosave)
         object.__setattr__(self, 'max_context_size', max_context_size)
         object.__setattr__(self, 'scratchpad', "")
+        object.__setattr__(self, 'images', [])
 
         # Internal state
         object.__setattr__(self, '_session', None)
@@ -1043,6 +1049,61 @@ class LollmsDiscussion:
         self.touch()
         print(f"[INFO] Discussion auto-pruned. {len(messages_to_prune)} messages summarized. History preserved.")
 
+    def count_discussion_tokens(self, format_type: str, branch_tip_id: Optional[str] = None) -> int:
+        """Counts the number of tokens in the exported discussion content.
+
+        This method exports the discussion in the specified format and then uses
+        the lollmsClient's tokenizer to count the tokens in the resulting text.
+
+        Args:
+            format_type: The target format (e.g., "lollms_text", "openai_chat").
+            branch_tip_id: The ID of the message to use as the end of the context.
+                Defaults to the active branch ID.
+
+        Returns:
+            The total number of tokens.
+        """
+        exported_content = self.export(format_type, branch_tip_id)
+
+        text_to_count = ""
+        if isinstance(exported_content, str):
+            text_to_count = exported_content
+        elif isinstance(exported_content, list):
+            # Handle list of dicts (OpenAI/Ollama format)
+            full_content = []
+            for message in exported_content:
+                content = message.get("content")
+                if isinstance(content, str):
+                    full_content.append(content)
+                elif isinstance(content, list): # Handle OpenAI content parts
+                    for part in content:
+                        if part.get("type") == "text":
+                            full_content.append(part.get("text", ""))
+            text_to_count = "\n".join(full_content)
+
+        return self.lollmsClient.count_tokens(text_to_count)
+
+    def get_context_status(self, branch_tip_id: Optional[str] = None) -> Dict[str, Optional[int]]:
+        """Returns the current token count and the maximum context size.
+
+        This provides a snapshot of the context usage, taking into account
+        any non-destructive pruning that has occurred. The token count is
+        based on the "lollms_text" export format, which is the format used
+        for pruning calculations.
+
+        Args:
+            branch_tip_id: The ID of the message branch to measure. Defaults
+                to the active branch.
+
+        Returns:
+            A dictionary with 'current_tokens' and 'max_tokens'.
+        """
+        current_tokens = self.count_discussion_tokens("lollms_text", branch_tip_id)
+        return {
+            "current_tokens": current_tokens,
+            "max_tokens": self.max_context_size
+        }
+
     def switch_to_branch(self, branch_id):
         self.active_branch_id = branch_id
 
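A usage sketch for the two new methods (assuming `discussion` is a LollmsDiscussion wired to a client; the 80% threshold is an arbitrary example):

```python
# Snapshot of context usage, based on the "lollms_text" export format.
status = discussion.get_context_status()
used, cap = status["current_tokens"], status["max_tokens"]
if cap is not None and used > 0.8 * cap:
    print(f"Context at {used}/{cap} tokens; consider pruning")

# Token count for a specific export format on the active branch tip.
n = discussion.count_discussion_tokens("openai_chat")
```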
@@ -1060,8 +1121,17 @@ class LollmsDiscussion:
             }"""
             infos = self.lollmsClient.generate_code(prompt = prompt, template = template)
             discussion_title = robust_json_parser(infos)["title"]
-            self.metadata
+            new_metadata = (self.metadata or {}).copy()
+            new_metadata['title'] = discussion_title
+
+            self.metadata = new_metadata
             self.commit()
             return discussion_title
         except Exception as ex:
             trace_exception(ex)
+
+    def set_metadata_item(self, itemname:str, item_value):
+        new_metadata = (self.metadata or {}).copy()
+        new_metadata[itemname] = item_value
+        self.metadata = new_metadata
+        self.commit()
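Unlike the message-level variant added above, the discussion-level `set_metadata_item` takes no discussion argument and commits itself; a one-line sketch (key and value illustrative):

```python
discussion.set_metadata_item("title", "Renamed discussion")
```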
lollms_client/lollms_llm_binding.py
CHANGED

@@ -115,8 +115,9 @@ class LollmsLLMBinding(ABC):
         """
         pass
 
-    def get_ctx_size(self, model_name):
-
+    def get_ctx_size(self, model_name=None):
+        # if model_name is none use current model name
+        return None
 
 
     @abstractmethod
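Concrete bindings can override the new base method to report real per-model context sizes; a sketch of such an override (the subclass, its context table, and the `model_name` attribute fallback are assumptions, and the other abstract methods of LollmsLLMBinding are omitted):

```python
class MyBinding(LollmsLLMBinding):
    # Invented per-model context table, purely illustrative.
    _CTX_SIZES = {"llama3:8b": 8192, "mistral:7b": 32768}

    def get_ctx_size(self, model_name=None):
        # Fall back to the binding's current model when none is given,
        # as the comment in the base implementation suggests.
        name = model_name or getattr(self, "model_name", None)
        return self._CTX_SIZES.get(name)  # None signals "unknown", like the base
```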
{lollms_client-0.27.0.dist-info → lollms_client-0.27.2.dist-info}/RECORD
CHANGED

@@ -26,12 +26,12 @@ examples/mcp_examples/openai_mcp.py,sha256=7IEnPGPXZgYZyiES_VaUbQ6viQjenpcUxGiHE
 examples/mcp_examples/run_remote_mcp_example_v2.py,sha256=bbNn93NO_lKcFzfIsdvJJijGx2ePFTYfknofqZxMuRM,14626
 examples/mcp_examples/run_standard_mcp_example.py,sha256=GSZpaACPf3mDPsjA8esBQVUsIi7owI39ca5avsmvCxA,9419
 examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
-lollms_client/__init__.py,sha256=
+lollms_client/__init__.py,sha256=FNukbXeOwWGP1i58B6V4_LtjA1QumGvROeGjoxK2wBs,1147
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=
-lollms_client/lollms_discussion.py,sha256=
+lollms_client/lollms_core.py,sha256=SuU5HpBHXkG9GGpSMRHiRA-qSDBFNEDis3V9ZaV_Tvw,165323
+lollms_client/lollms_discussion.py,sha256=tvANNvpTkUr4L6GKowosIyfV7l3SA6cXnzElt36e2s8,52133
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
-lollms_client/lollms_llm_binding.py,sha256=
+lollms_client/lollms_llm_binding.py,sha256=eBRbiTLsaB-g-UwB2JQTdzTQ6qFvDZk14viDdcxy-ck,12451
 lollms_client/lollms_mcp_binding.py,sha256=0rK9HQCBEGryNc8ApBmtOlhKE1Yfn7X7xIQssXxS2Zc,8933
 lollms_client/lollms_personality.py,sha256=dILUI5DZdzJ3NDDQiIsK2UptVF-jZK3XYXZ2bpXP_ew,8035
 lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246

@@ -88,8 +88,8 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
 lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-0.27.
-lollms_client-0.27.
-lollms_client-0.27.
-lollms_client-0.27.
-lollms_client-0.27.
+lollms_client-0.27.2.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-0.27.2.dist-info/METADATA,sha256=PLlEliNSNJfH4IkjgrxdrTaNW2QEoEWuF91u2zCszbI,25778
+lollms_client-0.27.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-0.27.2.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
+lollms_client-0.27.2.dist-info/RECORD,,

{lollms_client-0.27.0.dist-info → lollms_client-0.27.2.dist-info}/WHEEL
File without changes

{lollms_client-0.27.0.dist-info → lollms_client-0.27.2.dist-info}/licenses/LICENSE
File without changes

{lollms_client-0.27.0.dist-info → lollms_client-0.27.2.dist-info}/top_level.txt
File without changes