lollms-client 0.31.0-py3-none-any.whl → 0.31.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

lollms_client/__init__.py CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utilities
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
 
-__version__ = "0.31.0" # Updated version
+__version__ = "0.31.1" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
lollms_client/lollms_core.py CHANGED
@@ -1586,10 +1586,10 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
             "input_schema": {"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]}
         })
 
-        # Add the new put_code_in_buffer tool definition
+        # Add the new prepare_code tool definition
         available_tools.append({
-            "name": "local_tools::put_code_in_buffer",
-            "description": """Generates and stores code into a buffer to be used by another tool. You can put the uuid of the generated code into the fields that require long code among the tools. If no tool requires code as input do not use put_code_in_buffer. put_code_in_buffer do not execute the code nor does it audit it.""",
+            "name": "local_tools::prepare_code",
+            "description": """Generates and stores code into a buffer to be used by another tool. Never put code into a tool directly, first call this to generate the code and then paste the uuid in the tool that requires code. Only use this for generating code to be sent to another tool. You can put the uuid of the generated code into the fields that require long code among the tools. If no tool requires code as input do not use prepare_code. prepare_code do not execute the code nor does it audit it.""",
             "input_schema": {"type": "object", "properties": {"prompt": {"type": "string", "description": "A detailed natural language description of the code's purpose and requirements."}, "language": {"type": "string", "description": "The programming language of the generated code. By default it uses python."}}, "required": ["prompt"]}
         })
         available_tools.append({
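
A side note on the schema: `input_schema` is plain JSON Schema, so a host can validate tool arguments before dispatch. Below is a minimal sketch using the third-party `jsonschema` package; the schema is copied from the hunk above, but the validation step itself is an illustration, not something this diff shows lollms_client doing.

```python
# Hedged sketch: validate prepare_code arguments against its input_schema.
# The schema is copied from the tool definition above; using `jsonschema`
# here is illustrative, not part of lollms_client.
from jsonschema import ValidationError, validate

PREPARE_CODE_SCHEMA = {
    "type": "object",
    "properties": {
        "prompt": {"type": "string"},
        "language": {"type": "string"},
    },
    "required": ["prompt"],
}

def check_tool_params(params: dict) -> bool:
    """Return True if `params` satisfies the prepare_code input schema."""
    try:
        validate(instance=params, schema=PREPARE_CODE_SCHEMA)
        return True
    except ValidationError:
        return False

assert check_tool_params({"prompt": "Write fizzbuzz", "language": "python"})
assert not check_tool_params({"language": "python"})  # missing required "prompt"
```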
@@ -1631,7 +1631,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
 2. **THINK:**
     - Does the latest observation completely fulfill the user's original request?
     - If YES, your next action MUST be to use the `final_answer` tool.
-    - If NO, what is the single next logical step needed? This may involve writing code first with `put_code_in_buffer`, then using another tool.
+    - If NO, what is the single next logical step needed? This may involve writing code first with `prepare_code`, then using another tool.
     - If you are stuck or the request is ambiguous, use `local_tools::request_clarification`.
 3. **ACT:** Formulate your decision as a JSON object.
 ** Important ** Always use this format alias::tool_name to call the tool
@@ -1639,7 +1639,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
         action_template = {
             "thought": "My detailed analysis of the last observation and my reasoning for the next action and how it integrates with my global plan.",
             "action": {
-                "tool_name": "The single tool to use (e.g., 'local_tools::put_code_in_buffer', 'local_tools::final_answer').",
+                "tool_name": "The single tool to use (e.g., 'local_tools::prepare_code', 'local_tools::final_answer').",
                 "tool_params": {"param1": "value1"},
                 "clarification_question": "(string, ONLY if tool_name is 'local_tools::request_clarification')"
             }
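
For concreteness, here is one filled-in instance of the template, written as a Python dict; the values are invented for illustration, only the keys and tool names come from the diff.

```python
# Illustrative only: a single concrete action matching action_template above.
example_action = {
    "thought": "No code exists yet for the user's request, so the next step is "
               "to generate it with prepare_code before any execution tool runs.",
    "action": {
        "tool_name": "local_tools::prepare_code",
        "tool_params": {
            "prompt": "A Python script that prints the first 10 Fibonacci numbers.",
            "language": "python",
        },
    },
}
```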
@@ -1693,20 +1693,20 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
                 if reasoning_step_id: log_event(f"**Reasoning Step {i+1}/{max_reasoning_steps}**",MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id)
                 break
 
-            # --- Handle the `put_code_in_buffer` tool specifically ---
-            if tool_name == 'local_tools::put_code_in_buffer':
-                code_gen_id = log_event(f"Generating code...", MSG_TYPE.MSG_TYPE_STEP_START, metadata={"name": "put_code_in_buffer", "id": "gencode"})
+            # --- Handle the `prepare_code` tool specifically ---
+            if tool_name == 'local_tools::prepare_code':
+                code_gen_id = log_event(f"Generating code...", MSG_TYPE.MSG_TYPE_STEP_START, metadata={"name": "prepare_code", "id": "gencode"})
                 code_prompt = tool_params.get("prompt", "Generate the requested code.")
 
                 # Use a specific system prompt to get raw code
                 code_generation_system_prompt = "You are a code generation assistant. Generate ONLY the raw code based on the user's request. Do not add any explanations, markdown code fences, or other text outside of the code itself."
-                generated_code = self.generate_code(prompt=code_prompt, system_prompt=code_generation_system_prompt + "\n----\n" + reasoning_prompt_template, **llm_generation_kwargs)
+                generated_code = self.generate_code(prompt=code_prompt, system_prompt=code_generation_system_prompt, **llm_generation_kwargs)
 
                 code_uuid = str(uuid.uuid4())
                 generated_code_store[code_uuid] = generated_code
 
                 tool_result = {"status": "success", "code_id": code_uuid, "summary": f"Code generated successfully. Use this ID in the next tool call that requires code."}
-                tool_calls_this_turn.append({"name": "put_code_in_buffer", "params": tool_params, "result": tool_result})
+                tool_calls_this_turn.append({"name": "prepare_code", "params": tool_params, "result": tool_result})
                 observation_text = f"```json\n{json.dumps(tool_result, indent=2)}\n```"
                 current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
                 log_event(f"Code generated with ID: {code_uuid}", MSG_TYPE.MSG_TYPE_OBSERVATION)
lollms_client-0.31.0.dist-info/METADATA → lollms_client-0.31.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.31.0
+Version: 0.31.1
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License
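
Only the Version field changes here. With the wheel installed, the bump can be confirmed from the distribution metadata using the standard library alone:

```python
# Read the installed package's metadata (Python 3.8+ standard library).
from importlib.metadata import metadata, version

print(version("lollms_client"))              # expected: 0.31.1 after upgrading
print(metadata("lollms_client")["Summary"])  # the Summary field shown above
```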
lollms_client-0.31.0.dist-info/RECORD → lollms_client-0.31.1.dist-info/RECORD CHANGED
@@ -29,9 +29,9 @@ examples/mcp_examples/openai_mcp.py,sha256=7IEnPGPXZgYZyiES_VaUbQ6viQjenpcUxGiHE
 examples/mcp_examples/run_remote_mcp_example_v2.py,sha256=bbNn93NO_lKcFzfIsdvJJijGx2ePFTYfknofqZxMuRM,14626
 examples/mcp_examples/run_standard_mcp_example.py,sha256=GSZpaACPf3mDPsjA8esBQVUsIi7owI39ca5avsmvCxA,9419
 examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
-lollms_client/__init__.py,sha256=xGG08Mr3A0qFgHz0rb_LZpFFL8pH88U8JyZVYa6MCmA,1147
+lollms_client/__init__.py,sha256=JktSbTe0t4EQVVLvNRScoCv9YSjDkFpblIwHDeE_-CE,1147
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=MXTJgsAZ4eCeM-KbY7KJX-klryLX9MCdp8G6O-Y5mEE,176429
+lollms_client/lollms_core.py,sha256=eOO92EFcs1akOXfRBV2QD5kpDmJyiDZP90g75jHgl6w,176520
 lollms_client/lollms_discussion.py,sha256=vaBJ9LJumTUgi2550toNOnEOYMN412OvPicMn8CNi64,85306
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
 lollms_client/lollms_llm_binding.py,sha256=_r5_bZfasJQlI84EfH_sKlVMlOuiIgMXL6wYznRT_GM,15526
@@ -92,9 +92,9 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
 lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-0.31.0.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-0.31.1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
 test/test_lollms_discussion.py,sha256=KxTsV1bPdNz8QqZd7tIof9kTWkeXLUtAMU08BQmoY6U,16829
-lollms_client-0.31.0.dist-info/METADATA,sha256=9BCSndDpLDG8OigH4U8QZPl_puk0soNAdtg2pGcd1dI,38717
-lollms_client-0.31.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lollms_client-0.31.0.dist-info/top_level.txt,sha256=1jIpjTnOSGEGtIW2rGAFM6tVRzgsDdMOiox_SmDH_zw,28
-lollms_client-0.31.0.dist-info/RECORD,,
+lollms_client-0.31.1.dist-info/METADATA,sha256=fqTGolR3AxVMxM-Wc1TX9112dp2wMEEGqQyHSS4xx14,38717
+lollms_client-0.31.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-0.31.1.dist-info/top_level.txt,sha256=1jIpjTnOSGEGtIW2rGAFM6tVRzgsDdMOiox_SmDH_zw,28
+lollms_client-0.31.1.dist-info/RECORD,,
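
Each RECORD entry has the form `path,sha256=<digest>,size`, where the digest is sha256 in urlsafe base64 with the trailing padding stripped (PEP 376/427). Below is a small sketch for recomputing such a digest to spot-check an extracted file; the path in the usage comment is illustrative.

```python
import base64
import hashlib
from pathlib import Path

def record_hash(path: Path) -> str:
    """Wheel RECORD-style hash: sha256, urlsafe base64, '=' padding stripped."""
    digest = hashlib.sha256(path.read_bytes()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# Usage (illustrative path): compare against the RECORD line for __init__.py;
# for 0.31.1 it should print "sha256=JktSbTe0t4EQVVLvNRScoCv9YSjDkFpblIwHDeE_-CE".
# print(record_hash(Path("lollms_client/__init__.py")))
```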