lollms-client 0.20.2__py3-none-any.whl → 0.20.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

lollms_client/__init__.py CHANGED

@@ -7,7 +7,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 
 
- __version__ = "0.20.2" # Updated version
+ __version__ = "0.20.3" # Updated version
 
  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [
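Worth noting: the package version lives in two places, the __version__ attribute patched here and the Version: field in the wheel's METADATA (diffed below), and this release bumps both in lockstep. A minimal sketch of checking which one an environment actually has, assuming only a standard install:

import lollms_client
from importlib.metadata import version

# Module-level attribute, set in lollms_client/__init__.py as shown above.
print(lollms_client.__version__)   # "0.20.3" after this upgrade

# Same number, read from the installed wheel's METADATA instead.
print(version("lollms_client"))    # "0.20.3"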
lollms_client/lollms_core.py CHANGED

@@ -666,7 +666,7 @@ Respond with a JSON object containing ONE of the following structures:
  """ # No {self.ai_full_header} here, generate_code will get raw JSON
 
  if streaming_callback:
-     streaming_callback(f"LLM deciding next step (iteration {llm_iterations})...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "decision_making"}, turn_history)
+     streaming_callback(f"LLM deciding next step (iteration {llm_iterations})...", MSG_TYPE.MSG_TYPE_STEP_START, {"id": "decision_making"}, turn_history)
 
  # Use generate_code to get structured JSON output from LLM
  # Note: generate_code itself uses generate_text. We are asking for JSON here.

@@ -679,7 +679,7 @@ Respond with a JSON object containing ONE of the following structures:
  # streaming_callback=None, # Decisions are usually not streamed chunk by chunk
  )
  if streaming_callback:
-     streaming_callback(f"LLM decision received.", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "decision_making"}, turn_history)
+     streaming_callback(f"LLM decision received.", MSG_TYPE.MSG_TYPE_STEP_END, {"id": "decision_making"}, turn_history)
 
 
  if not raw_llm_decision_json:

@@ -733,10 +733,11 @@ Respond with a JSON object containing ONE of the following structures:
  current_conversation.append({"role":"assistant", "content":"(I decided to use a tool, but I'm unsure which one. Could you clarify?)"})
  break # Or ask LLM to try again without this faulty decision in history
 
- tool_call_info = {"type": "tool_call_request", "name": tool_name, "params": tool_params}
+ tool_call_info = {"id": "tool_call_request", "name": tool_name, "params": tool_params}
  turn_history.append(tool_call_info)
  if streaming_callback:
      streaming_callback(f"LLM requests to call tool: {tool_name} with params: {tool_params}", MSG_TYPE.MSG_TYPE_INFO, tool_call_info, turn_history)
+     streaming_callback("", MSG_TYPE.MSG_TYPE_TOOL_CALL, tool_call_info, turn_history)
 
  # Interactive execution if enabled
  if interactive_tool_execution:

@@ -760,15 +761,17 @@ Respond with a JSON object containing ONE of the following structures:
 
 
  if streaming_callback:
-     streaming_callback(f"Executing tool: {tool_name}...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "tool_execution", "tool_name": tool_name}, turn_history)
+     streaming_callback(f"Executing tool: {tool_name}...", MSG_TYPE.MSG_TYPE_STEP_START, {"id": "tool_execution", "tool_name": tool_name}, turn_history)
 
  tool_result = self.mcp.execute_tool(tool_name, tool_params, lollms_client_instance=self)
 
  tool_call_info["result"] = tool_result # Add result to this call's info
  tool_calls_made_this_turn.append(tool_call_info) # Log the completed call
+ if streaming_callback:
+     streaming_callback(f"", MSG_TYPE.MSG_TYPE_TOOL_OUTPUT, tool_result, turn_history)
 
  if streaming_callback:
-     streaming_callback(f"Tool {tool_name} execution finished. Result: {json.dumps(tool_result)}", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "tool_execution", "tool_name": tool_name, "result": tool_result}, turn_history)
+     streaming_callback(f"Tool {tool_name} execution finished. Result: {json.dumps(tool_result)}", MSG_TYPE.MSG_TYPE_STEP_END, {"id": "tool_execution", "tool_name": tool_name, "result": tool_result}, turn_history)
 
  # Add tool execution result to conversation for the LLM
  # The format of this message can influence how the LLM uses the tool output.
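Two things change in this tool-calling path: the step-metadata key is renamed from "type" to "id" throughout, and tool activity is now surfaced as first-class stream events, with MSG_TYPE_TOOL_CALL carrying the tool_call_info dict and MSG_TYPE_TOOL_OUTPUT carrying the raw result of mcp.execute_tool. Below is a minimal sketch of a consumer-side callback under the four-argument shape these calls use; the import path and the truthy-return convention are assumptions, not something this diff shows:

from lollms_client import MSG_TYPE

def my_streaming_callback(message, msg_type, payload, history) -> bool:
    # payload is the third positional argument seen in the calls above:
    # a metadata dict for steps, tool_call_info for TOOL_CALL events,
    # and the raw tool_result for TOOL_OUTPUT events.
    if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
        print(f"[{payload.get('id')}] started: {message}")
    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
        print(f"[{payload.get('id')}] done: {message}")
    elif msg_type == MSG_TYPE.MSG_TYPE_TOOL_CALL:
        print(f"tool requested: {payload.get('name')} params={payload.get('params')}")
    elif msg_type == MSG_TYPE.MSG_TYPE_TOOL_OUTPUT:
        print(f"tool result: {payload}")
    return True  # assumption: a truthy return means "keep streaming"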
@@ -972,12 +975,14 @@ Respond with a JSON object containing ONE of the following structures:
  hop_details = {"query": current_query_for_rag, "retrieved_chunks_details": [], "status": ""}
  previous_queries.append(current_query_for_rag)
  new_unique = 0
+ documents = []
  for chunk in retrieved:
      doc = chunk.get("file_path", "Unknown")
      content = str(chunk.get("chunk_text", ""))
      sim = float(chunk.get("similarity_percent", 0.0))
      detail = {"document": doc, "similarity": sim, "content": content,
                "retrieved_in_hop": hop_count + 1, "query_used": current_query_for_rag}
+     documents.append(doc)
      hop_details["retrieved_chunks_details"].append(detail)
      key = f"{doc}::{content[:100]}"
      if key not in all_unique_retrieved_chunks_map:

@@ -987,6 +992,8 @@ Respond with a JSON object containing ONE of the following structures:
  if hop_count > 0 and new_unique == 0:
      hop_details["status"] = "No *new* unique chunks retrieved"
  rag_hops_details_list.append(hop_details)
+ if streaming_callback:
+     streaming_callback(f"Retreived {len(retrieved)} data chunks from {set(documents)}", MSG_TYPE.MSG_TYPE_STEP, {"id": f"retreival {hop_count + 1}", "hop": hop_count + 1}, turn_rag_history_for_callback)
 
  if streaming_callback:
      streaming_callback(f"RAG Hop {hop_count + 1} done", MSG_TYPE.MSG_TYPE_STEP_END, {"id": f"rag_hop_{hop_count + 1}", "hop": hop_count + 1}, turn_rag_history_for_callback)
lollms_client/lollms_types.py CHANGED

@@ -28,6 +28,9 @@ class MSG_TYPE(Enum):
  MSG_TYPE_NEW_MESSAGE = 15# A new message
  MSG_TYPE_FINISHED_MESSAGE = 17# End of current message
 
+ #Tool calling
+ MSG_TYPE_TOOL_CALL = 18# a tool call
+ MSG_TYPE_TOOL_OUTPUT = 19# the output of the tool
 
 
 class SENDER_TYPES(Enum):
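Because MSG_TYPE_TOOL_CALL and MSG_TYPE_TOOL_OUTPUT only exist from 0.20.3 on, a callback that must also run against 0.20.2 should probe for them rather than reference them directly. A minimal sketch, assuming only that MSG_TYPE is importable from the package:

from lollms_client import MSG_TYPE

# Resolve to None on 0.20.2 instead of raising AttributeError at import time.
TOOL_CALL = getattr(MSG_TYPE, "MSG_TYPE_TOOL_CALL", None)
TOOL_OUTPUT = getattr(MSG_TYPE, "MSG_TYPE_TOOL_OUTPUT", None)

def on_stream_event(message, msg_type, payload, history) -> bool:
    if TOOL_CALL is not None and msg_type == TOOL_CALL:
        print("tool requested:", payload.get("name"))
    elif TOOL_OUTPUT is not None and msg_type == TOOL_OUTPUT:
        print("tool returned:", payload)
    return True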
lollms_client-0.20.3.dist-info/METADATA CHANGED

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 0.20.2
+ Version: 0.20.3
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
lollms_client-0.20.3.dist-info/RECORD CHANGED

@@ -23,9 +23,9 @@ examples/personality_test/chat_test.py,sha256=o2jlpoddFc-T592iqAiA29xk3x27KsdK5D
  examples/personality_test/chat_with_aristotle.py,sha256=4X_fwubMpd0Eq2rCReS2bgVlUoAqJprjkLXk2Jz6pXU,1774
  examples/personality_test/tesks_test.py,sha256=7LIiwrEbva9WWZOLi34fsmCBN__RZbPpxoUOKA_AtYk,1924
  examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
- lollms_client/__init__.py,sha256=gtclROjgcwwzB6fNJbrrqZ0l5F80i-cDHOAVlLB7P1w,910
+ lollms_client/__init__.py,sha256=wRi23qidXTyMK09HNTzeoYEMKhB_7ZImWScltfYgntE,910
  lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
- lollms_client/lollms_core.py,sha256=NErsbmV22p8PPkjYhRyexlEd9WmK2OFSbjXQD3jk5cE,114308
+ lollms_client/lollms_core.py,sha256=iKvH20tckzdYLlIpp-srWswdI4lb0vo5po7xo3Ogsgk,114865
  lollms_client/lollms_discussion.py,sha256=EV90dIgw8a-f-82vB2GspR60RniYz7WnBmAWSIg5mW0,2158
  lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
  lollms_client/lollms_llm_binding.py,sha256=bdElz_IBx0zZ-85YTT1fyY_mSoHo46tKIMiHYJlKCkM,9809

@@ -36,7 +36,7 @@ lollms_client/lollms_tti_binding.py,sha256=afO0-d-Kqsmh8UHTijTvy6dZAt-XDB6R-IHmd
  lollms_client/lollms_ttm_binding.py,sha256=FjVVSNXOZXK1qvcKEfxdiX6l2b4XdGOSNnZ0utAsbDg,4167
  lollms_client/lollms_tts_binding.py,sha256=5cJYECj8PYLJAyB6SEH7_fhHYK3Om-Y3arkygCnZ24o,4342
  lollms_client/lollms_ttv_binding.py,sha256=KkTaHLBhEEdt4sSVBlbwr5i_g_TlhcrwrT-7DjOsjWQ,4131
- lollms_client/lollms_types.py,sha256=cfc1sremM8KR4avkYX99fIVkkdRvXErrCWKGjLrgv50,2723
+ lollms_client/lollms_types.py,sha256=CLiodudFgTbuXTGgupDt6IgMvJkrfiOHdw1clx_5UjA,2863
  lollms_client/lollms_utilities.py,sha256=WiG-HHMdo86j3LBndcBQ-PbMqQ8kGKLp1e9WuLDzRVU,7048
  lollms_client/llm_bindings/__init__.py,sha256=9sWGpmWSSj6KQ8H4lKGCjpLYwhnVdL_2N7gXCphPqh4,14
  lollms_client/llm_bindings/llamacpp/__init__.py,sha256=tUdCh00Tcg2VtavM5uRNsAoEkdeHI4p3nFsF9YUcYuk,58402

@@ -75,8 +75,8 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
  lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
  lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
  lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- lollms_client-0.20.2.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
- lollms_client-0.20.2.dist-info/METADATA,sha256=rj4bqIZvMDqNnI1CswGJXPY8opCozNPFjve9-DZ8IFU,13374
- lollms_client-0.20.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- lollms_client-0.20.2.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
- lollms_client-0.20.2.dist-info/RECORD,,
+ lollms_client-0.20.3.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+ lollms_client-0.20.3.dist-info/METADATA,sha256=ShF3nPm1Y3KT9UkJ_3zkA8BudOb8O-zNn9iuo3KxCe4,13374
+ lollms_client-0.20.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ lollms_client-0.20.3.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
+ lollms_client-0.20.3.dist-info/RECORD,,
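The sha256= tokens in RECORD follow the standard wheel convention (PEP 376/PEP 427): the urlsafe base64 encoding of the file's raw SHA-256 digest, with trailing '=' padding stripped. A self-contained sketch for recomputing one locally and comparing it against a RECORD line; the path argument is whatever you have on disk:

import base64
import hashlib

def record_digest(path: str) -> str:
    # Return the "sha256=..." token RECORD uses for a file:
    # urlsafe base64 of the raw SHA-256 digest, '=' padding stripped.
    with open(path, "rb") as f:
        raw = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

# For the 0.20.3 wheel, lollms_client/lollms_types.py should yield
# sha256=CLiodudFgTbuXTGgupDt6IgMvJkrfiOHdw1clx_5UjA (2863 bytes, per RECORD).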