lollms-client 0.20.1__tar.gz → 0.20.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lollms-client might be problematic; consult the registry's advisory page for more details.

Files changed (87):
  1. {lollms_client-0.20.1/lollms_client.egg-info → lollms_client-0.20.3}/PKG-INFO +1 -1
  2. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/__init__.py +1 -1
  3. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/lollms_core.py +15 -5
  4. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/lollms_types.py +3 -0
  5. {lollms_client-0.20.1 → lollms_client-0.20.3/lollms_client.egg-info}/PKG-INFO +1 -1
  6. {lollms_client-0.20.1 → lollms_client-0.20.3}/LICENSE +0 -0
  7. {lollms_client-0.20.1 → lollms_client-0.20.3}/README.md +0 -0
  8. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/article_summary/article_summary.py +0 -0
  9. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/deep_analyze/deep_analyse.py +0 -0
  10. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
  11. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/external_mcp.py +0 -0
  12. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/function_calling_with_local_custom_mcp.py +0 -0
  13. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/generate_a_benchmark_for_safe_store.py +0 -0
  14. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/generate_and_speak/generate_and_speak.py +0 -0
  15. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/generate_game_sfx/generate_game_fx.py +0 -0
  16. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/generate_text_with_multihop_rag_example.py +0 -0
  17. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/internet_search_with_rag.py +0 -0
  18. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/local_mcp.py +0 -0
  19. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/openai_mcp.py +0 -0
  20. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/personality_test/chat_test.py +0 -0
  21. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/personality_test/chat_with_aristotle.py +0 -0
  22. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/personality_test/tesks_test.py +0 -0
  23. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/run_standard_mcp_example.py +0 -0
  24. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/simple_text_gen_test.py +0 -0
  25. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/simple_text_gen_with_image_test.py +0 -0
  26. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/test_local_models/local_chat.py +0 -0
  27. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/text_2_audio.py +0 -0
  28. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/text_2_image.py +0 -0
  29. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/text_2_image_diffusers.py +0 -0
  30. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/text_and_image_2_audio.py +0 -0
  31. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/text_gen.py +0 -0
  32. {lollms_client-0.20.1 → lollms_client-0.20.3}/examples/text_gen_system_prompt.py +0 -0
  33. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/llm_bindings/__init__.py +0 -0
  34. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  35. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/llm_bindings/lollms/__init__.py +0 -0
  36. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/llm_bindings/ollama/__init__.py +0 -0
  37. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/llm_bindings/openai/__init__.py +0 -0
  38. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  39. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  40. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  41. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  42. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  43. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/lollms_config.py +0 -0
  44. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/lollms_discussion.py +0 -0
  45. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/lollms_js_analyzer.py +0 -0
  46. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/lollms_llm_binding.py +0 -0
  47. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/lollms_mcp_binding.py +0 -0
  48. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/lollms_python_analyzer.py +0 -0
  49. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/lollms_stt_binding.py +0 -0
  50. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/lollms_tti_binding.py +0 -0
  51. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/lollms_ttm_binding.py +0 -0
  52. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/lollms_tts_binding.py +0 -0
  53. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/lollms_ttv_binding.py +0 -0
  54. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/lollms_utilities.py +0 -0
  55. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
  56. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
  57. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
  58. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
  59. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
  60. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
  61. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
  62. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/stt_bindings/__init__.py +0 -0
  63. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  64. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  65. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  66. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/tti_bindings/__init__.py +0 -0
  67. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/tti_bindings/dalle/__init__.py +0 -0
  68. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
  69. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/tti_bindings/gemini/__init__.py +0 -0
  70. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  71. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/ttm_bindings/__init__.py +0 -0
  72. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  73. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/ttm_bindings/bark/__init__.py +0 -0
  74. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  75. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/tts_bindings/__init__.py +0 -0
  76. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/tts_bindings/bark/__init__.py +0 -0
  77. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  78. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  79. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  80. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/ttv_bindings/__init__.py +0 -0
  81. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  82. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client.egg-info/SOURCES.txt +0 -0
  83. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client.egg-info/dependency_links.txt +0 -0
  84. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client.egg-info/requires.txt +0 -0
  85. {lollms_client-0.20.1 → lollms_client-0.20.3}/lollms_client.egg-info/top_level.txt +0 -0
  86. {lollms_client-0.20.1 → lollms_client-0.20.3}/pyproject.toml +0 -0
  87. {lollms_client-0.20.1 → lollms_client-0.20.3}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lollms_client
3
- Version: 0.20.1
3
+ Version: 0.20.3
4
4
  Summary: A client library for LoLLMs generate endpoint
5
5
  Author-email: ParisNeo <parisneoai@gmail.com>
6
6
  License: Apache Software License
@@ -7,7 +7,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
7
7
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
8
8
 
9
9
 
10
- __version__ = "0.20.1" # Updated version
10
+ __version__ = "0.20.3" # Updated version
11
11
 
12
12
  # Optionally, you could define __all__ if you want to be explicit about exports
13
13
  __all__ = [
@@ -666,7 +666,7 @@ Respond with a JSON object containing ONE of the following structures:
666
666
  """ # No {self.ai_full_header} here, generate_code will get raw JSON
667
667
 
668
668
  if streaming_callback:
669
- streaming_callback(f"LLM deciding next step (iteration {llm_iterations})...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "decision_making"}, turn_history)
669
+ streaming_callback(f"LLM deciding next step (iteration {llm_iterations})...", MSG_TYPE.MSG_TYPE_STEP_START, {"id": "decision_making"}, turn_history)
670
670
 
671
671
  # Use generate_code to get structured JSON output from LLM
672
672
  # Note: generate_code itself uses generate_text. We are asking for JSON here.
@@ -679,7 +679,7 @@ Respond with a JSON object containing ONE of the following structures:
679
679
  # streaming_callback=None, # Decisions are usually not streamed chunk by chunk
680
680
  )
681
681
  if streaming_callback:
682
- streaming_callback(f"LLM decision received.", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "decision_making"}, turn_history)
682
+ streaming_callback(f"LLM decision received.", MSG_TYPE.MSG_TYPE_STEP_END, {"id": "decision_making"}, turn_history)
683
683
 
684
684
 
685
685
  if not raw_llm_decision_json:
@@ -733,10 +733,11 @@ Respond with a JSON object containing ONE of the following structures:
733
733
  current_conversation.append({"role":"assistant", "content":"(I decided to use a tool, but I'm unsure which one. Could you clarify?)"})
734
734
  break # Or ask LLM to try again without this faulty decision in history
735
735
 
736
- tool_call_info = {"type": "tool_call_request", "name": tool_name, "params": tool_params}
736
+ tool_call_info = {"id": "tool_call_request", "name": tool_name, "params": tool_params}
737
737
  turn_history.append(tool_call_info)
738
738
  if streaming_callback:
739
739
  streaming_callback(f"LLM requests to call tool: {tool_name} with params: {tool_params}", MSG_TYPE.MSG_TYPE_INFO, tool_call_info, turn_history)
740
+ streaming_callback("", MSG_TYPE.MSG_TYPE_TOOL_CALL, tool_call_info, turn_history)
740
741
 
741
742
  # Interactive execution if enabled
742
743
  if interactive_tool_execution:
@@ -760,15 +761,17 @@ Respond with a JSON object containing ONE of the following structures:
760
761
 
761
762
 
762
763
  if streaming_callback:
763
- streaming_callback(f"Executing tool: {tool_name}...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "tool_execution", "tool_name": tool_name}, turn_history)
764
+ streaming_callback(f"Executing tool: {tool_name}...", MSG_TYPE.MSG_TYPE_STEP_START, {"id": "tool_execution", "tool_name": tool_name}, turn_history)
764
765
 
765
766
  tool_result = self.mcp.execute_tool(tool_name, tool_params, lollms_client_instance=self)
766
767
 
767
768
  tool_call_info["result"] = tool_result # Add result to this call's info
768
769
  tool_calls_made_this_turn.append(tool_call_info) # Log the completed call
770
+ if streaming_callback:
771
+ streaming_callback(f"", MSG_TYPE.MSG_TYPE_TOOL_OUTPUT, tool_result, turn_history)
769
772
 
770
773
  if streaming_callback:
771
- streaming_callback(f"Tool {tool_name} execution finished. Result: {json.dumps(tool_result)}", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "tool_execution", "tool_name": tool_name, "result": tool_result}, turn_history)
774
+ streaming_callback(f"Tool {tool_name} execution finished. Result: {json.dumps(tool_result)}", MSG_TYPE.MSG_TYPE_STEP_END, {"id": "tool_execution", "tool_name": tool_name, "result": tool_result}, turn_history)
772
775
 
773
776
  # Add tool execution result to conversation for the LLM
774
777
  # The format of this message can influence how the LLM uses the tool output.
@@ -950,6 +953,9 @@ Respond with a JSON object containing ONE of the following structures:
950
953
  answer = json.loads(response)
951
954
  decision = answer["decision"]
952
955
  if not decision:
956
+ if streaming_callback:
957
+ streaming_callback(f"RAG Hop {hop_count + 1} done", MSG_TYPE.MSG_TYPE_STEP_END, {"id": f"rag_hop_{hop_count + 1}", "hop": hop_count + 1}, turn_rag_history_for_callback)
958
+
953
959
  break
954
960
  else:
955
961
  current_query_for_rag = str(answer["query"])
@@ -969,12 +975,14 @@ Respond with a JSON object containing ONE of the following structures:
969
975
  hop_details = {"query": current_query_for_rag, "retrieved_chunks_details": [], "status": ""}
970
976
  previous_queries.append(current_query_for_rag)
971
977
  new_unique = 0
978
+ documents = []
972
979
  for chunk in retrieved:
973
980
  doc = chunk.get("file_path", "Unknown")
974
981
  content = str(chunk.get("chunk_text", ""))
975
982
  sim = float(chunk.get("similarity_percent", 0.0))
976
983
  detail = {"document": doc, "similarity": sim, "content": content,
977
984
  "retrieved_in_hop": hop_count + 1, "query_used": current_query_for_rag}
985
+ documents.append(doc)
978
986
  hop_details["retrieved_chunks_details"].append(detail)
979
987
  key = f"{doc}::{content[:100]}"
980
988
  if key not in all_unique_retrieved_chunks_map:
@@ -984,6 +992,8 @@ Respond with a JSON object containing ONE of the following structures:
984
992
  if hop_count > 0 and new_unique == 0:
985
993
  hop_details["status"] = "No *new* unique chunks retrieved"
986
994
  rag_hops_details_list.append(hop_details)
995
+ if streaming_callback:
996
+ streaming_callback(f"Retreived {len(retrieved)} data chunks from {set(documents)}", MSG_TYPE.MSG_TYPE_STEP, {"id": f"retreival {hop_count + 1}", "hop": hop_count + 1}, turn_rag_history_for_callback)
987
997
 
988
998
  if streaming_callback:
989
999
  streaming_callback(f"RAG Hop {hop_count + 1} done", MSG_TYPE.MSG_TYPE_STEP_END, {"id": f"rag_hop_{hop_count + 1}", "hop": hop_count + 1}, turn_rag_history_for_callback)
@@ -28,6 +28,9 @@ class MSG_TYPE(Enum):
28
28
  MSG_TYPE_NEW_MESSAGE = 15# A new message
29
29
  MSG_TYPE_FINISHED_MESSAGE = 17# End of current message
30
30
 
31
+ #Tool calling
32
+ MSG_TYPE_TOOL_CALL = 18# a tool call
33
+ MSG_TYPE_TOOL_OUTPUT = 19# the output of the tool
31
34
 
32
35
 
33
36
  class SENDER_TYPES(Enum):
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lollms_client
3
- Version: 0.20.1
3
+ Version: 0.20.3
4
4
  Summary: A client library for LoLLMs generate endpoint
5
5
  Author-email: ParisNeo <parisneoai@gmail.com>
6
6
  License: Apache Software License
File without changes
File without changes
File without changes