lollms-client 0.20.0__tar.gz → 0.20.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

Files changed (87)
  1. {lollms_client-0.20.0/lollms_client.egg-info → lollms_client-0.20.2}/PKG-INFO +1 -1
  2. lollms_client-0.20.2/examples/openai_mcp.py +203 -0
  3. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/__init__.py +1 -1
  4. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/lollms_core.py +29 -10
  5. lollms_client-0.20.2/lollms_client/mcp_bindings/remote_mcp/__init__.py +241 -0
  6. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/mcp_bindings/standard_mcp/__init__.py +23 -7
  7. {lollms_client-0.20.0 → lollms_client-0.20.2/lollms_client.egg-info}/PKG-INFO +1 -1
  8. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client.egg-info/SOURCES.txt +2 -0
  9. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client.egg-info/top_level.txt +1 -0
  10. {lollms_client-0.20.0 → lollms_client-0.20.2}/LICENSE +0 -0
  11. {lollms_client-0.20.0 → lollms_client-0.20.2}/README.md +0 -0
  12. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/article_summary/article_summary.py +0 -0
  13. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/deep_analyze/deep_analyse.py +0 -0
  14. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
  15. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/external_mcp.py +0 -0
  16. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/function_calling_with_local_custom_mcp.py +0 -0
  17. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/generate_a_benchmark_for_safe_store.py +0 -0
  18. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/generate_and_speak/generate_and_speak.py +0 -0
  19. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/generate_game_sfx/generate_game_fx.py +0 -0
  20. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/generate_text_with_multihop_rag_example.py +0 -0
  21. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/internet_search_with_rag.py +0 -0
  22. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/local_mcp.py +0 -0
  23. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/personality_test/chat_test.py +0 -0
  24. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/personality_test/chat_with_aristotle.py +0 -0
  25. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/personality_test/tesks_test.py +0 -0
  26. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/run_standard_mcp_example.py +0 -0
  27. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/simple_text_gen_test.py +0 -0
  28. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/simple_text_gen_with_image_test.py +0 -0
  29. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/test_local_models/local_chat.py +0 -0
  30. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/text_2_audio.py +0 -0
  31. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/text_2_image.py +0 -0
  32. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/text_2_image_diffusers.py +0 -0
  33. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/text_and_image_2_audio.py +0 -0
  34. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/text_gen.py +0 -0
  35. {lollms_client-0.20.0 → lollms_client-0.20.2}/examples/text_gen_system_prompt.py +0 -0
  36. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/llm_bindings/__init__.py +0 -0
  37. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  38. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/llm_bindings/lollms/__init__.py +0 -0
  39. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/llm_bindings/ollama/__init__.py +0 -0
  40. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/llm_bindings/openai/__init__.py +0 -0
  41. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  42. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  43. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  44. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  45. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  46. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/lollms_config.py +0 -0
  47. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/lollms_discussion.py +0 -0
  48. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/lollms_js_analyzer.py +0 -0
  49. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/lollms_llm_binding.py +0 -0
  50. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/lollms_mcp_binding.py +0 -0
  51. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/lollms_python_analyzer.py +0 -0
  52. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/lollms_stt_binding.py +0 -0
  53. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/lollms_tti_binding.py +0 -0
  54. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/lollms_ttm_binding.py +0 -0
  55. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/lollms_tts_binding.py +0 -0
  56. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/lollms_ttv_binding.py +0 -0
  57. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/lollms_types.py +0 -0
  58. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/lollms_utilities.py +0 -0
  59. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
  60. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
  61. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
  62. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
  63. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
  64. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/stt_bindings/__init__.py +0 -0
  65. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  66. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  67. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  68. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/tti_bindings/__init__.py +0 -0
  69. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/tti_bindings/dalle/__init__.py +0 -0
  70. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
  71. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/tti_bindings/gemini/__init__.py +0 -0
  72. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  73. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/ttm_bindings/__init__.py +0 -0
  74. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  75. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/ttm_bindings/bark/__init__.py +0 -0
  76. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  77. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/tts_bindings/__init__.py +0 -0
  78. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/tts_bindings/bark/__init__.py +0 -0
  79. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  80. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  81. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  82. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/ttv_bindings/__init__.py +0 -0
  83. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  84. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client.egg-info/dependency_links.txt +0 -0
  85. {lollms_client-0.20.0 → lollms_client-0.20.2}/lollms_client.egg-info/requires.txt +0 -0
  86. {lollms_client-0.20.0 → lollms_client-0.20.2}/pyproject.toml +0 -0
  87. {lollms_client-0.20.0 → lollms_client-0.20.2}/setup.cfg +0 -0
--- lollms_client-0.20.0/lollms_client.egg-info/PKG-INFO
+++ lollms_client-0.20.2/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.20.0
+Version: 0.20.2
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License
--- /dev/null
+++ lollms_client-0.20.2/examples/openai_mcp.py
@@ -0,0 +1,203 @@
+# File: run_openai_mcp_example.py
+# (Keep imports, path adjustments, helper functions, and initial setup as before)
+
+import sys
+import os
+import shutil
+from pathlib import Path
+import json
+import base64
+from dotenv import load_dotenv
+
+load_dotenv() # For this script's own potential .env
+
+try:
+    from lollms_client import LollmsClient
+    from ascii_colors import ASCIIColors, trace_exception
+    from lollms_client.lollms_types import MSG_TYPE
+except ImportError as e:
+    print(f"ERROR: Could not import LollmsClient components: {e}")
+    trace_exception(e)
+    sys.exit(1)
+
+PATH_TO_OPENAI_MCP_SERVER_PROJECT = Path(__file__).resolve().parent # Standard if script is in ParisNeoMCPServers root
+if not PATH_TO_OPENAI_MCP_SERVER_PROJECT.is_dir():
+    print(f"ERROR: openai-mcp-server project not found at {PATH_TO_OPENAI_MCP_SERVER_PROJECT}")
+    sys.exit(1)
+
+OUTPUT_DIRECTORY = Path(__file__).resolve().parent / "mcp_example_outputs"
+OUTPUT_DIRECTORY.mkdir(parents=True, exist_ok=True)
+
+def save_base64_audio(base64_str: str, filename_stem: str, audio_format: str) -> Path:
+    audio_bytes = base64.b64decode(base64_str)
+    file_path = OUTPUT_DIRECTORY / f"{filename_stem}.{audio_format}"
+    with open(file_path, "wb") as f: f.write(audio_bytes)
+    ASCIIColors.green(f"Audio saved to: {file_path}")
+    return file_path
+
+def save_base64_image(base64_str: str, filename_stem: str) -> Path:
+    image_bytes = base64.b64decode(base64_str)
+    file_path = OUTPUT_DIRECTORY / f"{filename_stem}.png"
+    with open(file_path, "wb") as f: f.write(image_bytes)
+    ASCIIColors.green(f"Image saved to: {file_path}")
+    return file_path
+
+def main():
+    ASCIIColors.red(f"--- Example: Using LollmsClient with OpenAI MCP Server (TTS & DALL-E) ---")
+    ASCIIColors.red(f"--- Make sure OPENAI_API_KEY is set in: {PATH_TO_OPENAI_MCP_SERVER_PROJECT / '.env'} ---")
+    ASCIIColors.red(f"--- And that 'uv pip install -e .' has been run in: {PATH_TO_OPENAI_MCP_SERVER_PROJECT} ---")
+
+    # Determine the Python executable within the server's .venv IF IT EXISTS
+    # This is the most robust way to ensure the server runs with its own isolated dependencies.
+    path_to_openai_server_venv_python = PATH_TO_OPENAI_MCP_SERVER_PROJECT / ".venv" / ("Scripts" if os.name == "nt" else "bin") / "python"
+
+    python_exe_to_use = None
+    if path_to_openai_server_venv_python.exists():
+        python_exe_to_use = str(path_to_openai_server_venv_python.resolve())
+        ASCIIColors.cyan(f"Attempting to use Python from server's .venv: {python_exe_to_use}")
+    else:
+        python_exe_to_use = sys.executable # Fallback to current script's Python
+        ASCIIColors.yellow(f"Server's .venv Python not found at {path_to_openai_server_venv_python}. Using current environment's Python: {python_exe_to_use}")
+        ASCIIColors.yellow("Ensure openai-mcp-server dependencies are met in the current environment if its .venv is not used.")
+
+    mcp_config = {
+        "initial_servers": {
+            "my_openai_server": {
+                "command": [
+                    "uv", # Use uv to manage the environment for the python execution
+                    "run",
+                    "--quiet", # Optional: reduce uv's own output unless there's an error
+                    "--", # Separator: arguments after this are for the command being run by `uv run`
+                    python_exe_to_use, # Explicitly specify the Python interpreter
+                    str((PATH_TO_OPENAI_MCP_SERVER_PROJECT / "openai_mcp_server" / "server.py").resolve()) # Full path to your server script
+                ],
+                "args": [], # No *additional* arguments for server.py itself here
+                "cwd": str(PATH_TO_OPENAI_MCP_SERVER_PROJECT.resolve()), # CRUCIAL
+            }
+        }
+    }
+
+
+    ASCIIColors.magenta("\n1. Initializing LollmsClient...")
+    try:
+        client = LollmsClient(
+            binding_name="ollama",
+            model_name="mistral-nemo:latest",
+            mcp_binding_name="standard_mcp",
+            mcp_binding_config=mcp_config,
+        )
+    except Exception as e:
+        ASCIIColors.error(f"Failed to initialize LollmsClient: {e}")
+        trace_exception(e)
+        sys.exit(1)
+
+    if not client.binding or not client.mcp:
+        ASCIIColors.error("LollmsClient LLM or MCP binding failed to load.")
+        if hasattr(client, 'close'): client.close()
+        sys.exit(1)
+    ASCIIColors.green("LollmsClient initialized successfully.")
+
+    def mcp_streaming_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, history: list = None) -> bool:
+        prefix = ""
+        color_func = ASCIIColors.green
+        if metadata:
+            type_info = metadata.get('type', 'unknown_type')
+            tool_name_info = metadata.get('tool_name', '')
+            prefix = f"MCP ({type_info}{f' - {tool_name_info}' if tool_name_info else ''})"
+            if msg_type == MSG_TYPE.MSG_TYPE_STEP_START: color_func = ASCIIColors.cyan; prefix += " Step Start"
+            elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END: color_func = ASCIIColors.cyan; prefix += " Step End"
+            elif msg_type == MSG_TYPE.MSG_TYPE_INFO: color_func = ASCIIColors.yellow; prefix += " Info"
+            elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION: color_func = ASCIIColors.red; prefix += " Exception"
+        else:
+            prefix = f"MCP (Type: {str(msg_type).split('.')[-1]})"
+        if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: ASCIIColors.green(chunk, end="")
+        else: color_func(f"{prefix}: {chunk}")
+        sys.stdout.flush()
+        return True
+
+    # --- Test 1: General Text Query (handled by Ollama, no MCP tool expected) ---
+    ASCIIColors.magenta("\n2. Test: General Text Query (should be handled by Ollama)")
+    general_query = "What is the capital of France?"
+    general_response = client.generate_with_mcp( # generate_with_mcp will discover no suitable text tool
+        prompt=general_query,
+        streaming_callback=mcp_streaming_callback,
+        # tools=[] # Optionally explicitly pass an empty list of tools if you want to be sure
+        # generate_with_mcp will discover tools from the binding if not passed
+    )
+    print()
+    ASCIIColors.blue(f"Final response for general query: {json.dumps(general_response, indent=2)}")
+    assert general_response.get("error") is None, f"General query error: {general_response.get('error')}"
+    assert general_response.get("final_answer"), "General query: no final answer."
+    tool_calls_general = general_response.get("tool_calls", [])
+    assert len(tool_calls_general) == 0, "General query should NOT have called an MCP tool from my_openai_server."
+    ASCIIColors.green(f"General query handled by LLM directly, as expected. Answer: {general_response.get('final_answer')[:100]}...")
+
+
+    # --- Test 2: Text-to-Speech (TTS) ---
+    ASCIIColors.magenta("\n3. Test: OpenAI TTS via MCP")
+    tts_text = "This audio was generated by the OpenAI MCP server through Lollms Client."
+    tts_prompt_for_llm = f"Please use the OpenAI tool to say the following using tts: '{tts_text}'."
+
+    tts_response = client.generate_with_mcp(
+        prompt=tts_prompt_for_llm,
+        streaming_callback=mcp_streaming_callback,
+        max_tool_calls=1
+    )
+    print()
+    ASCIIColors.blue(f"Final response for TTS prompt: {json.dumps(tts_response, indent=2)}")
+
+    assert tts_response.get("error") is None, f"TTS error: {tts_response.get('error')}"
+    assert tts_response.get("final_answer"), "TTS: no final answer (LLM should confirm action)."
+    tool_calls_tts = tts_response.get("tool_calls", [])
+    assert len(tool_calls_tts) > 0, "TTS should have called a tool."
+    if tool_calls_tts:
+        assert tool_calls_tts[0]["name"] == "my_openai_server::generate_tts", "Incorrect tool for TTS."
+        tts_result_output = tool_calls_tts[0].get("result", {}).get("output", {})
+        assert "audio_base64" in tts_result_output, "TTS tool result missing 'audio_base64'."
+        assert "format" in tts_result_output, "TTS tool result missing 'format'."
+        if tts_result_output.get("audio_base64"):
+            save_base64_audio(tts_result_output["audio_base64"], "openai_tts_example_output", tts_result_output["format"])
+
+    # --- Test 3: DALL-E Image Generation ---
+    ASCIIColors.magenta("\n4. Test: OpenAI DALL-E Image Generation via MCP")
+    dalle_image_prompt = "A vibrant illustration of a friendly AI robot helping a human plant a tree on a futuristic Earth."
+    dalle_prompt_for_llm = f"I need an image for a presentation. Can you use DALL-E to create this: {dalle_image_prompt}. Please use URL format for the image."
+
+    dalle_response = client.generate_with_mcp(
+        prompt=dalle_prompt_for_llm,
+        streaming_callback=mcp_streaming_callback,
+        max_tool_calls=1,
+        # You could also try to force params for the tool if LLM struggles:
+        # Example: if LLM isn't picking response_format="url"
+        # This requires knowing the exact tool name and schema, usually let LLM handle it.
+    )
+    print()
+    ASCIIColors.blue(f"Final response for DALL-E prompt: {json.dumps(dalle_response, indent=2)}")
+
+    assert dalle_response.get("error") is None, f"DALL-E error: {dalle_response.get('error')}"
+    assert dalle_response.get("final_answer"), "DALL-E: no final answer (LLM should confirm action)."
+    tool_calls_dalle = dalle_response.get("tool_calls", [])
+    assert len(tool_calls_dalle) > 0, "DALL-E should have called a tool."
+    if tool_calls_dalle:
+        assert tool_calls_dalle[0]["name"] == "my_openai_server::generate_image_dalle", "Incorrect tool for DALL-E."
+        dalle_result_output = tool_calls_dalle[0].get("result", {}).get("output", {})
+        assert "images" in dalle_result_output and isinstance(dalle_result_output["images"], list), "DALL-E result missing 'images' list."
+        if dalle_result_output.get("images"):
+            image_data = dalle_result_output["images"][0]
+            if image_data.get("url"):
+                ASCIIColors.green(f"DALL-E image URL: {image_data['url']}")
+                ASCIIColors.info(f"Revised prompt by DALL-E: {image_data.get('revised_prompt')}")
+            elif image_data.get("b64_json"):
+                save_base64_image(image_data["b64_json"], "openai_dalle_example_output")
+                ASCIIColors.info(f"Revised prompt by DALL-E: {image_data.get('revised_prompt')}")
+
+    ASCIIColors.magenta("\n5. Closing LollmsClient...")
+    if client and hasattr(client, 'close'):
+        try: client.close()
+        except Exception as e: ASCIIColors.error(f"Error closing LollmsClient: {e}"); trace_exception(e)
+
+    ASCIIColors.info(f"Example finished. Check {OUTPUT_DIRECTORY} for any generated files.")
+    ASCIIColors.red("\n--- LollmsClient with OpenAI MCP Server (TTS & DALL-E) Example Finished ---")
+
+if __name__ == "__main__":
+    main()
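The new example is long because it asserts on every intermediate result. Stripped to its essentials, the pattern it exercises is: describe how to spawn the MCP server under `initial_servers`, then let `generate_with_mcp` discover and invoke the server's tools. A minimal sketch under the same assumptions as the example (the server script path, working directory, and model name are illustrative placeholders, and `streaming_callback` is assumed optional):

```python
# Minimal sketch of the pattern exercised by examples/openai_mcp.py.
# Server command, cwd, and model name below are illustrative placeholders.
from lollms_client import LollmsClient

mcp_config = {
    "initial_servers": {
        "my_openai_server": {
            # Command used to spawn the MCP server process over stdio.
            "command": ["python", "path/to/openai_mcp_server/server.py"],
            "args": [],
            "cwd": "path/to/server_project",
        }
    }
}

client = LollmsClient(
    binding_name="ollama",              # any supported LLM binding
    model_name="mistral-nemo:latest",
    mcp_binding_name="standard_mcp",
    mcp_binding_config=mcp_config,
)

# generate_with_mcp discovers tools from the binding, lets the LLM decide
# whether to call one, and returns a dict with "final_answer", "tool_calls",
# and "error" keys (the asserts in the example rely on exactly these).
result = client.generate_with_mcp(
    prompt="Please use the OpenAI tool to say 'hello' using tts.",
    max_tool_calls=1,
)
print(result.get("final_answer"))
for call in result.get("tool_calls", []):
    print(call["name"], "->", call.get("result", {}).get("output"))

client.close()
```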
--- lollms_client-0.20.0/lollms_client/__init__.py
+++ lollms_client-0.20.2/lollms_client/__init__.py
@@ -7,7 +7,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 
 
-__version__ = "0.20.0" # Updated version
+__version__ = "0.20.2" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
--- lollms_client-0.20.0/lollms_client/lollms_core.py
+++ lollms_client-0.20.2/lollms_client/lollms_core.py
@@ -587,7 +587,7 @@ Don't forget encapsulate the code inside a html code tag. This is mandatory.
         # 1. Discover tools if not provided
         if tools is None:
             try:
-                tools = self.mcp.discover_tools()
+                tools = self.mcp.discover_tools(force_refresh=True)
                 if not tools:
                     ASCIIColors.warning("No MCP tools discovered by the binding.")
             except Exception as e_disc:
@@ -898,7 +898,7 @@ Respond with a JSON object containing ONE of the following structures:
         # 0. Optional Objectives Extraction Step
         if extract_objectives:
             if streaming_callback:
-                streaming_callback("Extracting and structuring objectives...", MSG_TYPE.MSG_TYPE_STEP, {"type": "objectives_extraction"}, turn_rag_history_for_callback)
+                streaming_callback("Extracting and structuring objectives...", MSG_TYPE.MSG_TYPE_STEP_START, {"id": "objectives_extraction"}, turn_rag_history_for_callback)
             obj_prompt = (
                 "You are an expert analyst. "
                 "Your task is to extract and structure the key objectives from the user's request below. "
@@ -914,22 +914,33 @@ Respond with a JSON object containing ONE of the following structures:
             )
             objectives_text = self.remove_thinking_blocks(obj_gen).strip()
             if streaming_callback:
-                streaming_callback(f"Objectives extracted:\n{objectives_text}", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "objectives_extracted"}, turn_rag_history_for_callback)
+                streaming_callback(f"Objectives: {objectives_text}", MSG_TYPE.MSG_TYPE_STEP, {"id": "objectives_extraction"}, turn_rag_history_for_callback)
+
+            if streaming_callback:
+                streaming_callback(f"Objectives extracted:\n{objectives_text}", MSG_TYPE.MSG_TYPE_STEP_END, {"id": "objectives_extraction"}, turn_rag_history_for_callback)
 
         current_query_for_rag = rag_query_text or None
         previous_queries=[]
         # 1. RAG Hops
         for hop_count in range(max_rag_hops + 1):
             if streaming_callback:
-                streaming_callback(f"Starting RAG Hop {hop_count + 1}", MSG_TYPE.MSG_TYPE_STEP, {"type": "rag_hop_start", "hop": hop_count + 1}, turn_rag_history_for_callback)
+                streaming_callback(f"Starting RAG Hop {hop_count + 1}", MSG_TYPE.MSG_TYPE_STEP_START, {"id": f"rag_hop_{hop_count + 1}", "hop": hop_count + 1}, turn_rag_history_for_callback)
             txt_previous_queries = f"Previous queries:\n"+'\n'.join(previous_queries)+"\n\n" if len(previous_queries)>0 else ""
             txt_informations = f"Information:\n"+'\n'.join([f"(from {chunk['document']}):{chunk['content']}" for _, chunk in all_unique_retrieved_chunks_map.items()]) if len(all_unique_retrieved_chunks_map)>0 else "This is the first request. No data received yet. Build a new query."
-            txt_sp = "Your objective is to analyze the provided chunks of information, then decise if they are sufficient to reach the objective. If you need more information, formulate a new query to extract more data."
-            txt_formatting = """The output format must be in form of json placed inside a json markdown tag. Here is the schema to use:
+            txt_sp = (
+                "Your objective is to analyze the provided chunks of information to determine "
+                "whether they are sufficient to reach the objective. If not, formulate a refined and focused query "
+                "that can retrieve more relevant information from a vector database. Ensure the query captures the semantic essence "
+                "of what is missing, is contextually independent, and is optimized for vector-based similarity search. "
+                "Do not repeat or rephrase earlier queries—always generate a new, meaningful atomic query targeting the current gap in knowledge."
+            )
+
+            txt_formatting = """The output format must be in form of JSON placed inside a JSON markdown tag. Use the following schema:
 ```json
 {
-    "decision": A boolean depicting your decision (true: more data is needed, false: there is enough data to reach objective),
-    "query": (str, optional, only if decision is true). A new query to recover more information from the data source (do not use previous queries as they have already been used)
+    "decision": A boolean indicating your decision (true: more data is needed, false: the current data is sufficient),
+    "query": (str, optional, only if decision is true). A new, atomic query suitable for semantic search in a vector database.
+    It should capture the missing concept or insight in concise, context-rich language, avoiding reuse of earlier queries.
 }
 ```
 """
@@ -939,12 +950,18 @@ Respond with a JSON object containing ONE of the following structures:
                 answer = json.loads(response)
                 decision = answer["decision"]
                 if not decision:
+                    if streaming_callback:
+                        streaming_callback(f"RAG Hop {hop_count + 1} done", MSG_TYPE.MSG_TYPE_STEP_END, {"id": f"rag_hop_{hop_count + 1}", "hop": hop_count + 1}, turn_rag_history_for_callback)
+
                     break
                 else:
                     current_query_for_rag = str(answer["query"])
             except Exception as ex:
                 trace_exception(ex)
 
+            if streaming_callback:
+                streaming_callback(f"Query: {current_query_for_rag}", MSG_TYPE.MSG_TYPE_STEP, {"id": f"query for hop {hop_count + 1}", "hop": hop_count + 1}, turn_rag_history_for_callback)
+
             # Retrieve chunks
             try:
                 retrieved = rag_query_function(current_query_for_rag, rag_vectorizer_name, rag_top_k, rag_min_similarity_percent)
@@ -971,6 +988,8 @@ Respond with a JSON object containing ONE of the following structures:
                 hop_details["status"] = "No *new* unique chunks retrieved"
             rag_hops_details_list.append(hop_details)
 
+            if streaming_callback:
+                streaming_callback(f"RAG Hop {hop_count + 1} done", MSG_TYPE.MSG_TYPE_STEP_END, {"id": f"rag_hop_{hop_count + 1}", "hop": hop_count + 1}, turn_rag_history_for_callback)
 
 
         # 2. Prepare & Summarize Context
@@ -994,7 +1013,7 @@ Respond with a JSON object containing ONE of the following structures:
         # If context exceeds our effective limit, summarize it
         if self.count_tokens(accumulated_context) > effective_ctx_size:
             if streaming_callback:
-                streaming_callback("Context too large, performing intermediate summary...", MSG_TYPE.MSG_TYPE_STEP, {"type": "intermediate_summary"}, turn_rag_history_for_callback)
+                streaming_callback("Context too large, performing intermediate summary...", MSG_TYPE.MSG_TYPE_STEP_START, {"id": "intermediate_summary"}, turn_rag_history_for_callback)
             summary_prompt = (
                 "Summarize the following gathered context into a concise form "
                 "that preserves all key facts and sources needed to answer the user's request:\n\n"
@@ -1009,7 +1028,7 @@ Respond with a JSON object containing ONE of the following structures:
             )
             accumulated_context = self.remove_thinking_blocks(summary).strip()
             if streaming_callback:
-                streaming_callback("Intermediate summary complete.", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "intermediate_summary"}, turn_rag_history_for_callback)
+                streaming_callback("Intermediate summary complete.", MSG_TYPE.MSG_TYPE_STEP_END, {"id": "intermediate_summary"}, turn_rag_history_for_callback)
 
         # 3. Final Answer Generation
         final_prompt = [
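The rewritten `txt_sp` and `txt_formatting` prompts tighten the multi-hop RAG loop's contract with the model: each hop must answer with a JSON object that either stops the loop or supplies the next vector-search query. A sketch of how one hop's reply is consumed, mirroring the parsing in the loop above (the reply text itself is invented for illustration):

```python
# Illustrative hop decision: the reply content is made up, but the schema
# and the parsing follow the RAG-hop loop in lollms_core shown above.
import json

llm_reply = """
{
    "decision": true,
    "query": "remaining facts about objective two, phrased for vector similarity search"
}
"""

answer = json.loads(llm_reply)
if not answer["decision"]:
    pass  # enough context gathered: the loop breaks and final answer generation starts
else:
    # becomes current_query_for_rag and is fed to rag_query_function on the next hop
    current_query_for_rag = str(answer["query"])
```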
--- /dev/null
+++ lollms_client-0.20.2/lollms_client/mcp_bindings/remote_mcp/__init__.py
@@ -0,0 +1,241 @@
+# Conceptual: lollms_client/mcp_bindings/remote_mcp/__init__.py
+
+import asyncio
+from contextlib import AsyncExitStack
+from typing import Optional, List, Dict, Any, Tuple
+from lollms_client.lollms_mcp_binding import LollmsMCPBinding
+from ascii_colors import ASCIIColors, trace_exception
+import threading, json # json is needed for json.dumps/json.loads below (missing from the original listing)
+try:
+    from mcp import ClientSession, types
+    # Import the specific network client from MCP SDK
+    from mcp.client.streamable_http import streamablehttp_client
+    # If supporting OAuth, you'd import auth components:
+    # from mcp.client.auth import OAuthClientProvider, TokenStorage
+    # from mcp.shared.auth import OAuthClientMetadata, OAuthToken
+    MCP_LIBRARY_AVAILABLE = True
+except ImportError:
+    # ... (error handling as in StandardMCPBinding) ...
+    MCP_LIBRARY_AVAILABLE = False
+    ClientSession = None # etc.
+    streamablehttp_client = None
+
+
+BindingName = "RemoteMCPBinding"
+# No TOOL_NAME_SEPARATOR needed if connecting to one remote server per instance,
+# or if server aliases are handled differently (e.g. part of URL or config)
+TOOL_NAME_SEPARATOR = "::"
+
+class RemoteMCPBinding(LollmsMCPBinding):
+    def __init__(self,
+                 server_url: str, # e.g., "http://localhost:8000/mcp"
+                 alias: str = "remote_server", # An alias for this connection
+                 auth_config: Optional[Dict[str, Any]] = None, # For API keys, OAuth, etc.
+                 **other_config_params: Any):
+        super().__init__(binding_name="remote_mcp")
+
+        if not MCP_LIBRARY_AVAILABLE:
+            ASCIIColors.error(f"{self.binding_name}: MCP library not available.")
+            return
+
+        if not server_url:
+            ASCIIColors.error(f"{self.binding_name}: server_url is required.")
+            # Or raise ValueError
+            return
+
+        self.server_url = server_url
+        self.alias = alias # Could be used to prefix tool names if managing multiple remotes
+        self.auth_config = auth_config or {}
+        self.config = {
+            "server_url": server_url,
+            "alias": alias,
+            "auth_config": self.auth_config
+        }
+        self.config.update(other_config_params)
+
+        self._mcp_session: Optional[ClientSession] = None
+        self._exit_stack: Optional[AsyncExitStack] = None
+        self._discovered_tools_cache: List[Dict[str, Any]] = []
+        self._is_initialized = False
+        self._loop: Optional[asyncio.AbstractEventLoop] = None
+        self._thread: Optional[threading.Thread] = None
+
+        self._start_event_loop_thread() # Similar to StandardMCPBinding
+
+    def _start_event_loop_thread(self): # Simplified from StandardMCPBinding
+        if self._loop and self._loop.is_running(): return
+        self._loop = asyncio.new_event_loop()
+        self._thread = threading.Thread(target=self._run_loop_forever, daemon=True)
+        self._thread.start()
+
+    def _run_loop_forever(self):
+        if not self._loop: return
+        asyncio.set_event_loop(self._loop)
+        try: self._loop.run_forever()
+        finally:
+            # ... (loop cleanup as in StandardMCPBinding) ...
+            if not self._loop.is_closed(): self._loop.close()
+
+    def _run_async(self, coro, timeout=None): # Simplified
+        if not self._loop or not self._loop.is_running(): raise RuntimeError("Event loop not running.")
+        future = asyncio.run_coroutine_threadsafe(coro, self._loop)
+        return future.result(timeout)
+
+    async def _initialize_connection_async(self) -> bool:
+        if self._is_initialized: return True
+        ASCIIColors.info(f"{self.binding_name}: Initializing connection to {self.server_url}...")
+        try:
+            self._exit_stack = AsyncExitStack()
+
+            # --- Authentication Setup (Conceptual) ---
+            # oauth_provider = None
+            # if self.auth_config.get("type") == "oauth":
+            #     # oauth_provider = OAuthClientProvider(...) # Setup based on auth_config
+            #     pass
+            # http_headers = {}
+            # if self.auth_config.get("type") == "api_key":
+            #     key = self.auth_config.get("key")
+            #     header_name = self.auth_config.get("header_name", "X-API-Key")
+            #     if key: http_headers[header_name] = key
+
+            # Use streamablehttp_client from MCP SDK
+            # The `auth` parameter of streamablehttp_client takes an OAuthClientProvider
+            # For simple API key headers, you might need to use `httpx` directly
+            # or see if streamablehttp_client allows passing custom headers.
+            # The MCP client example for streamable HTTP doesn't show custom headers directly,
+            # it focuses on OAuth.
+            # If `streamablehttp_client` takes `**kwargs` that are passed to `httpx.AsyncClient`,
+            # then `headers=http_headers` might work.
+
+            # Assuming streamablehttp_client can take headers if needed, or auth provider
+            # For now, let's assume no auth for simplicity or that it's handled by underlying httpx if passed via kwargs
+            client_streams = await self._exit_stack.enter_async_context(
+                streamablehttp_client(self.server_url) # Add auth=oauth_provider or headers=http_headers if supported
+            )
+            read_stream, write_stream, _http_client_instance = client_streams # http_client_instance might be useful
+
+            self._mcp_session = await self._exit_stack.enter_async_context(
+                ClientSession(read_stream, write_stream)
+            )
+            await self._mcp_session.initialize()
+            self._is_initialized = True
+            ASCIIColors.green(f"{self.binding_name}: Connected to {self.server_url}")
+            await self._refresh_tools_cache_async()
+            return True
+        except Exception as e:
+            trace_exception(e)
+            ASCIIColors.error(f"{self.binding_name}: Failed to connect to {self.server_url}: {e}")
+            if self._exit_stack: await self._exit_stack.aclose() # Cleanup on failure
+            self._exit_stack = None
+            self._mcp_session = None
+            self._is_initialized = False
+            return False
+
+    def _ensure_initialized_sync(self, timeout=30.0):
+        if not self._is_initialized:
+            success = self._run_async(self._initialize_connection_async(), timeout=timeout)
+            if not success: raise ConnectionError(f"Failed to initialize remote MCP connection to {self.server_url}")
+        if not self._mcp_session: # Double check
+            raise ConnectionError(f"MCP Session not valid after init attempt for {self.server_url}")
+
+
+    async def _refresh_tools_cache_async(self):
+        if not self._is_initialized or not self._mcp_session: return
+        ASCIIColors.info(f"{self.binding_name}: Refreshing tools from {self.server_url}...")
+        try:
+            list_tools_result = await self._mcp_session.list_tools()
+            current_tools = []
+            # ... (tool parsing logic similar to StandardMCPBinding, but no server alias prefix needed if one server per binding instance)
+            for tool_obj in list_tools_result.tools:
+                # ...
+                input_schema_dict = {}
+                tool_input_schema = getattr(tool_obj, 'inputSchema', getattr(tool_obj, 'input_schema', None))
+                if tool_input_schema:
+                    if hasattr(tool_input_schema, 'model_dump'):
+                        input_schema_dict = tool_input_schema.model_dump(mode='json', exclude_none=True)
+                    elif isinstance(tool_input_schema, dict):
+                        input_schema_dict = tool_input_schema
+
+                tool_name_for_client = f"{self.alias}{TOOL_NAME_SEPARATOR}{tool_obj.name}" if TOOL_NAME_SEPARATOR else tool_obj.name
+
+                current_tools.append({
+                    "name": tool_name_for_client, # Use self.alias to prefix
+                    "description": tool_obj.description or "",
+                    "input_schema": input_schema_dict
+                })
+            self._discovered_tools_cache = current_tools
+            ASCIIColors.green(f"{self.binding_name}: Tools refreshed for {self.server_url}. Found {len(current_tools)} tools.")
+        except Exception as e:
+            trace_exception(e)
+            ASCIIColors.error(f"{self.binding_name}: Error refreshing tools from {self.server_url}: {e}")
+
+    def discover_tools(self, force_refresh: bool = False, timeout_per_server: float = 10.0, **kwargs) -> List[Dict[str, Any]]:
+        # This binding instance connects to ONE server, so timeout_per_server is just 'timeout'
+        try:
+            self._ensure_initialized_sync(timeout=timeout_per_server)
+            if force_refresh or not self._discovered_tools_cache:
+                self._run_async(self._refresh_tools_cache_async(), timeout=timeout_per_server)
+            return self._discovered_tools_cache
+        except Exception as e:
+            ASCIIColors.error(f"{self.binding_name}: Problem during tool discovery for {self.server_url}: {e}")
+            return []
+
+    async def _execute_tool_async(self, actual_tool_name: str, params: Dict[str, Any]) -> Dict[str, Any]:
+        if not self._is_initialized or not self._mcp_session:
+            return {"error": f"Not connected to {self.server_url}", "status_code": 503}
+
+        ASCIIColors.info(f"{self.binding_name}: Executing remote tool '{actual_tool_name}' on {self.server_url} with params: {json.dumps(params)}")
+        try:
+            mcp_call_result = await self._mcp_session.call_tool(name=actual_tool_name, arguments=params)
+            # ... (result parsing as in StandardMCPBinding) ...
+            output_parts = [p.text for p in mcp_call_result.content if isinstance(p, types.TextContent) and p.text is not None] if mcp_call_result.content else []
+            if not output_parts: return {"output": {"message": "Tool executed but returned no textual content."}, "status_code": 200}
+            combined_output_str = "\n".join(output_parts)
+            try: return {"output": json.loads(combined_output_str), "status_code": 200}
+            except json.JSONDecodeError: return {"output": combined_output_str, "status_code": 200}
+        except Exception as e:
+            trace_exception(e)
+            return {"error": f"Error executing remote tool '{actual_tool_name}': {str(e)}", "status_code": 500}
+
+
+    def execute_tool(self, tool_name_with_alias: str, params: Dict[str, Any], **kwargs) -> Dict[str, Any]:
+        timeout = float(kwargs.get('timeout', 60.0))
+
+        # If using alias prefixing (self.alias + TOOL_NAME_SEPARATOR + actual_name)
+        expected_prefix = f"{self.alias}{TOOL_NAME_SEPARATOR}"
+        if TOOL_NAME_SEPARATOR and tool_name_with_alias.startswith(expected_prefix):
+            actual_tool_name = tool_name_with_alias[len(expected_prefix):]
+        elif not TOOL_NAME_SEPARATOR and tool_name_with_alias: # No prefixing, tool_name is actual_tool_name
+            actual_tool_name = tool_name_with_alias
+        else:
+            return {"error": f"Tool name '{tool_name_with_alias}' does not match expected alias '{self.alias}'.", "status_code": 400}
+
+        try:
+            self._ensure_initialized_sync(timeout=min(timeout, 30.0))
+            return self._run_async(self._execute_tool_async(actual_tool_name, params), timeout=timeout)
+        # ... (error handling as in StandardMCPBinding) ...
+        except ConnectionError as e: return {"error": f"{self.binding_name}: Connection issue for '{self.server_url}': {e}", "status_code": 503}
+        except TimeoutError: return {"error": f"{self.binding_name}: Remote tool '{actual_tool_name}' on '{self.server_url}' timed out.", "status_code": 504}
+        except Exception as e:
+            trace_exception(e)
+            return {"error": f"{self.binding_name}: Failed to run remote MCP tool '{actual_tool_name}': {e}", "status_code": 500}
+
+    def close(self):
+        ASCIIColors.info(f"{self.binding_name}: Closing connection to {self.server_url}...")
+        if self._exit_stack:
+            try:
+                # The anyio task error might also occur here if not careful
+                self._run_async(self._exit_stack.aclose(), timeout=10.0)
+            except Exception as e:
+                ASCIIColors.error(f"{self.binding_name}: Error during async close for {self.server_url}: {e}")
+        self._exit_stack = None
+        self._mcp_session = None
+        self._is_initialized = False
+
+        # Stop event loop thread
+        if self._loop and self._loop.is_running(): self._loop.call_soon_threadsafe(self._loop.stop)
+        if self._thread and self._thread.is_alive(): self._thread.join(timeout=5.0)
+        ASCIIColors.info(f"{self.binding_name}: Remote connection binding closed.")
+
+    def get_binding_config(self) -> Dict[str, Any]: # LollmsMCPBinding might expect this
+        return self.config
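Since the new binding describes itself as conceptual, a short usage sketch may help orient readers. The server URL and tool name below are placeholders, not endpoints shipped with the package; the alias-prefixed tool naming follows `_refresh_tools_cache_async` above:

```python
# Hypothetical usage of the conceptual RemoteMCPBinding.
# The server URL and tool name are placeholders, not shipped endpoints.
from lollms_client.mcp_bindings.remote_mcp import RemoteMCPBinding

binding = RemoteMCPBinding(
    server_url="http://localhost:8000/mcp",  # a Streamable HTTP MCP endpoint
    alias="remote_server",
)
try:
    # Tool names come back prefixed "alias::tool", mirroring StandardMCPBinding.
    for tool in binding.discover_tools(force_refresh=True):
        print(tool["name"], "-", tool["description"])

    result = binding.execute_tool(
        "remote_server::some_tool",  # placeholder tool name
        {"param": "value"},
        timeout=60.0,
    )
    print(result)  # {"output": ..., "status_code": 200} on success
finally:
    binding.close()
```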
--- lollms_client-0.20.0/lollms_client/mcp_bindings/standard_mcp/__init__.py
+++ lollms_client-0.20.2/lollms_client/mcp_bindings/standard_mcp/__init__.py
@@ -285,15 +285,22 @@ class StandardMCPBinding(LollmsMCPBinding):
         ASCIIColors.info(f"{self.binding_name}: Connection for '{alias}' not initialized. Attempting initialization...")
         try:
             success = self._run_async_task(self._initialize_connection_async(alias), timeout=timeout)
-            if not success:
-                raise ConnectionError(f"Failed to initialize MCP connection for server '{alias}'. Check previous logs for details.")
+            if not success:
+                # If init itself reports failure (e.g. returns False from _initialize_connection_async)
+                self._discovered_tools_cache[alias] = [] # CLEAR CACHE ON FAILURE
+                raise ConnectionError(f"MCP init for '{alias}' reported failure.")
         except TimeoutError:
-            raise ConnectionError(f"MCP initialization for server '{alias}' timed out after {timeout} seconds.")
-        except Exception as e:
-            raise ConnectionError(f"MCP initialization for server '{alias}' failed: {str(e)}")
-
+            self._discovered_tools_cache[alias] = [] # CLEAR CACHE ON FAILURE
+            raise ConnectionError(f"MCP init for '{alias}' timed out.")
+        except Exception as e: # Other exceptions during run_async_task
+            self._discovered_tools_cache[alias] = [] # CLEAR CACHE ON FAILURE
+            raise ConnectionError(f"MCP init for '{alias}' failed: {e}")
+
         if not self._initialization_status.get(alias) or alias not in self._mcp_sessions:
-            raise ConnectionError(f"MCP Session for server '{alias}' is not valid after initialization attempt.")
+            # This means init was thought to be successful by the lock block, but status is bad
+            # This case might indicate a race or an issue if _initialize_connection_async doesn't set status correctly on all paths
+            self._discovered_tools_cache[alias] = [] # Also clear here as a safeguard
+            raise ConnectionError(f"MCP Session for '{alias}' not valid post-init attempt, despite no immediate error.")
 
     async def _refresh_tools_cache_async(self, alias: str):
         if not MCP_LIBRARY_AVAILABLE or not types:
@@ -368,8 +375,17 @@ class StandardMCPBinding(LollmsMCPBinding):
 
         for alias in active_aliases:
             try:
+                if force_refresh: # Explicitly clear before ensuring init if forcing
+                    ASCIIColors.yellow(f"{self.binding_name}: Force refresh - clearing cache for '{alias}' before init.")
+                    self._discovered_tools_cache[alias] = []
+
                 self._ensure_server_initialized_sync(alias, timeout=tps)
 
+                # If force_refresh OR if server is initialized but cache is empty/stale
+                if force_refresh or (self._initialization_status.get(alias) and not self._discovered_tools_cache.get(alias)):
+                    ASCIIColors.info(f"{self.binding_name}: Refreshing tools for '{alias}' (force_refresh={force_refresh}, cache_empty={not self._discovered_tools_cache.get(alias)}).")
+                    self._run_async_task(self._refresh_tools_cache_async(alias), timeout=tps)
+
                 if fr or (self._initialization_status.get(alias) and not self._discovered_tools_cache.get(alias)):
                     ASCIIColors.info(f"{self.binding_name}: Force refreshing tools for '{alias}' or cache is empty.")
                     self._run_async_task(self._refresh_tools_cache_async(alias), timeout=tps)
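These cache changes pair with the `discover_tools(force_refresh=True)` call added to lollms_core above: a forced refresh now clears each alias's cache before re-initializing, and an initialization failure empties the cache instead of leaving stale entries. A hedged sketch of the observable behavior, assuming a `client` configured with a single server as in examples/openai_mcp.py:

```python
# Sketch of the 0.20.2 discovery behavior for StandardMCPBinding.
# Assumes "client" is a LollmsClient set up as in examples/openai_mcp.py.
tools = client.mcp.discover_tools(force_refresh=True)

if not tools:
    # A failed init now leaves the per-alias cache empty, so retrying
    # re-attempts initialization rather than returning tools from a dead server.
    tools = client.mcp.discover_tools(force_refresh=True)

# With the single-server config above, every tool name carries the alias prefix.
assert all(t["name"].startswith("my_openai_server::") for t in tools)
```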
--- lollms_client-0.20.0/PKG-INFO
+++ lollms_client-0.20.2/lollms_client.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.20.0
+Version: 0.20.2
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License
--- lollms_client-0.20.0/lollms_client.egg-info/SOURCES.txt
+++ lollms_client-0.20.2/lollms_client.egg-info/SOURCES.txt
@@ -7,6 +7,7 @@ examples/generate_a_benchmark_for_safe_store.py
 examples/generate_text_with_multihop_rag_example.py
 examples/internet_search_with_rag.py
 examples/local_mcp.py
+examples/openai_mcp.py
 examples/run_standard_mcp_example.py
 examples/simple_text_gen_test.py
 examples/simple_text_gen_with_image_test.py
@@ -60,6 +61,7 @@ lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py
 lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py
 lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py
 lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py
+lollms_client/mcp_bindings/remote_mcp/__init__.py
 lollms_client/mcp_bindings/standard_mcp/__init__.py
 lollms_client/stt_bindings/__init__.py
 lollms_client/stt_bindings/lollms/__init__.py
--- lollms_client-0.20.0/lollms_client.egg-info/top_level.txt
+++ lollms_client-0.20.2/lollms_client.egg-info/top_level.txt
@@ -1,4 +1,5 @@
 ai_documentation
 dist
+docs
 examples
 lollms_client