lollms-client 0.33.0__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

Files changed (74)
  1. lollms_client/__init__.py +1 -1
  2. lollms_client/llm_bindings/azure_openai/__init__.py +6 -10
  3. lollms_client/llm_bindings/claude/__init__.py +4 -7
  4. lollms_client/llm_bindings/gemini/__init__.py +3 -7
  5. lollms_client/llm_bindings/grok/__init__.py +3 -7
  6. lollms_client/llm_bindings/groq/__init__.py +4 -6
  7. lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +4 -6
  8. lollms_client/llm_bindings/litellm/__init__.py +15 -6
  9. lollms_client/llm_bindings/llamacpp/__init__.py +27 -9
  10. lollms_client/llm_bindings/lollms/__init__.py +24 -14
  11. lollms_client/llm_bindings/lollms_webui/__init__.py +6 -12
  12. lollms_client/llm_bindings/mistral/__init__.py +3 -5
  13. lollms_client/llm_bindings/ollama/__init__.py +6 -11
  14. lollms_client/llm_bindings/open_router/__init__.py +4 -6
  15. lollms_client/llm_bindings/openai/__init__.py +7 -14
  16. lollms_client/llm_bindings/openllm/__init__.py +12 -12
  17. lollms_client/llm_bindings/pythonllamacpp/__init__.py +1 -1
  18. lollms_client/llm_bindings/tensor_rt/__init__.py +8 -13
  19. lollms_client/llm_bindings/transformers/__init__.py +14 -6
  20. lollms_client/llm_bindings/vllm/__init__.py +16 -12
  21. lollms_client/lollms_core.py +303 -490
  22. lollms_client/lollms_discussion.py +431 -78
  23. lollms_client/lollms_llm_binding.py +192 -381
  24. lollms_client/lollms_mcp_binding.py +33 -2
  25. lollms_client/lollms_tti_binding.py +107 -2
  26. lollms_client/mcp_bindings/local_mcp/__init__.py +3 -2
  27. lollms_client/mcp_bindings/remote_mcp/__init__.py +6 -5
  28. lollms_client/mcp_bindings/standard_mcp/__init__.py +3 -5
  29. lollms_client/stt_bindings/lollms/__init__.py +6 -8
  30. lollms_client/stt_bindings/whisper/__init__.py +2 -4
  31. lollms_client/stt_bindings/whispercpp/__init__.py +15 -16
  32. lollms_client/tti_bindings/dalle/__init__.py +50 -29
  33. lollms_client/tti_bindings/diffusers/__init__.py +227 -439
  34. lollms_client/tti_bindings/gemini/__init__.py +320 -0
  35. lollms_client/tti_bindings/lollms/__init__.py +8 -9
  36. lollms_client-1.1.0.dist-info/METADATA +1214 -0
  37. lollms_client-1.1.0.dist-info/RECORD +69 -0
  38. {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/top_level.txt +0 -2
  39. examples/article_summary/article_summary.py +0 -58
  40. examples/console_discussion/console_app.py +0 -266
  41. examples/console_discussion.py +0 -448
  42. examples/deep_analyze/deep_analyse.py +0 -30
  43. examples/deep_analyze/deep_analyze_multiple_files.py +0 -32
  44. examples/function_calling_with_local_custom_mcp.py +0 -250
  45. examples/generate_a_benchmark_for_safe_store.py +0 -89
  46. examples/generate_and_speak/generate_and_speak.py +0 -251
  47. examples/generate_game_sfx/generate_game_fx.py +0 -240
  48. examples/generate_text_with_multihop_rag_example.py +0 -210
  49. examples/gradio_chat_app.py +0 -228
  50. examples/gradio_lollms_chat.py +0 -259
  51. examples/internet_search_with_rag.py +0 -226
  52. examples/lollms_chat/calculator.py +0 -59
  53. examples/lollms_chat/derivative.py +0 -48
  54. examples/lollms_chat/test_openai_compatible_with_lollms_chat.py +0 -12
  55. examples/lollms_discussions_test.py +0 -155
  56. examples/mcp_examples/external_mcp.py +0 -267
  57. examples/mcp_examples/local_mcp.py +0 -171
  58. examples/mcp_examples/openai_mcp.py +0 -203
  59. examples/mcp_examples/run_remote_mcp_example_v2.py +0 -290
  60. examples/mcp_examples/run_standard_mcp_example.py +0 -204
  61. examples/simple_text_gen_test.py +0 -173
  62. examples/simple_text_gen_with_image_test.py +0 -178
  63. examples/test_local_models/local_chat.py +0 -9
  64. examples/text_2_audio.py +0 -77
  65. examples/text_2_image.py +0 -144
  66. examples/text_2_image_diffusers.py +0 -274
  67. examples/text_and_image_2_audio.py +0 -59
  68. examples/text_gen.py +0 -30
  69. examples/text_gen_system_prompt.py +0 -29
  70. lollms_client-0.33.0.dist-info/METADATA +0 -854
  71. lollms_client-0.33.0.dist-info/RECORD +0 -101
  72. test/test_lollms_discussion.py +0 -368
  73. {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/WHEEL +0 -0
  74. {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/licenses/LICENSE +0 -0
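
For orientation, here is a minimal sketch of the client usage that the removed 0.33.0-era examples below relied on (binding, host, and model names are placeholders taken from those examples; the reworked 1.1.0 API in lollms_core.py may differ):

from lollms_client import LollmsClient

# Sketch based on the removed examples below, not the definitive 1.1.0 API.
lc = LollmsClient(
    binding_name="ollama",                  # LLM binding to load
    host_address="http://localhost:11434",  # default Ollama host
    model_name="mistral-nemo:latest",       # any pulled model works
)
text = lc.generate_text(prompt="Say hello.", n_predict=64)
print(text)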
examples/function_calling_with_local_custom_mcp.py
@@ -1,250 +0,0 @@
- from lollms_client import LollmsClient, MSG_TYPE
- from ascii_colors import ASCIIColors, trace_exception
- from pathlib import Path
- import json # For pretty printing results
-
- # --- Configuration ---
- # LLM Configuration
- LLM_BINDING_NAME = "ollama"
- OLLAMA_HOST_ADDRESS = "http://localhost:11434" # Default Ollama host
- # Ensure you have a model capable of following instructions and generating JSON.
- # Mistral, Llama 3, or Phi-3 variants often work well.
- OLLAMA_MODEL_NAME = "mistral-nemo:latest" # Or "llama3:latest", "phi3:latest" - ensure it's pulled
-
- # Local MCP Binding Configuration
- # This path should point to the directory containing your tool subdirectories
- # (e.g., 'get_weather/', 'sum_numbers/').
- # For this example, we assume 'temp_mcp_tools_for_test' is in the parent directory
- # of this examples folder.
- TOOLS_FOLDER = Path(__file__).parent.parent / "temp_mcp_tools_for_test"
-
- # Function Calling Parameters
- MAX_LLM_ITERATIONS_FOR_TOOL_CALLS = 3 # How many times the LLM can decide to call a tool in a sequence
- MAX_TOOL_CALLS_PER_TURN = 2 # Max distinct tools executed per user prompt
-
- # --- Helper to Create Dummy Tools (if they don't exist) ---
- def ensure_dummy_tools_exist(base_tools_dir: Path):
-     if not base_tools_dir.exists():
-         ASCIIColors.info(f"Creating dummy tools directory: {base_tools_dir}")
-         base_tools_dir.mkdir(parents=True, exist_ok=True)
-
-     tool_defs = {
-         "get_weather": {
-             "mcp": {
-                 "name": "get_weather",
-                 "description": "Fetches the current weather for a given city.",
-                 "input_schema": {
-                     "type": "object",
-                     "properties": {
-                         "city": {"type": "string", "description": "The city name."},
-                         "unit": {"type": "string", "enum": ["celsius", "fahrenheit"], "default": "celsius"}
-                     },
-                     "required": ["city"]
-                 },
-                 "output_schema": {
-                     "type": "object",
-                     "properties": {
-                         "temperature": {"type": "number"}, "condition": {"type": "string"}, "unit": {"type": "string"}
-                     }
-                 }
-             },
-             "py": """
- import random
- def execute(params: dict) -> dict:
-     city = params.get("city")
-     unit = params.get("unit", "celsius")
-     if not city: return {"error": "City not provided"}
-     conditions = ["sunny", "cloudy", "rainy", "snowy", "windy"]
-     temp = random.randint(-5 if unit == "celsius" else 23, 30 if unit == "celsius" else 86)
-     return {"temperature": temp, "condition": random.choice(conditions), "unit": unit}
- """
-         },
-         "sum_numbers": {
-             "mcp": {
-                 "name": "sum_numbers",
-                 "description": "Calculates the sum of a list of numbers.",
-                 "input_schema": {
-                     "type": "object",
-                     "properties": {"numbers": {"type": "array", "items": {"type": "number"}}},
-                     "required": ["numbers"]
-                 },
-                 "output_schema": {"type": "object", "properties": {"sum": {"type": "number"}}}
-             },
-             "py": """
- def execute(params: dict) -> dict:
-     numbers = params.get("numbers", [])
-     if not isinstance(numbers, list) or not all(isinstance(n, (int, float)) for n in numbers):
-         return {"error": "Invalid input: 'numbers' must be a list of numbers."}
-     return {"sum": sum(numbers)}
- """
-         }
-     }
-
-     for tool_name, files_content in tool_defs.items():
-         tool_dir = base_tools_dir / tool_name
-         tool_dir.mkdir(exist_ok=True)
-
-         mcp_file = tool_dir / f"{tool_name}.mcp.json"
-         py_file = tool_dir / f"{tool_name}.py"
-
-         if not mcp_file.exists():
-             with open(mcp_file, "w") as f:
-                 json.dump(files_content["mcp"], f, indent=2)
-             ASCIIColors.info(f"Created MCP definition for {tool_name}")
-
-         if not py_file.exists():
-             with open(py_file, "w") as f:
-                 f.write(files_content["py"])
-             ASCIIColors.info(f"Created Python code for {tool_name}")
-
- # --- Callback for streaming ---
- def function_calling_stream_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, turn_history: list = None) -> bool:
-     """
-     Callback to handle streamed output during function calling.
-     """
-     if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: # LLM generating text
-         ASCIIColors.success(chunk, end="", flush=True)
-     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
-         step_type = metadata.get("type", "step") if metadata else "step"
-         step_info = metadata.get("tool_name", "") if metadata and "tool_name" in metadata else chunk
-         ASCIIColors.info(f">> Starting {step_type}: {step_info}", flush=True)
-     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
-         step_type = metadata.get("type", "step") if metadata else "step"
-         step_info = metadata.get("tool_name", "") if metadata and "tool_name" in metadata else chunk
-         if metadata and "result" in metadata:
-             ASCIIColors.success(f"<< Finished {step_type}: {step_info} -> Result: {json.dumps(metadata['result'])}", flush=True)
-         else:
-             ASCIIColors.success(f"<< Finished {step_type}: {step_info}", flush=True)
-     elif msg_type == MSG_TYPE.MSG_TYPE_INFO:
-         if metadata and metadata.get("type") == "tool_call_request":
-             ASCIIColors.info(f"AI requests to call tool: {metadata.get('name')} with params: {metadata.get('params')}", flush=True)
-         else:
-             ASCIIColors.info(f"INFO: {chunk}", flush=True)
-     elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
-         ASCIIColors.error(f"ERROR in stream: {chunk}", flush=True)
-
-     # Optional debug info:
-     # ASCIIColors.info(f"DEBUG Turn History (so far): {turn_history}")
-     return True
-
-
- def run_function_calling_example():
-     ASCIIColors.red("--- LoLLMs Client with Local MCP Function Calling Example ---")
-
-     ensure_dummy_tools_exist(TOOLS_FOLDER) # Make sure our example tools are present
-
-     try:
-         ASCIIColors.magenta("\n1. Initializing LollmsClient...")
-         # MCP binding config is passed directly to the binding constructor
-         mcp_binding_configuration = {"tools_folder_path": str(TOOLS_FOLDER)}
-
-         lc = LollmsClient(
-             binding_name=LLM_BINDING_NAME,
-             host_address=OLLAMA_HOST_ADDRESS,
-             model_name=OLLAMA_MODEL_NAME,
-             mcp_binding_name="local_mcp", # Activate the LocalMCP binding
-             mcp_binding_config=mcp_binding_configuration, # Pass its specific config
-             # Optional: Configure default LLM generation params if needed
-             temperature=0.2, # Lower temp for more focused tool decisions / final answer
-             n_predict=1024
-         )
-         ASCIIColors.green("LollmsClient initialized successfully.")
-         if lc.mcp:
-             ASCIIColors.info(f"MCP Binding '{lc.mcp.binding_name}' loaded.")
-             discovered_tools_on_init = lc.mcp.discover_tools()
-             ASCIIColors.info(f"Tools discovered by MCP binding on init: {[t['name'] for t in discovered_tools_on_init]}")
-         else:
-             ASCIIColors.error("MCP binding was not loaded correctly. Aborting.")
-             return
-
-         # --- Example Interaction 1: Weather Request ---
-         ASCIIColors.magenta("\n2. Example 1: Asking for weather")
-         user_prompt_weather = "What's the weather like in Paris today, and can you tell me in Fahrenheit?"
-         ASCIIColors.blue(f"User: {user_prompt_weather}")
-         ASCIIColors.yellow("AI thinking and interacting with tools (streaming output):")
-
-         weather_result = lc.generate_with_mcp(
-             prompt=user_prompt_weather,
-             # tools=None, # Let it discover from the binding
-             max_tool_calls=MAX_TOOL_CALLS_PER_TURN,
-             max_llm_iterations=MAX_LLM_ITERATIONS_FOR_TOOL_CALLS,
-             streaming_callback=function_calling_stream_callback,
-             # interactive_tool_execution=True # Uncomment to confirm tool calls
-         )
-         print("\n--- End of AI Response for Weather ---")
-         if weather_result["error"]:
-             ASCIIColors.error(f"Error in weather example: {weather_result['error']}")
-         else:
-             ASCIIColors.cyan(f"\nFinal Answer (Weather): {weather_result['final_answer']}")
-             ASCIIColors.info("\nTool Calls Made (Weather Example):")
-             for tc in weather_result["tool_calls"]:
-                 print(f" - Tool: {tc['name']}, Params: {tc['params']}, Result: {json.dumps(tc['result'])}")
-
-
-         # --- Example Interaction 2: Summation Request ---
-         ASCIIColors.magenta("\n3. Example 2: Asking to sum numbers")
-         user_prompt_sum = "Hey, can you please calculate the sum of 15.5, 25, and -5.5 for me?"
-         ASCIIColors.blue(f"User: {user_prompt_sum}")
-         ASCIIColors.yellow("AI thinking and interacting with tools (streaming output):")
-
-         sum_result_data = lc.generate_with_mcp(
-             prompt=user_prompt_sum,
-             max_tool_calls=MAX_TOOL_CALLS_PER_TURN,
-             max_llm_iterations=MAX_LLM_ITERATIONS_FOR_TOOL_CALLS,
-             streaming_callback=function_calling_stream_callback
-         )
-         print("\n--- End of AI Response for Sum ---")
-         if sum_result_data["error"]:
-             ASCIIColors.error(f"Error in sum example: {sum_result_data['error']}")
-         else:
-             ASCIIColors.cyan(f"\nFinal Answer (Sum): {sum_result_data['final_answer']}")
-             ASCIIColors.info("\nTool Calls Made (Sum Example):")
-             for tc in sum_result_data["tool_calls"]:
-                 print(f" - Tool: {tc['name']}, Params: {tc['params']}, Result: {json.dumps(tc['result'])}")
-
-
-         # --- Example Interaction 3: Multi-step (hypothetical, weather then sum) ---
-         ASCIIColors.magenta("\n4. Example 3: Multi-step (Weather, then maybe sum if AI decides)")
-         user_prompt_multi = "What's the weather in Berlin? And also, what's 100 + 200 + 300?"
-         ASCIIColors.blue(f"User: {user_prompt_multi}")
-         ASCIIColors.yellow("AI thinking and interacting with tools (streaming output):")
-
-         multi_result_data = lc.generate_with_mcp(
-             prompt=user_prompt_multi,
-             max_tool_calls=MAX_TOOL_CALLS_PER_TURN, # Allow up to 2 different tools
-             max_llm_iterations=MAX_LLM_ITERATIONS_FOR_TOOL_CALLS + 1, # Allow a bit more LLM thinking
-             streaming_callback=function_calling_stream_callback
-         )
-         print("\n--- End of AI Response for Multi-step ---")
-         if multi_result_data["error"]:
-             ASCIIColors.error(f"Error in multi-step example: {multi_result_data['error']}")
-         else:
-             ASCIIColors.cyan(f"\nFinal Answer (Multi-step): {multi_result_data['final_answer']}")
-             ASCIIColors.info("\nTool Calls Made (Multi-step Example):")
-             for tc in multi_result_data["tool_calls"]:
-                 print(f" - Tool: {tc['name']}, Params: {tc['params']}, Result: {json.dumps(tc['result'])}")
-
-
-     except ValueError as ve: # Catch init errors
-         ASCIIColors.error(f"Initialization Error: {ve}")
-         trace_exception(ve)
-     except ConnectionRefusedError:
-         ASCIIColors.error(f"Connection refused. Is the Ollama server running at {OLLAMA_HOST_ADDRESS}?")
-     except Exception as e:
-         ASCIIColors.error(f"An unexpected error occurred: {e}")
-         trace_exception(e)
-     finally:
-         ASCIIColors.info(f"If dummy tools were created, they are in: {TOOLS_FOLDER.resolve()}")
-         # Consider cleaning up TOOLS_FOLDER if it was created purely for this test run.
-         # For this example, we'll leave them.
-         # import shutil
-         # if "ensure_dummy_tools_exist" in globals() and TOOLS_FOLDER.exists() and "temp_mcp_tools_for_test" in str(TOOLS_FOLDER):
-         #     if input(f"Clean up dummy tools at {TOOLS_FOLDER}? (y/n): ").lower() == 'y':
-         #         shutil.rmtree(TOOLS_FOLDER)
-         #         ASCIIColors.info("Cleaned up dummy tools folder.")
-
-     ASCIIColors.red("\n--- Example Finished ---")
-
- if __name__ == "__main__":
-     run_function_calling_example()
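
The removed example above encodes the local_mcp tool convention: each tool lives in its own folder under the tools directory and consists of <name>.mcp.json (name, description, input/output schemas) and <name>.py exposing execute(params: dict) -> dict. A minimal sketch of a hand-written tool following that convention (the echo tool is hypothetical, for illustration only):

# Hypothetical 'echo' tool, mirroring the layout ensure_dummy_tools_exist creates above.
import json
from pathlib import Path

tool_dir = Path("temp_mcp_tools_for_test") / "echo"
tool_dir.mkdir(parents=True, exist_ok=True)

# Schema file: same keys as the get_weather definition above.
(tool_dir / "echo.mcp.json").write_text(json.dumps({
    "name": "echo",
    "description": "Returns the provided message unchanged.",
    "input_schema": {
        "type": "object",
        "properties": {"message": {"type": "string"}},
        "required": ["message"],
    },
    "output_schema": {"type": "object", "properties": {"echo": {"type": "string"}}},
}, indent=2))

# Code file: must expose execute(params: dict) -> dict.
(tool_dir / "echo.py").write_text(
    'def execute(params: dict) -> dict:\n'
    '    message = params.get("message")\n'
    '    if message is None:\n'
    '        return {"error": "Message not provided"}\n'
    '    return {"echo": message}\n'
)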
examples/generate_a_benchmark_for_safe_store.py
@@ -1,89 +0,0 @@
- from lollms_client import LollmsClient
- from ascii_colors import ASCIIColors, trace_exception, ProgressBar
- import pipmaster as pm
- pm.ensure_packages(["datasets"])
- # Assuming you have an active lollms_webui instance running.
- # You can also use ollama, openai, or any other lollms_client binding.
- lc = LollmsClient()
-
- from datasets import load_dataset
- import json
- # 1. Define the dataset name
- dataset_name = "agentlans/high-quality-english-sentences"
-
- try:
-     # 2. Load the dataset
-     # This dataset only has a 'train' split by default.
-     # If a dataset had multiple splits (e.g., 'train', 'validation', 'test'),
-     # load_dataset() would return a DatasetDict.
-     # We can directly access the 'train' split.
-     dataset = load_dataset(dataset_name, split='train')
-     print(f"Dataset loaded successfully: {dataset_name}")
-     print(f"Dataset structure: {dataset}")
-
-     # 3. Extract the sentences into a list
-     # The sentences are in a column likely named 'text' (common for text datasets).
-     # Let's inspect the features to be sure.
-     print(f"Dataset features: {dataset.features}")
-
-     # Assuming the column containing sentences is 'text'.
-     # This is standard for many text datasets on Hugging Face.
-     # dataset['text'] directly gives a list of all values in the 'text' column.
-     sentences_list = dataset['text']
-
-     # If you want to be absolutely sure it's a Python list (it usually is or acts like one):
-     # sentences_list = list(dataset['text'])
-
-     # 4. Verify and print some examples
-     print(f"\nSuccessfully extracted {len(sentences_list)} sentences into a list.")
-
-     if sentences_list:
-         print("\nFirst 5 sentences:")
-         for i in range(min(5, len(sentences_list))):
-             print(f"{i+1}. {sentences_list[i]}")
-
-         print("\nLast 5 sentences:")
-         for i in range(max(0, len(sentences_list) - 5), len(sentences_list)):
-             print(f"{i+1}. {sentences_list[i]}")
-     else:
-         print("The list of sentences is empty.")
-
- except Exception as e:
-     print(f"An error occurred: {e}")
-     print("Please ensure you have an active internet connection and the `datasets` library is installed.")
-     print("The dataset name might be incorrect, or the dataset might require authentication or a specific configuration.")
-
- entries = []
- for sentence in ProgressBar(sentences_list, desc="Processing Items"):
-     prompt = f"""Given the following text chunk:
- "{sentence}"
-
- Generate a JSON object with the following keys and corresponding string values:
- - "id": A title for the sentence being processed
- - "highly_similar": A paraphrase of the original chunk, maintaining the core meaning but using different wording and sentence structure.
- - "related": A sentence or short paragraph that is on the same general topic as the original chunk but discusses a different aspect or a related concept. It should not be a direct paraphrase.
- - "dissimilar": A sentence or short paragraph on a completely unrelated topic.
- - "question_form": A question that encapsulates the main idea or asks about a key aspect of the original chunk.
- - "negation": A sentence that negates the main assertion or a key aspect of the original chunk, while still being topically relevant if possible (e.g., not "The sky is not blue" if the topic is computers).
-
- Ensure the output is ONLY a valid JSON object. Example:
- {{
-     "id": "...",
-     "highly_similar": "...",
-     "related": "...",
-     "dissimilar": "...",
-     "question_form": "...",
-     "negation": "..."
- }}
-
- JSON object:
- """
-     try:
-         output = lc.generate_code(prompt)
-         entry = json.loads(output)
-         entry["query"] = sentence
-         entries.append(entry)
-         with open("benchmark_db.json", "w") as f:
-             json.dump(entries, f, indent=4)
-     except Exception as ex:
-         trace_exception(ex)
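
For reference, each iteration of the loop above appends one record shaped like this to benchmark_db.json (values are illustrative placeholders, not real model output; the "query" key is added after parsing):

# Illustrative shape of a single benchmark entry; placeholder values only.
entry = {
    "id": "Photosynthesis overview",
    "highly_similar": "...",   # paraphrase of the chunk
    "related": "...",          # same topic, different aspect
    "dissimilar": "...",       # unrelated topic
    "question_form": "...",    # question about the chunk
    "negation": "...",         # negated assertion
    "query": "Plants convert sunlight into chemical energy.",  # the original sentence
}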
examples/generate_and_speak/generate_and_speak.py
@@ -1,251 +0,0 @@
- # lollms_client/examples/text_and_speech_demo/generate_and_speak.py
- from pathlib import Path
- import time
- import argparse
-
- # Ensure pygame is installed for this example
- try:
-     import pipmaster as pm
-     pm.ensure_packages(["pygame"])
-     import pygame
-     PYGAME_AVAILABLE = True
- except ImportError:
-     print("Pygame not found or pipmaster failed. Please install it manually: pip install pygame")
-     PYGAME_AVAILABLE = False
- except Exception as e:
-     print(f"Could not ensure pygame: {e}")
-     PYGAME_AVAILABLE = False
-
- from lollms_client import LollmsClient, MSG_TYPE
- from ascii_colors import ASCIIColors, trace_exception
-
- # --- Configuration ---
- SPEECH_OUTPUT_DIR = Path(__file__).parent / "speech_output"
- SPEECH_OUTPUT_DIR.mkdir(exist_ok=True)
-
- # Default path for Piper voices relative to this example script for convenience
- DEFAULT_PIPER_VOICES_SUBDIR = Path(__file__).parent / "piper_voices_for_demo"
- DEFAULT_PIPER_VOICE_FILENAME = "en_US-lessac-medium.onnx" # A common, good quality English voice
-
- def text_stream_callback(chunk: str, message_type: MSG_TYPE, params: dict = None, metadata: list = None) -> bool:
-     if message_type == MSG_TYPE.MSG_TYPE_CHUNK:
-         print(chunk, end="", flush=True)
-     elif message_type == MSG_TYPE.MSG_TYPE_STEP_START:
-         ASCIIColors.yellow(f"\n>> Starting step: {chunk}")
-     elif message_type == MSG_TYPE.MSG_TYPE_STEP_END:
-         ASCIIColors.green(f"\n<< Finished step: {chunk}")
-     return True
-
- def ensure_default_piper_voice_for_demo(voices_dir: Path, voice_filename: str):
-     """Helper to download a default Piper voice if not present for the demo."""
-     voices_dir.mkdir(exist_ok=True)
-     onnx_path = voices_dir / voice_filename
-     json_path = voices_dir / f"{voice_filename}.json"
-
-     if not onnx_path.exists() or not json_path.exists():
-         ASCIIColors.info(f"Default Piper test voice '{voice_filename}' not found in {voices_dir}. Attempting to download...")
-         try:
-             import requests
-             # Construct URLs (assuming en_US/lessac/medium structure)
-             voice_parts = voice_filename.split('-') # e.g., ['en_US', 'lessac', 'medium.onnx']
-             lang_code = voice_parts[0].split('_')[0] # en
-             voice_name_path = "/".join(voice_parts[0:2]) # en_US/lessac
-             quality_path = voice_parts[2].split('.')[0] # medium
-
-             # Base URL for Piper voices on Hugging Face
-             PIPER_VOICES_HF_BASE_URL = "https://huggingface.co/rhasspy/piper-voices/resolve/main/"
-
-             onnx_url = f"{PIPER_VOICES_HF_BASE_URL}{lang_code}/{voice_name_path}/{quality_path}/{voice_filename}"
-             json_url = f"{PIPER_VOICES_HF_BASE_URL}{lang_code}/{voice_name_path}/{quality_path}/{voice_filename}.json"
-
-
-             if not onnx_path.exists():
-                 ASCIIColors.info(f"Downloading {onnx_url} to {onnx_path}")
-                 r_onnx = requests.get(onnx_url, stream=True)
-                 r_onnx.raise_for_status()
-                 with open(onnx_path, 'wb') as f:
-                     for chunk in r_onnx.iter_content(chunk_size=8192): f.write(chunk)
-
-             if not json_path.exists():
-                 ASCIIColors.info(f"Downloading {json_url} to {json_path}")
-                 r_json = requests.get(json_url)
-                 r_json.raise_for_status()
-                 with open(json_path, 'w', encoding='utf-8') as f: f.write(r_json.text)
-             ASCIIColors.green(f"Default Piper test voice '{voice_filename}' downloaded successfully to {voices_dir}.")
-             return True
-         except Exception as e_download:
-             ASCIIColors.error(f"Failed to download default Piper test voice '{voice_filename}': {e_download}")
-             ASCIIColors.warning(f"Please manually download '{voice_filename}' and '{voice_filename}.json' "
-                                 f"from rhasspy.github.io/piper-voices/ or Hugging Face "
-                                 f"and place them in {voices_dir.resolve()}")
-             return False
-     return True
-
-
- def main():
-     parser = argparse.ArgumentParser(description="Generate text with an LLM and synthesize it to speech using LOLLMS.")
-     # LLM Arguments
-     parser.add_argument(
-         "--llm_binding", type=str, default="ollama", choices=["ollama", "openai", "lollms", "llamacpp", "pythonllamacpp", "transformers", "vllm"],
-         help="The LLM binding to use for text generation."
-     )
-     parser.add_argument(
-         "--llm_model", type=str, default="mistral",
-         help="Model name or path for the LLM binding."
-     )
-     parser.add_argument("--llm_host", type=str, default=None, help="Host address for server-based LLM bindings.")
-     parser.add_argument("--models_path", type=str, default=None, help="Path to models directory for local LLM bindings.")
-     parser.add_argument("--openai_key", type=str, default=None, help="OpenAI API key.")
-
-     # TTS Arguments
-     parser.add_argument(
-         "--tts_binding", type=str, default="bark", choices=["bark", "lollms", "xtts", "piper"],
-         help="The TTS binding to use for speech synthesis."
-     )
-     # Bark specific
-     parser.add_argument("--bark_model", type=str, default="suno/bark-small", help="Bark model ID for TTS.")
-     parser.add_argument("--bark_voice_preset", type=str, default="v2/en_speaker_6", help="Bark voice preset.")
-     # XTTS specific
-     parser.add_argument("--xtts_model", type=str, default="tts_models/multilingual/multi-dataset/xtts_v2", help="XTTS model identifier for Coqui TTS.")
-     parser.add_argument("--xtts_speaker_wav", type=str, default=None, help="Path to speaker WAV for XTTS voice cloning.")
-     parser.add_argument("--xtts_language", type=str, default="en", help="Language for XTTS.")
-     # Piper specific
-     parser.add_argument("--piper_default_voice_model_path", type=str, default=None, help="Path to the default .onnx Piper voice model.")
-     parser.add_argument("--piper_voices_dir", type=str, default=str(DEFAULT_PIPER_VOICES_SUBDIR), help="Directory containing Piper voice models.")
-     parser.add_argument("--piper_voice_file", type=str, default=DEFAULT_PIPER_VOICE_FILENAME, help="Filename of the Piper voice to use from piper_voices_dir (e.g., en_US-ryan-medium.onnx).")
-
-     # Common TTS/LLM args
-     parser.add_argument("--tts_host", type=str, default=None, help="Host address for server-based TTS bindings (e.g., lollms TTS).")
-     parser.add_argument("--device", type=str, default=None, choices=["cpu", "cuda", "mps", None], help="Device for local TTS/LLM models.")
-     args = parser.parse_args()
-
-     ASCIIColors.red("--- LOLLMS Text Generation and Speech Synthesis Demo ---")
-     ASCIIColors.info(f"Using LLM Binding: {args.llm_binding} (Model: {args.llm_model})")
-     ASCIIColors.info(f"Using TTS Binding: {args.tts_binding}")
-     if args.tts_binding == "bark":
-         ASCIIColors.info(f" Bark Model: {args.bark_model}, Voice Preset: {args.bark_voice_preset}")
-     elif args.tts_binding == "xtts":
-         ASCIIColors.info(f" XTTS Model: {args.xtts_model}, Speaker WAV: {args.xtts_speaker_wav or 'Default in binding'}, Lang: {args.xtts_language}")
-     elif args.tts_binding == "piper":
-         ASCIIColors.info(f" Piper Voices Dir: {args.piper_voices_dir}, Voice File: {args.piper_voice_file}")
-         # Ensure default Piper voice for demo if Piper is selected and no specific default path is given
-         if not args.piper_default_voice_model_path:
-             ensure_default_piper_voice_for_demo(Path(args.piper_voices_dir), args.piper_voice_file)
-             args.piper_default_voice_model_path = str(Path(args.piper_voices_dir) / args.piper_voice_file)
-
-
-     llm_binding_config = {}
-     if args.llm_binding == "openai" and args.openai_key: llm_binding_config["service_key"] = args.openai_key
-     elif args.llm_binding in ["llamacpp", "pythonllamacpp", "transformers", "vllm"]:
-         if args.device: llm_binding_config["device"] = args.device
-         if args.llm_binding == "pythonllamacpp": llm_binding_config["n_gpu_layers"] = -1 if args.device == "cuda" else 0
-
-     tts_binding_config = {"device": args.device}
-     if args.tts_binding == "bark":
-         tts_binding_config["model_name"] = args.bark_model
-         tts_binding_config["default_voice"] = args.bark_voice_preset
-     elif args.tts_binding == "xtts":
-         tts_binding_config["model_name"] = args.xtts_model
-         tts_binding_config["default_speaker_wav"] = args.xtts_speaker_wav
-         tts_binding_config["default_language"] = args.xtts_language
-     elif args.tts_binding == "piper":
-         tts_binding_config["default_voice_model_path"] = args.piper_default_voice_model_path
-         tts_binding_config["piper_voices_dir"] = args.piper_voices_dir
-     elif args.tts_binding == "lollms":
-         tts_binding_config["model_name"] = "default_lollms_voice" # Placeholder, server handles actual voice
-
-     lollms_client = None
-     try:
-         ASCIIColors.magenta("Initializing LollmsClient...")
-         lollms_client = LollmsClient(
-             binding_name=args.llm_binding, model_name=args.llm_model,
-             host_address=args.llm_host, models_path=args.models_path,
-             llm_binding_config=llm_binding_config,
-             tts_binding_name=args.tts_binding, tts_host_address=args.tts_host,
-             tts_binding_config=tts_binding_config,
-             verify_ssl_certificate=False
-         )
-         ASCIIColors.green("LollmsClient initialized.")
-     except Exception as e:
-         ASCIIColors.error(f"Failed to initialize LollmsClient: {e}"); trace_exception(e)
-         return
-
-     generated_text = ""
-     text_prompt = "Craft a very short, cheerful message about the joy of discovery."
-     ASCIIColors.cyan(f"\n--- Generating Text (Prompt: '{text_prompt[:50]}...') ---")
-     if not lollms_client.binding:
-         ASCIIColors.error("LLM binding not available."); return
-     try:
-         print(f"{ASCIIColors.YELLOW}AI is thinking: {ASCIIColors.RESET}", end="")
-         generated_text = lollms_client.generate_text(
-             prompt=text_prompt, n_predict=100, stream=True,
-             streaming_callback=text_stream_callback, temperature=0.7
-         )
-         print("\n"); ASCIIColors.green("Text generation complete.")
-         ASCIIColors.magenta("Generated Text:\n"); ASCIIColors.yellow(generated_text)
-     except Exception as e:
-         ASCIIColors.error(f"Text generation failed: {e}"); trace_exception(e); return
-     if not generated_text:
-         ASCIIColors.warning("LLM did not generate any text."); return
-
-     speech_file_path = None
-     ASCIIColors.cyan(f"\n--- Synthesizing Speech (using {args.tts_binding}) ---")
-     if not lollms_client.tts:
-         ASCIIColors.error("TTS binding not available."); return
-     try:
-         tts_call_kwargs = {}
-         if args.tts_binding == "bark":
-             # For Bark, 'voice' in generate_audio is the voice_preset.
-             # If not using the default from init, pass it here.
-             # tts_call_kwargs['voice'] = args.bark_voice_preset
-             pass # Uses default_voice from init if args.bark_voice_preset not specified to override
-         elif args.tts_binding == "xtts":
-             tts_call_kwargs['language'] = args.xtts_language
-             # 'voice' for XTTS is the speaker_wav path. If not using default from init, pass here.
-             # tts_call_kwargs['voice'] = args.xtts_speaker_wav
-         elif args.tts_binding == "piper":
-             # 'voice' for Piper is the .onnx filename.
-             tts_call_kwargs['voice'] = args.piper_voice_file
-             # Example Piper specific param:
-             # tts_call_kwargs['length_scale'] = 1.0
-
-         audio_bytes = lollms_client.tts.generate_audio(text=generated_text, **tts_call_kwargs)
-
-         if audio_bytes:
-             filename_stem = f"speech_output_{args.llm_binding}_{args.tts_binding}"
-             speech_file_path = SPEECH_OUTPUT_DIR / f"{filename_stem.replace('/', '_')}.wav"
-             with open(speech_file_path, "wb") as f: f.write(audio_bytes)
-             ASCIIColors.green(f"Speech synthesized and saved to: {speech_file_path}")
-         elif args.tts_binding == "lollms":
-             ASCIIColors.warning("LOLLMS TTS binding returned empty bytes. Server might have saved file if 'fn' was used.")
-             speech_file_path = None
-         else:
-             ASCIIColors.warning("Speech synthesis returned empty bytes."); speech_file_path = None
-     except Exception as e:
-         ASCIIColors.error(f"Speech synthesis failed: {e}"); trace_exception(e); return
-
-     if speech_file_path and PYGAME_AVAILABLE:
-         ASCIIColors.magenta("\n--- Playing Synthesized Speech ---")
-         try:
-             pygame.mixer.init()
-             speech_sound = pygame.mixer.Sound(str(speech_file_path))
-             ASCIIColors.cyan("Playing audio... Press Ctrl+C in console to stop playback early.")
-             speech_sound.play()
-             while pygame.mixer.get_busy():
-                 pygame.time.Clock().tick(10)
-                 for event in pygame.event.get():
-                     if event.type == pygame.QUIT: pygame.mixer.stop(); break
-             ASCIIColors.green("Playback finished.")
-         except pygame.error as e: ASCIIColors.warning(f"Could not play audio with pygame: {e}")
-         except KeyboardInterrupt: pygame.mixer.stop(); ASCIIColors.yellow("\nPlayback interrupted.")
-         finally: pygame.quit()
-     elif not PYGAME_AVAILABLE:
-         ASCIIColors.warning("Pygame is not available for playback.")
-         if speech_file_path: ASCIIColors.info(f"Generated speech: {speech_file_path.resolve()}")
-     elif not speech_file_path:
-         ASCIIColors.warning("No speech file generated/path unknown. Skipping playback.")
-
-     ASCIIColors.red("\n--- Demo Finished ---")
-
- if __name__ == "__main__":
-     main()
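
To make ensure_default_piper_voice_for_demo's URL construction concrete, here is the worked path for the default voice (derived by hand from the code above, not fetched):

# Worked example of the Piper voice URL building for the default filename.
voice_filename = "en_US-lessac-medium.onnx"
voice_parts = voice_filename.split('-')        # ['en_US', 'lessac', 'medium.onnx']
lang_code = voice_parts[0].split('_')[0]       # 'en'
voice_name_path = "/".join(voice_parts[0:2])   # 'en_US/lessac'
quality_path = voice_parts[2].split('.')[0]    # 'medium'
# Resulting ONNX URL:
#   https://huggingface.co/rhasspy/piper-voices/resolve/main/en/en_US/lessac/medium/en_US-lessac-medium.onnx
# The voice config is fetched from the same path with a .json suffix appended.

A typical invocation of the demo with this voice would then be, e.g., python generate_and_speak.py --llm_binding ollama --tts_binding piper (all flags as defined in the argparse section above).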