lollms-client 0.33.0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (73)
  1. lollms_client/__init__.py +1 -1
  2. lollms_client/llm_bindings/azure_openai/__init__.py +6 -10
  3. lollms_client/llm_bindings/claude/__init__.py +4 -7
  4. lollms_client/llm_bindings/gemini/__init__.py +3 -7
  5. lollms_client/llm_bindings/grok/__init__.py +3 -7
  6. lollms_client/llm_bindings/groq/__init__.py +4 -6
  7. lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +4 -6
  8. lollms_client/llm_bindings/litellm/__init__.py +15 -6
  9. lollms_client/llm_bindings/llamacpp/__init__.py +27 -9
  10. lollms_client/llm_bindings/lollms/__init__.py +24 -14
  11. lollms_client/llm_bindings/lollms_webui/__init__.py +6 -12
  12. lollms_client/llm_bindings/mistral/__init__.py +3 -5
  13. lollms_client/llm_bindings/ollama/__init__.py +6 -11
  14. lollms_client/llm_bindings/open_router/__init__.py +4 -6
  15. lollms_client/llm_bindings/openai/__init__.py +7 -14
  16. lollms_client/llm_bindings/openllm/__init__.py +12 -12
  17. lollms_client/llm_bindings/pythonllamacpp/__init__.py +1 -1
  18. lollms_client/llm_bindings/tensor_rt/__init__.py +8 -13
  19. lollms_client/llm_bindings/transformers/__init__.py +14 -6
  20. lollms_client/llm_bindings/vllm/__init__.py +16 -12
  21. lollms_client/lollms_core.py +296 -487
  22. lollms_client/lollms_discussion.py +431 -78
  23. lollms_client/lollms_llm_binding.py +191 -380
  24. lollms_client/lollms_mcp_binding.py +33 -2
  25. lollms_client/mcp_bindings/local_mcp/__init__.py +3 -2
  26. lollms_client/mcp_bindings/remote_mcp/__init__.py +6 -5
  27. lollms_client/mcp_bindings/standard_mcp/__init__.py +3 -5
  28. lollms_client/stt_bindings/lollms/__init__.py +6 -8
  29. lollms_client/stt_bindings/whisper/__init__.py +2 -4
  30. lollms_client/stt_bindings/whispercpp/__init__.py +15 -16
  31. lollms_client/tti_bindings/dalle/__init__.py +29 -28
  32. lollms_client/tti_bindings/diffusers/__init__.py +25 -21
  33. lollms_client/tti_bindings/gemini/__init__.py +215 -0
  34. lollms_client/tti_bindings/lollms/__init__.py +8 -9
  35. lollms_client-1.0.0.dist-info/METADATA +1214 -0
  36. lollms_client-1.0.0.dist-info/RECORD +69 -0
  37. {lollms_client-0.33.0.dist-info → lollms_client-1.0.0.dist-info}/top_level.txt +0 -2
  38. examples/article_summary/article_summary.py +0 -58
  39. examples/console_discussion/console_app.py +0 -266
  40. examples/console_discussion.py +0 -448
  41. examples/deep_analyze/deep_analyse.py +0 -30
  42. examples/deep_analyze/deep_analyze_multiple_files.py +0 -32
  43. examples/function_calling_with_local_custom_mcp.py +0 -250
  44. examples/generate_a_benchmark_for_safe_store.py +0 -89
  45. examples/generate_and_speak/generate_and_speak.py +0 -251
  46. examples/generate_game_sfx/generate_game_fx.py +0 -240
  47. examples/generate_text_with_multihop_rag_example.py +0 -210
  48. examples/gradio_chat_app.py +0 -228
  49. examples/gradio_lollms_chat.py +0 -259
  50. examples/internet_search_with_rag.py +0 -226
  51. examples/lollms_chat/calculator.py +0 -59
  52. examples/lollms_chat/derivative.py +0 -48
  53. examples/lollms_chat/test_openai_compatible_with_lollms_chat.py +0 -12
  54. examples/lollms_discussions_test.py +0 -155
  55. examples/mcp_examples/external_mcp.py +0 -267
  56. examples/mcp_examples/local_mcp.py +0 -171
  57. examples/mcp_examples/openai_mcp.py +0 -203
  58. examples/mcp_examples/run_remote_mcp_example_v2.py +0 -290
  59. examples/mcp_examples/run_standard_mcp_example.py +0 -204
  60. examples/simple_text_gen_test.py +0 -173
  61. examples/simple_text_gen_with_image_test.py +0 -178
  62. examples/test_local_models/local_chat.py +0 -9
  63. examples/text_2_audio.py +0 -77
  64. examples/text_2_image.py +0 -144
  65. examples/text_2_image_diffusers.py +0 -274
  66. examples/text_and_image_2_audio.py +0 -59
  67. examples/text_gen.py +0 -30
  68. examples/text_gen_system_prompt.py +0 -29
  69. lollms_client-0.33.0.dist-info/METADATA +0 -854
  70. lollms_client-0.33.0.dist-info/RECORD +0 -101
  71. test/test_lollms_discussion.py +0 -368
  72. {lollms_client-0.33.0.dist-info → lollms_client-1.0.0.dist-info}/WHEEL +0 -0
  73. {lollms_client-0.33.0.dist-info → lollms_client-1.0.0.dist-info}/licenses/LICENSE +0 -0
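
Since wheels are plain zip archives, a file-level comparison like the list above can be reproduced locally. The snippet below is an illustrative sketch only: it assumes the two wheel files named in the title have been downloaded to the current directory under those hypothetical paths.

# Minimal sketch: compare the file lists of two locally downloaded wheels.
import zipfile

OLD_WHEEL = "lollms_client-0.33.0-py3-none-any.whl"  # assumed local path
NEW_WHEEL = "lollms_client-1.0.0-py3-none-any.whl"   # assumed local path

old_files = set(zipfile.ZipFile(OLD_WHEEL).namelist())
new_files = set(zipfile.ZipFile(NEW_WHEEL).namelist())

print("Removed:", sorted(old_files - new_files))  # e.g. the deleted examples/ and test/ scripts
print("Added:  ", sorted(new_files - old_files))  # e.g. the new gemini TTI binding
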
examples/simple_text_gen_with_image_test.py DELETED
@@ -1,178 +0,0 @@
- from lollms_client import LollmsClient, ELF_COMPLETION_FORMAT
- from lollms_client.lollms_types import MSG_TYPE # For callback signature
- from ascii_colors import ASCIIColors, trace_exception
-
- # --- Configuration ---
- # Choose your LLM binding and parameters here
- # Option 1: Default LOLLMS server binding
- # BINDING_NAME = "lollms"
- # HOST_ADDRESS = "http://localhost:9600"
- # MODEL_NAME = None # Server will use its default or last loaded model
-
- # Option 2: Ollama binding
- BINDING_NAME = "ollama"
- HOST_ADDRESS = "http://localhost:11434" # Default Ollama host
- MODEL_NAME = "llava:latest" # Or "llama3:latest", "phi3:latest", etc. - ensure it's pulled in Ollama
-
- # Option 3: llamacpp binding
- # BINDING_NAME = "llamacpp"
- # MODELS_PATH = r"E:\drumber" # Change to your own models folder
- # MODEL_NAME = "llava-v1.6-mistral-7b.Q3_K_XS.gguf" # Change to your vision-capable model (make sure the gguf model is accompanied by an mmproj file with the same name, without the quantization suffix and with an mmproj- prefix, e.g. mmproj-llava-v1.6-mistral-7b.gguf)
- # You can also add a clip_model_path parameter to your lc_params
- img = "E:\\drumber\\1711741182996.jpg"
- # Option 4: OpenAI binding (requires OPENAI_API_KEY environment variable or service_key)
- # BINDING_NAME = "openai"
- # HOST_ADDRESS = None # Defaults to OpenAI API
- # MODEL_NAME = "gpt-3.5-turbo"
- # SERVICE_KEY = "sk-your_openai_api_key_here" # Optional, can use env var
-
- # --- Callback for streaming ---
- def simple_streaming_callback(chunk: str, msg_type: MSG_TYPE, params=None, metadata=None) -> bool:
-     """
-     Simple callback function to print streamed text chunks.
-     """
-     if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
-         print(chunk, end="", flush=True)
-     elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
-         ASCIIColors.error(f"\nStreaming Error: {chunk}")
-     # Return True to continue streaming, False to stop
-     return True
-
- def test_text_generation():
-     ASCIIColors.cyan(f"\n--- Testing Text Generation with '{BINDING_NAME}' binding ---")
-
-     if BINDING_NAME!="llamacpp":
-         ASCIIColors.cyan(f"Host: {HOST_ADDRESS or 'Default'}, Model: {MODEL_NAME or 'Default'}")
-     else:
-         ASCIIColors.cyan(f"Host: {MODELS_PATH or 'Default'}, Model: {MODEL_NAME or 'Default'}")
-     try:
-         # Initialize LollmsClient
-         lc_params = {
-             "binding_name": BINDING_NAME,
-             "model_name": MODEL_NAME,
-             # "service_key": SERVICE_KEY, # Uncomment for OpenAI if needed
-         }
-         if BINDING_NAME!="llamacpp":
-             lc_params["host_address"] = HOST_ADDRESS
-             # Remove None host_address for bindings that have internal defaults (like OpenAI)
-             if lc_params["host_address"] is None and BINDING_NAME in ["openai"]:
-                 del lc_params["host_address"]
-         else:
-             lc_params["models_path"] = MODELS_PATH
-
-
-         lc = LollmsClient(**lc_params)
-
-         # 1. Test basic non-streaming generation
-         ASCIIColors.magenta("\n1. Basic Non-Streaming Generation:")
-         prompt_non_stream = "describe this image"
-         ASCIIColors.yellow(f"Prompt: {prompt_non_stream}")
-         response_non_stream = lc.generate_text(
-             prompt=prompt_non_stream,
-             images=[img],
-             stream=False,
-             temperature=0.7,
-             n_predict=100 # Max tokens for the response
-         )
-
-         if isinstance(response_non_stream, str):
-             ASCIIColors.green("Response:")
-             print(response_non_stream)
-         elif isinstance(response_non_stream, dict) and "error" in response_non_stream:
-             ASCIIColors.error(f"Error in non-streaming generation: {response_non_stream['error']}")
-         else:
-             ASCIIColors.warning(f"Unexpected response format: {response_non_stream}")
-
-         # 2. Test streaming generation
-         ASCIIColors.magenta("\n\n2. Streaming Generation:")
-         prompt_stream = "Explain the concept of recursion in one sentence."
-         ASCIIColors.yellow(f"Prompt: {prompt_stream}")
-         ASCIIColors.green("Response (streaming):")
-         response_stream = lc.generate_text(
-             prompt=prompt_stream,
-             stream=True,
-             streaming_callback=simple_streaming_callback,
-             temperature=0.5,
-             n_predict=150
-         )
-         print() # Newline after streaming
-
-         # The 'response_stream' variable will contain the full concatenated text if streaming_callback returns True throughout,
-         # or an error dictionary if generation failed.
-         if isinstance(response_stream, str):
-             ASCIIColors.cyan(f"\n(Full streamed text was: {response_stream[:100]}...)") # Show a snippet of the full text
-         elif isinstance(response_stream, dict) and "error" in response_stream:
-             ASCIIColors.error(f"Error in streaming generation: {response_stream['error']}")
-         # else: if callback returns False early, response_stream might be partial.
-
-         # 3. Test generation with a specific model (if applicable and different from default)
-         # This tests the switch_model or model loading mechanism of the binding.
-         # For 'lollms' binding, this would set the model on the server.
-         # For 'ollama' or 'openai', it means the next generate_text will use this model.
-         ASCIIColors.magenta("\n\n3. List Available Models & Generate with Specific Model:")
-         available_models = lc.listModels()
-         if isinstance(available_models, list) and available_models:
-             ASCIIColors.green("Available models:")
-             for i, model_info in enumerate(available_models[:5]): # Print first 5
-                 model_id = model_info.get('model_name', model_info.get('id', str(model_info)))
-                 print(f" - {model_id}")
-
-             # Try to use the first available model (or a known one if the list is too generic)
-             target_model = None
-             if BINDING_NAME == "ollama":
-                 # For Ollama, try using a different small model if available, or the same one
-                 if "phi3:latest" in [m.get('name') for m in available_models if isinstance(m, dict)]:
-                     target_model = "phi3:latest"
-                 elif available_models: # Fall back to the first model in the list if phi3 is not present
-                     first_model_entry = available_models[0]
-                     target_model = first_model_entry.get('name', first_model_entry.get('model_name'))
-
-
-             elif BINDING_NAME == "lollms":
-                 # For lollms, this would typically be a path or server-recognized name
-                 # This part is harder to make generic without knowing the server's models
-                 ASCIIColors.yellow("For 'lollms' binding, ensure the target model is known to the server.")
-                 if available_models and isinstance(available_models[0], str): # e.g. gptq model paths
-                     target_model = available_models[0]
-
-
-             if target_model and target_model != lc.binding.model_name: # Only if different and valid
-                 ASCIIColors.info(f"\nSwitching to model (or using for next gen): {target_model}")
-                 # For bindings like ollama/openai, setting model_name on the binding directly works.
-                 # For the 'lollms' server binding, LollmsClient doesn't have a direct 'switch_model_on_server',
-                 # but setting lc.binding.model_name will make the next generate_text request it.
-                 lc.binding.model_name = target_model # Update the binding's current model_name
-
-                 prompt_specific_model = f"What is the main capability of the {target_model.split(':')[0]} language model?"
-                 ASCIIColors.yellow(f"Prompt (for {target_model}): {prompt_specific_model}")
-                 ASCIIColors.green("Response:")
-                 response_specific = lc.generate_text(
-                     prompt=prompt_specific_model,
-                     stream=True, # Keep it streaming for responsiveness
-                     streaming_callback=simple_streaming_callback,
-                     n_predict=200
-                 )
-                 print()
-             elif target_model == lc.binding.model_name:
-                 ASCIIColors.yellow(f"Target model '{target_model}' is already the current model. Skipping specific model test.")
-             else:
-                 ASCIIColors.yellow("Could not determine a different target model from the list to test specific model generation.")
-
-         elif isinstance(available_models, dict) and "error" in available_models:
-             ASCIIColors.error(f"Error listing models: {available_models['error']}")
-         else:
-             ASCIIColors.yellow("No models listed by the binding or format not recognized.")
-
-
-     except ValueError as ve:
-         ASCIIColors.error(f"Initialization Error: {ve}")
-         trace_exception(ve)
-     except RuntimeError as re:
-         ASCIIColors.error(f"Runtime Error (binding likely not initialized): {re}")
-         trace_exception(re)
-     except Exception as e:
-         ASCIIColors.error(f"An unexpected error occurred: {e}")
-         trace_exception(e)
-
- if __name__ == "__main__":
-     test_text_generation()
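
The deleted example above documents the 0.33.0 client API it exercised: construct LollmsClient with binding_name/model_name (or models_path for llamacpp), pass images to generate_text, and stream through a callback that returns True to keep the stream going. The following is a distilled sketch using only the names shown in that example; lollms_core.py and lollms_llm_binding.py were rewritten in 1.0.0, so the current API may differ, and the binding, model and image path are placeholders.

from lollms_client import LollmsClient
from lollms_client.lollms_types import MSG_TYPE

# Condensed from the deleted 0.33.0 example above.
lc = LollmsClient(binding_name="ollama", model_name="llava:latest",
                  host_address="http://localhost:11434")

def on_chunk(chunk, msg_type, params=None, metadata=None):
    if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
        print(chunk, end="", flush=True)
    return True  # returning False would stop the stream

# Vision-style call: images are passed as a list of local file paths.
print(lc.generate_text(prompt="describe this image",
                       images=["path/to/image.jpg"], stream=False, n_predict=100))

# Streaming call: the callback receives chunks; the return value is the full concatenated text.
lc.generate_text(prompt="Explain recursion in one sentence.", stream=True,
                 streaming_callback=on_chunk, n_predict=150)
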
examples/test_local_models/local_chat.py DELETED
@@ -1,9 +0,0 @@
- from lollms_client import LollmsClient
-
- # Initialize the LollmsClient instance; please select a different model to test with
- lc = LollmsClient("transformers", model_name= r"microsoft/Phi-4-mini-instruct")
- def cb(text, msg_type=0):
-     print(text,end='', flush=True)
-     return True
- out = lc.generate_text(f"{lc.system_full_header} Act as lollms, a helpful assistant.\n!@>user:Write a poem about love.\n!@>lollms:",streaming_callback=cb)
- print(out)
examples/text_2_audio.py DELETED
@@ -1,77 +0,0 @@
- from lollms_client import LollmsClient # Removed LollmsTTS import
- from lollms_client.lollms_types import MSG_TYPE # Import MSG_TYPE if callback uses it
- import random
- from ascii_colors import ASCIIColors # Assuming this might be used for better output
-
- # Initialize the LollmsClient instance, enabling the TTS binding
- # We'll use the 'lollms' tts binding by default.
- # The host_address in LollmsClient will be used by the lollms tts binding.
- lc = LollmsClient(
-     tts_binding_name="lollms" # Explicitly enable the lollms TTS binding
- )
-
- if not lc.tts:
-     ASCIIColors.error("TTS binding could not be initialized. Please check your LollmsClient setup and server.")
-     exit()
-
- voices = lc.tts.list_voices() # Use the new method via lc.tts
-
- # Pick a voice randomly
- if voices:
-     random_voice = random.choice(voices)
-     ASCIIColors.info(f"Selected voice: {random_voice}")
- else:
-     ASCIIColors.warning("No voices found. Using server default.")
-     random_voice = None # Or a known default like "main_voice"
-
- # Generate Text
- # response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
- # print(response)
-
- # # Generate Completion
- # response = lc.generate_completion(prompt="What is the capital of France", stream=False, temperature=0.5)
- # print(response)
-
-
- def cb(chunk, msg_type: MSG_TYPE, params=None, metadata=None): # Added params and metadata for full signature
-     print(chunk,end="",flush=True)
-     return True # Callback should return True to continue streaming
-
- response_text = lc.generate_text(prompt="One plus one equals ", stream=False, temperature=0.5, streaming_callback=cb)
- print() # For newline after streaming
- ASCIIColors.green(f"Generated text: {response_text}")
- print()
-
- if response_text and not isinstance(response_text, dict): # Check if generation was successful
-     try:
-         # Assuming generate_audio now might return status or file path rather than direct audio bytes for 'lollms' binding
-         # based on its current server behavior.
-         # If generate_audio for 'lollms' binding is expected to save a file and return status:
-         audio_generation_status = lc.tts.generate_audio(response_text, voice=random_voice, fn="output_example_text_2_audio.wav") # Example filename
-         ASCIIColors.info(f"Audio generation request status: {audio_generation_status}")
-         ASCIIColors.yellow(f"Audio should be saved as 'output_example_text_2_audio.wav' by the server in its default output path.")
-
-     except Exception as e:
-         ASCIIColors.error(f"Error during text to audio conversion: {e}")
- else:
-     ASCIIColors.error(f"Text generation failed or returned an error: {response_text}")
-
-
- # List Mounted Personalities (This is an LLM feature, specific to 'lollms' LLM binding)
- if lc.binding and hasattr(lc.binding, 'lollms_listMountedPersonalities'):
-     personalities_response = lc.listMountedPersonalities()
-     ASCIIColors.blue("\nMounted Personalities:")
-     print(personalities_response)
- else:
-     ASCIIColors.yellow("\nlistMountedPersonalities not available for the current LLM binding.")
-
-
- # List Models (This is an LLM feature)
- models_response = lc.listModels()
- ASCIIColors.blue("\nAvailable LLM Models:")
- print(models_response)
-
- # List available TTS bindings (for demonstration)
- if hasattr(lc, 'tts_binding_manager'):
-     available_tts_bindings = lc.tts_binding_manager.get_available_bindings()
-     ASCIIColors.cyan(f"\nAvailable TTS bindings in client: {available_tts_bindings}")
examples/text_2_image.py DELETED
@@ -1,144 +0,0 @@
- from lollms_client import LollmsClient
- from lollms_client.lollms_types import MSG_TYPE # If using callbacks
- from ascii_colors import ASCIIColors, trace_exception
- from PIL import Image
- from pathlib import Path
- import io
- import os
-
- # --- Configuration ---
- # This client_id should match one known by your LoLLMs WebUI if security is enabled for these endpoints.
- # For a default local setup, it might not be strictly checked for /generate_image,
- # but IS required for /list_tti_services, /get_active_tti_settings, /set_active_tti_settings.
- LOLLMS_CLIENT_ID = "my_lollms_client_id" # Replace with your actual client ID or a test one
-
- # Initialize LollmsClient, enabling the TTI 'lollms' binding
- # The service_key here is used as client_id by the TTI binding for lollms
- # lc = LollmsClient(
- #     tti_binding_name="lollms"
- # )
-
- # make sure you have an OPENAI_API_KEY environment variable
- lc = LollmsClient(
-     tti_binding_name="dalle"
- )
-
- if not lc.tti:
-     ASCIIColors.error("TTI binding could not be initialized. Please check LollmsClient setup.")
-     exit()
-
- def test_list_tti_services():
-     ASCIIColors.cyan("\n--- Testing List TTI Services ---")
-     try:
-         # client_id is taken from lc.service_key by the binding
-         services = lc.tti.list_services()
-         if services:
-             ASCIIColors.green("Available TTI Services:")
-             for i, service in enumerate(services):
-                 print(f" {i+1}. Name: {service.get('name')}, Caption: {service.get('caption')}")
-         else:
-             ASCIIColors.yellow("No TTI services listed or an empty list was returned.")
-     except Exception as e:
-         ASCIIColors.error(f"Error listing TTI services: {e}")
-         trace_exception(e)
-
- def test_get_tti_settings():
-     ASCIIColors.cyan("\n--- Testing Get Active TTI Settings ---")
-     try:
-         # client_id is taken from lc.service_key by the binding
-         settings = lc.tti.get_settings()
-         if settings: # Server returns a list for settings template
-             ASCIIColors.green("Current Active TTI Settings/Template:")
-             # Assuming settings is a list of dicts (template format)
-             for setting_item in settings:
-                 print(f" - Name: {setting_item.get('name')}, Type: {setting_item.get('type')}, Value: {setting_item.get('value')}, Help: {setting_item.get('help')}")
-         elif isinstance(settings, dict) and not settings: # Empty dict if no TTI active
-             ASCIIColors.yellow("No active TTI service or settings configured on the server.")
-         else:
-             ASCIIColors.yellow("Could not retrieve TTI settings or format unexpected.")
-             print(f"Received: {settings}")
-     except Exception as e:
-         ASCIIColors.error(f"Error getting TTI settings: {e}")
-         trace_exception(e)
-
- def test_set_tti_settings():
-     ASCIIColors.cyan("\n--- Testing Set Active TTI Settings (Illustrative) ---")
-     ASCIIColors.yellow("Note: This test requires knowing the exact settings structure of your active TTI service.")
-     ASCIIColors.yellow("Skipping actual setting change to avoid misconfiguration.")
-     # Example: If you knew your TTI service had a 'quality' setting:
-     # example_settings_to_set = [
-     #     {"name": "quality", "value": "high", "type": "str", "help": "Image quality"},
-     #     # ... other settings from get_settings()
-     # ]
-     # try:
-     #     # client_id is taken from lc.service_key
-     #     success = lc.tti.set_settings(example_settings_to_set)
-     #     if success:
-     #         ASCIIColors.green("Successfully sent settings update request.")
-     #     else:
-     #         ASCIIColors.red("Failed to set TTI settings (server indicated failure or no change).")
-     # except Exception as e:
-     #     ASCIIColors.error(f"Error setting TTI settings: {e}")
-
- def test_generate_image():
-     ASCIIColors.cyan("\n--- Testing Generate Image ---")
-     prompt = "A futuristic cityscape at sunset, neon lights, flying vehicles"
-     negative_prompt = "blurry, low quality, ugly, text, watermark"
-     width = 1024
-     height = 1024
-     home_dir = Path.home()
-     documents_dir = home_dir / "Documents"
-     output_filename = documents_dir/"generated_lollms_image.jpg"
-
-     ASCIIColors.info(f"Prompt: {prompt}")
-     ASCIIColors.info(f"Negative Prompt: {negative_prompt}")
-     ASCIIColors.info(f"Dimensions: {width}x{height}")
-
-     try:
-         image_bytes = lc.tti.generate_image(
-             prompt=prompt,
-             negative_prompt=negative_prompt,
-             width=width,
-             height=height
-             # You can add other kwargs here if your TTI service supports them, e.g., seed=12345
-         )
-
-         if image_bytes:
-             ASCIIColors.green(f"Image generated successfully ({len(image_bytes)} bytes).")
-             try:
-                 image = Image.open(io.BytesIO(image_bytes))
-                 image.save(output_filename)
-                 ASCIIColors.green(f"Image saved as {output_filename}")
-                 # Attempt to show image if possible (platform dependent)
-                 if os.name == 'nt': # Windows
-                     os.startfile(output_filename)
-                 elif os.name == 'posix': # MacOS/Linux
-                     try:
-                         import subprocess
-                         opener = "open" if platform.system() == "Darwin" else "xdg-open"
-                         subprocess.call([opener, output_filename])
-                     except:
-                         ASCIIColors.yellow(f"Could not auto-open image. Please find it at {output_filename}")
-
-             except Exception as e:
-                 ASCIIColors.error(f"Error processing or saving image: {e}")
-                 # Save raw bytes if PIL fails, for debugging
-                 with open("generated_lollms_image_raw.data", "wb") as f_raw:
-                     f_raw.write(image_bytes)
-                 ASCIIColors.yellow("Raw image data saved as generated_lollms_image_raw.data for inspection.")
-
-         else:
-             ASCIIColors.red("Image generation returned empty bytes.")
-
-     except Exception as e:
-         ASCIIColors.error(f"Error during image generation: {e}")
-         trace_exception(e)
-
- if __name__ == "__main__":
-     # Test management functions first
-     test_list_tti_services()
-     test_get_tti_settings()
-     test_set_tti_settings() # Currently illustrative
-
-     # Then test image generation
-     test_generate_image()
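
The core of the deleted example above is the TTI call itself: lc.tti.generate_image() returns raw image bytes that the script decodes with PIL and writes to disk. Below is a condensed sketch of that path using only the calls shown above; the DALL-E binding, prompt and output location are taken from the example and act as placeholders, and the 1.0.0 API may differ.

import io
from pathlib import Path
from PIL import Image
from lollms_client import LollmsClient

lc = LollmsClient(tti_binding_name="dalle")  # requires the OPENAI_API_KEY environment variable
if lc.tti:
    image_bytes = lc.tti.generate_image(
        prompt="A futuristic cityscape at sunset, neon lights, flying vehicles",
        negative_prompt="blurry, low quality, ugly, text, watermark",
        width=1024, height=1024,
    )
    if image_bytes:
        out = Path.home() / "Documents" / "generated_lollms_image.jpg"
        # Decode the returned bytes; convert to RGB so saving as JPEG also works for RGBA sources.
        Image.open(io.BytesIO(image_bytes)).convert("RGB").save(out)
        print(f"Saved {len(image_bytes)} bytes to {out}")
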