lollms_client-1.6.6-py3-none-any.whl → lollms_client-1.6.10-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in the public registry. It is provided for informational purposes only.

--- lollms_client/lollms_discussion.py
+++ lollms_client/lollms_discussion.py
@@ -1208,14 +1208,27 @@ class LollmsDiscussion:
         prompt_for_agent = self.export("markdown", branch_tip_id if branch_tip_id else self.active_branch_id)
         if debug:
             ASCIIColors.cyan("\n" + "="*50 + "\n--- DEBUG: AGENTIC TURN TRIGGERED ---\n" + f"--- PROMPT FOR AGENT (from discussion history) ---\n{prompt_for_agent}\n" + "="*50 + "\n")
-
+
+
+        # Combine system prompt and data zones
+        system_prompt_part = (self._system_prompt or "").strip()
+        data_zone_part = self.get_full_data_zone() # This now returns a clean, multi-part block or an empty string
+        full_system_prompt = ""
+
+        # Combine them intelligently
+        if system_prompt_part and data_zone_part:
+            full_system_prompt = f"{system_prompt_part}\n\n{data_zone_part}"
+        elif system_prompt_part:
+            full_system_prompt = system_prompt_part
+        else:
+            full_system_prompt = data_zone_part
         agent_result = self.lollmsClient.generate_with_mcp_rag(
             prompt=prompt_for_agent,
             use_mcps=effective_use_mcps,
             use_data_store=use_data_store,
             max_reasoning_steps=max_reasoning_steps,
             images=images,
-            system_prompt = self._system_prompt,
+            system_prompt = full_system_prompt,
             debug=debug,
             **kwargs
         )
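
The change above folds the discussion's data zones into the system prompt handed to the agentic call, instead of passing `self._system_prompt` alone. The branching reduces to a small join that tolerates either part being empty; a minimal sketch (hypothetical helper name, equivalent to the inlined logic above):

def merge_system_prompt(system_prompt, data_zone: str) -> str:
    """Join the static system prompt and the data-zone block with a blank
    line between them, handling the empty cases the diff enumerates."""
    system_prompt_part = (system_prompt or "").strip()
    if system_prompt_part and data_zone:
        return f"{system_prompt_part}\n\n{data_zone}"
    return system_prompt_part or data_zone
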

--- lollms_client/lollms_stt_binding.py
+++ lollms_client/lollms_stt_binding.py
@@ -4,7 +4,7 @@ import importlib
 from pathlib import Path
 from typing import Optional, List, Dict, Any, Union
 from ascii_colors import trace_exception
-
+import yaml
 class LollmsSTTBinding(ABC):
     """Abstract base class for all LOLLMS Speech-to-Text bindings."""
 
@@ -122,4 +122,58 @@ class LollmsSTTBindingManager:
             list[str]: List of binding names.
         """
         return [binding_dir.name for binding_dir in self.stt_bindings_dir.iterdir()
-                if binding_dir.is_dir() and (binding_dir / "__init__.py").exists()]
+                if binding_dir.is_dir() and (binding_dir / "__init__.py").exists()]
+
+
+    @staticmethod
+    def _get_fallback_description(binding_name: str) -> Dict:
+        return {
+            "binding_name": binding_name,
+            "title": binding_name.replace("_", " ").title(),
+            "author": "Unknown",
+            "version": "N/A",
+            "description": f"A binding for {binding_name}. No description.yaml file was found.",
+            "input_parameters": [
+                {
+                    "name": "model_name",
+                    "type": "str",
+                    "description": "The model name or ID to be used.",
+                    "mandatory": False,
+                    "default": ""
+                }
+            ],
+            "generate_audio_parameters": []
+        }
+    @staticmethod
+    def get_bindings_list(stt_bindings_dir: Union[str, Path]) -> List[Dict]:
+        bindings_dir = Path(stt_bindings_dir)
+        if not bindings_dir.is_dir():
+            return []
+
+        bindings_list = []
+        for binding_folder in bindings_dir.iterdir():
+            if binding_folder.is_dir() and (binding_folder / "__init__.py").exists():
+                binding_name = binding_folder.name
+                description_file = binding_folder / "description.yaml"
+
+                binding_info = {}
+                if description_file.exists():
+                    try:
+                        with open(description_file, 'r', encoding='utf-8') as f:
+                            binding_info = yaml.safe_load(f)
+                        binding_info['binding_name'] = binding_name
+                    except Exception as e:
+                        print(f"Error loading description.yaml for {binding_name}: {e}")
+                        binding_info = LollmsSTTBindingManager._get_fallback_description(binding_name)
+                else:
+                    binding_info = LollmsSTTBindingManager._get_fallback_description(binding_name)
+
+                bindings_list.append(binding_info)
+
+        return sorted(bindings_list, key=lambda b: b.get('title', b['binding_name']))
+
+
+def get_available_bindings(stt_bindings_dir: Union[str, Path] = None) -> List[Dict]:
+    if stt_bindings_dir is None:
+        stt_bindings_dir = Path(__file__).resolve().parent / "stt_bindings"
+    return LollmsSTTBindingManager.get_bindings_list(stt_bindings_dir)
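
With this change, STT binding discovery becomes metadata-driven: each binding folder may ship a description.yaml, and folders without one get a generated fallback entry rather than being skipped. A minimal usage sketch, assuming `get_available_bindings` lands at module level in `lollms_client.lollms_stt_binding` as the diff suggests:

from lollms_client.lollms_stt_binding import get_available_bindings

# Scans the package's bundled stt_bindings folder by default; entries whose
# folder lacks a description.yaml come from _get_fallback_description().
for info in get_available_bindings():
    print(info["binding_name"], "-", info.get("title"), "-", info.get("version", "N/A"))
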

--- lollms_client/stt_bindings/whisper/__init__.py
+++ lollms_client/stt_bindings/whisper/__init__.py
@@ -82,6 +82,7 @@ class WhisperSTTBinding(LollmsSTTBinding):
             If None, `torch` will attempt to auto-detect. Defaults to None.
         """
         super().__init__(binding_name="whisper") # Not applicable
+        self.default_model_name = kwargs.get("model_name", "base")
 
         if not _whisper_installed:
             raise ImportError(f"Whisper STT binding dependencies not met. Please ensure 'openai-whisper' and 'torch' are installed. Error: {_whisper_installation_error}")
@@ -104,7 +105,7 @@ class WhisperSTTBinding(LollmsSTTBinding):
 
     def _load_whisper_model(self, model_name_to_load: str):
         """Loads or reloads the Whisper model."""
-        if model_name_to_load not in self.WHISPER_MODEL_SIZES:
+        if model_name_to_load not in whisper.available_models():
            ASCIIColors.warning(f"'{model_name_to_load}' is not a standard Whisper model size. Attempting to load anyway. Known sizes: {self.WHISPER_MODEL_SIZES}")
 
        if self.model is not None and self.loaded_model_name == model_name_to_load:
@@ -202,7 +203,7 @@ class WhisperSTTBinding(LollmsSTTBinding):
         Returns:
             List[str]: A list of available Whisper model size identifiers.
         """
-        return self.WHISPER_MODEL_SIZES.copy() # Return a copy
+        return whisper.available_models() # Return a copy
 
    def __del__(self):
        """Clean up: Unload the model to free resources."""

--- lollms_client/stt_bindings/whispercpp/__init__.py
+++ lollms_client/stt_bindings/whispercpp/__init__.py
@@ -31,6 +31,8 @@ class WhisperCppSTTBinding(LollmsSTTBinding):
         n_threads = kwargs.get("n_threads", 4)
         extra_whisper_args = kwargs.get("extra_whisper_args", []) # e.g. ["--no-timestamps"]
 
+        self.default_model_name = "base"
+
         # --- Validate FFMPEG ---
         self.ffmpeg_exe = None
         if ffmpeg_path:
@@ -376,4 +378,8 @@ if __name__ == '__main__':
            TEST_MODELS_SEARCH_DIR.rmdir()
        except OSError: pass # Ignore if not empty or other issues
 
-    ASCIIColors.yellow("\n--- WhisperCppSTTBinding Test Finished ---")
+    ASCIIColors.yellow("\n--- WhisperCppSTTBinding Test Finished ---")
+
+    def list_models(self) -> List[Dict[str, Any]]:
+        return ["base" , "small", "medium", "large"]
+

--- lollms_client/tti_bindings/diffusers/__init__.py
+++ lollms_client/tti_bindings/diffusers/__init__.py
@@ -45,7 +45,7 @@ class DiffusersBinding(LollmsTTIBinding):
 
         self.config = kwargs
         self.host = kwargs.get("host", "localhost")
-        self.port = kwargs.get("port", 9630)
+        self.port = kwargs.get("port", 9632)
         self.auto_start_server = kwargs.get("auto_start_server", True)
         self.server_process = None
         self.base_url = f"http://{self.host}:{self.port}"
@@ -61,7 +61,7 @@ class DiffusersBinding(LollmsTTIBinding):
     def is_server_running(self) -> bool:
         """Checks if the server is already running and responsive."""
         try:
-            response = requests.get(f"{self.base_url}/status", timeout=2)
+            response = requests.get(f"{self.base_url}/status", timeout=4)
             if response.status_code == 200 and response.json().get("status") == "running":
                 return True
         except requests.exceptions.RequestException:
@@ -90,7 +90,7 @@ class DiffusersBinding(LollmsTTIBinding):
         try:
             # Try to acquire the lock with a timeout. If another process is starting
             # the server, this will wait until it's finished.
-            with lock.acquire(timeout=60):
+            with lock.acquire(timeout=3):
                 # After acquiring the lock, we MUST re-check if the server is running.
                 # Another process might have started it and released the lock while we were waiting.
                 if not self.is_server_running():
@@ -105,7 +105,7 @@ class DiffusersBinding(LollmsTTIBinding):
            # This happens if the process holding the lock takes more than 60 seconds to start the server.
            # We don't try to start another one. We just wait for the existing one to be ready.
            ASCIIColors.yellow("Could not acquire lock, another process is taking a long time to start the server. Waiting...")
-            self._wait_for_server(timeout=300) # Give it a longer timeout here just in case.
+            self._wait_for_server(timeout=60) # Give it a longer timeout here just in case.
 
        # A final verification to ensure we are connected.
        if not self.is_server_running():
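
These timeout changes reshape the filelock-based startup race: instead of waiting up to 60 seconds to acquire the lock, a worker now gives up after 3 seconds and simply waits for whichever process holds the lock to finish booting the server. A minimal sketch of the pattern, with stand-ins for the binding's own probe and launch methods:

import time
from filelock import FileLock, Timeout

def is_server_running() -> bool:   # stand-in for the /status probe above
    return False

def start_server() -> None:        # stand-in for launching the server subprocess
    print("starting server...")

def wait_for_server(timeout: float) -> None:
    time.sleep(0)  # the real method polls is_server_running() until `timeout`

lock = FileLock("diffusers_server.lock")  # hypothetical lock file path
try:
    with lock.acquire(timeout=3):  # short acquisition window (was 60s)
        # Re-check after acquiring: another process may have started the
        # server and released the lock while we waited.
        if not is_server_running():
            start_server()
            wait_for_server(timeout=30)
except Timeout:
    # Another process holds the lock; don't start a second server, just wait.
    wait_for_server(timeout=60)
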
@@ -214,7 +214,7 @@ class DiffusersBinding(LollmsTTIBinding):
        self.server_process = subprocess.Popen(command, creationflags=creationflags)
        ASCIIColors.info("Diffusers server process launched in the background.")
 
-    def _wait_for_server(self, timeout=300):
+    def _wait_for_server(self, timeout=30):
        """Waits for the server to become responsive."""
        ASCIIColors.info("Waiting for Diffusers server to become available...")
        start_time = time.time()
@@ -374,4 +374,4 @@ class DiffusersBinding(LollmsTTIBinding):
    def __del__(self):
        # The client destructor does not stop the server,
        # as it is a shared resource for all worker processes.
-        pass
+        pass
@@ -60,66 +60,14 @@ MODELS_PATH = Path("./models")
60
60
 
61
61
  # --- START: Core Logic (Complete and Unabridged) ---
62
62
  CIVITAI_MODELS = {
63
- "realistic-vision-v6": {
64
- "display_name": "Realistic Vision V6.0", "url": "https://civitai.com/api/download/models/501240?type=Model&format=SafeTensor&size=pruned&fp=fp16",
65
- "filename": "realisticVisionV60_v60B1.safensors", "description": "Photorealistic SD1.5 checkpoint.", "owned_by": "civitai"
66
- },
67
- "absolute-reality": {
68
- "display_name": "Absolute Reality", "url": "https://civitai.com/api/download/models/132760?type=Model&format=SafeTensor&size=pruned&fp=fp16",
69
- "filename": "absolutereality_v181.safetensors", "description": "General realistic SD1.5.", "owned_by": "civitai"
70
- },
71
- "dreamshaper-8": {
63
+ "DreamShaper-8": {
72
64
  "display_name": "DreamShaper 8", "url": "https://civitai.com/api/download/models/128713",
73
65
  "filename": "dreamshaper_8.safetensors", "description": "Versatile SD1.5 style model.", "owned_by": "civitai"
74
66
  },
75
- "juggernaut-xl": {
67
+ "Juggernaut-xl": {
76
68
  "display_name": "Juggernaut XL", "url": "https://civitai.com/api/download/models/133005",
77
69
  "filename": "juggernautXL_version6Rundiffusion.safetensors", "description": "Artistic SDXL.", "owned_by": "civitai"
78
70
  },
79
- "lyriel-v1.6": {
80
- "display_name": "Lyriel v1.6", "url": "https://civitai.com/api/download/models/72396?type=Model&format=SafeTensor&size=full&fp=fp16",
81
- "filename": "lyriel_v16.safetensors", "description": "Fantasy/stylized SD1.5.", "owned_by": "civitai"
82
- },
83
- "ui_icons": {
84
- "display_name": "UI Icons", "url": "https://civitai.com/api/download/models/367044?type=Model&format=SafeTensor&size=full&fp=fp16",
85
- "filename": "uiIcons_v10.safetensors", "description": "A model for generating UI icons.", "owned_by": "civitai"
86
- },
87
- "meinamix": {
88
- "display_name": "MeinaMix", "url": "https://civitai.com/api/download/models/948574?type=Model&format=SafeTensor&size=pruned&fp=fp16",
89
- "filename": "meinamix_meinaV11.safetensors", "description": "Anime/illustration SD1.5.", "owned_by": "civitai"
90
- },
91
- "rpg-v5": {
92
- "display_name": "RPG v5", "url": "https://civitai.com/api/download/models/124626?type=Model&format=SafeTensor&size=pruned&fp=fp16",
93
- "filename": "rpg_v5.safetensors", "description": "RPG assets SD1.5.", "owned_by": "civitai"
94
- },
95
- "pixel-art-xl": {
96
- "display_name": "Pixel Art XL", "url": "https://civitai.com/api/download/models/135931?type=Model&format=SafeTensor",
97
- "filename": "pixelartxl_v11.safetensors", "description": "Pixel art SDXL.", "owned_by": "civitai"
98
- },
99
- "lowpoly-world": {
100
- "display_name": "Lowpoly World", "url": "https://civitai.com/api/download/models/146502?type=Model&format=SafeTensor",
101
- "filename": "LowpolySDXL.safetensors", "description": "Lowpoly style SD1.5.", "owned_by": "civitai"
102
- },
103
- "toonyou": {
104
- "display_name": "ToonYou", "url": "https://civitai.com/api/download/models/125771?type=Model&format=SafeTensor&size=pruned&fp=fp16",
105
- "filename": "toonyou_beta6.safetensors", "description": "Cartoon/Disney SD1.5.", "owned_by": "civitai"
106
- },
107
- "papercut": {
108
- "display_name": "Papercut", "url": "https://civitai.com/api/download/models/133503?type=Model&format=SafeTensor",
109
- "filename": "papercut.safetensors", "description": "Paper cutout SD1.5.", "owned_by": "civitai"
110
- },
111
- "fantassifiedIcons": {
112
- "display_name": "Fantassified Icons", "url": "https://civitai.com/api/download/models/67584?type=Model&format=SafeTensor&size=pruned&fp=fp16",
113
- "filename": "fantassifiedIcons_fantassifiedIconsV20.safetensors", "description": "Flat, modern Icons.", "owned_by": "civitai"
114
- },
115
- "game_icon_institute": {
116
- "display_name": "Game icon institute", "url": "https://civitai.com/api/download/models/158776?type=Model&format=SafeTensor&size=full&fp=fp16",
117
- "filename": "gameIconInstituteV10_v10.safetensors", "description": "Flat, modern game Icons.", "owned_by": "civitai"
118
- },
119
- "M4RV3LS_DUNGEONS": {
120
- "display_name": "M4RV3LS & DUNGEONS", "url": "https://civitai.com/api/download/models/139417?type=Model&format=SafeTensor&size=pruned&fp=fp16",
121
- "filename": "M4RV3LSDUNGEONSNEWV40COMICS_mD40.safetensors", "description": "comics.", "owned_by": "civitai"
122
- },
123
71
  }
124
72
 
125
73
  HF_PUBLIC_MODELS = {
@@ -145,8 +93,11 @@ HF_PUBLIC_MODELS = {
     ],
     "Image Editing Tools": [
         {"model_name": "stabilityai/stable-diffusion-xl-refiner-1.0", "display_name": "SDXL Refiner 1.0", "desc": "A dedicated refiner model to improve details in SDXL generations."},
-        {"model_name": "Qwen/Qwen-Image-Edit", "display_name": "Qwen Image Edit", "desc": "An instruction-based model for various image editing tasks."},
-        {"model_name": "Qwen/Qwen-Image-Edit-2509", "display_name": "Qwen Image Edit Plus", "desc": "Advanced multi-image editing, fusion, and pose transfer."},
+        {"model_name": "timbrooks/instruct-pix2pix", "display_name": "Instruct-Pix2Pix", "desc": "The original instruction-based image editing model (SD 1.5)."},
+        {"model_name": "kandinsky-community/kandinsky-2-2-instruct-pix2pix", "display_name": "Kandinsky 2.2 Instruct", "desc": "An instruction-based model with strong prompt adherence, based on Kandinsky 2.2."},
+        {"model_name": "diffusers/stable-diffusion-xl-1.0-inpainting-0.1", "display_name": "SDXL Inpainting", "desc": "A dedicated inpainting model based on SDXL 1.0 for filling in masked areas."},
+        {"model_name": "Qwen/Qwen-Image-Edit", "display_name": "Qwen Image Edit", "desc": "An instruction-based model for various image editing tasks. (Review License)."},
+        {"model_name": "Qwen/Qwen-Image-Edit-2509", "display_name": "Qwen Image Edit Plus", "desc": "Advanced multi-image editing and fusion. (Review License)."},
     ],
     "Legacy & Base Models": [
         {"model_name": "runwayml/stable-diffusion-v1-5", "display_name": "Stable Diffusion 1.5", "desc": "The classic and versatile SD1.5 base model."},
@@ -180,6 +131,7 @@ SCHEDULER_MAPPING = {
     "dpm2_karras": "KDPM2DiscreteScheduler", "dpm2_a": "KDPM2AncestralDiscreteScheduler", "dpm2_a_karras": "KDPM2AncestralDiscreteScheduler",
     "euler": "EulerDiscreteScheduler", "euler_a": "EulerAncestralDiscreteScheduler", "heun": "HeunDiscreteScheduler", "lms": "LMSDiscreteScheduler"
 }
+
 SCHEDULER_USES_KARRAS_SIGMAS = [
     "dpm_multistep_karras","dpm++_2m_karras","dpm++_2s_ancestral_karras", "dpm++_sde_karras","heun_karras","lms_karras",
     "dpm++_2m_sde_karras","dpm2_karras","dpm2_a_karras"
@@ -601,7 +553,7 @@ class ServerState:
         return {
             "model_name": "", "device": "auto", "torch_dtype_str": "auto", "use_safetensors": True,
             "scheduler_name": "default", "safety_checker_on": True, "num_inference_steps": 25,
-            "guidance_scale": 7.0, "width": 512, "height": 512, "seed": -1,
+            "guidance_scale": 7.0, "width": 1024, "height": 1024, "seed": -1,
            "enable_cpu_offload": False, "enable_sequential_cpu_offload": False, "enable_xformers": False,
            "hf_variant": None, "hf_token": None, "hf_cache_path": None, "local_files_only": False,
            "unload_inactive_model_after": 0
@@ -728,36 +680,33 @@ async def generate_image(request: T2IRequest):
     manager = None
     temp_config = None
     try:
-        params = request.params
-
         # Determine which model manager to use for this specific request
-        if "model_name" in params and params["model_name"]:
+        if "model_name" in request.params and request.params["model_name"]:
             temp_config = state.config.copy()
-            temp_config["model_name"] = params.pop("model_name") # Remove from params to avoid being passed to pipeline
+            temp_config["model_name"] = request.params.pop("model_name") # Remove from params to avoid being passed to pipeline
             manager = state.registry.get_manager(temp_config, state.models_path)
             ASCIIColors.info(f"Using per-request model: {temp_config['model_name']}")
         else:
             manager = state.get_active_manager()
             ASCIIColors.info(f"Using session-configured model: {manager.config.get('model_name')}")
 
-        seed = int(params.get("seed", manager.config.get("seed", -1)))
-        generator = None
+        # Start with the manager's config (base settings)
+        pipeline_args = manager.config.copy()
+        # Override with per-request parameters
+        pipeline_args.update(request.params)
+
+        # Add prompts and ensure types for specific args
+        pipeline_args["prompt"] = request.prompt
+        pipeline_args["negative_prompt"] = request.negative_prompt
+        pipeline_args["width"] = int(pipeline_args.get("width", 1024))
+        pipeline_args["height"] = int(pipeline_args.get("height", 1024))
+        pipeline_args["num_inference_steps"] = int(pipeline_args.get("num_inference_steps", 25))
+        pipeline_args["guidance_scale"] = float(pipeline_args.get("guidance_scale", 7.0))
+
+        seed = int(pipeline_args.get("seed", -1))
+        pipeline_args["generator"] = None
         if seed != -1:
-            generator = torch.Generator(device=manager.config["device"]).manual_seed(seed)
-
-        width = int(params.get("width", manager.config.get("width", 512)))
-        height = int(params.get("height", manager.config.get("height", 512)))
-
-        pipeline_args = {
-            "prompt": request.prompt,
-            "negative_prompt": request.negative_prompt,
-            "width": width,
-            "height": height,
-            "num_inference_steps": int(params.get("num_inference_steps", manager.config.get("num_inference_steps", 25))),
-            "guidance_scale": float(params.get("guidance_scale", manager.config.get("guidance_scale", 7.0))),
-            "generator": generator
-        }
-        pipeline_args.update(params)
+            pipeline_args["generator"] = torch.Generator(device=manager.config["device"]).manual_seed(seed)
 
        model_name = manager.config.get("model_name", "")
        task = "text2image"
@@ -765,12 +714,24 @@ async def generate_image(request: T2IRequest):
        if "Qwen-Image-Edit" in model_name:
            rng_seed = seed if seed != -1 else None
            rng = np.random.default_rng(seed=rng_seed)
-            random_pixels = rng.integers(0, 256, size=(height, width, 3), dtype=np.uint8)
+            random_pixels = rng.integers(0, 256, size=(pipeline_args["height"], pipeline_args["width"], 3), dtype=np.uint8)
            placeholder_image = Image.fromarray(random_pixels, 'RGB')
            pipeline_args["image"] = placeholder_image
-            pipeline_args["strength"] = float(params.get("strength", 1.0))
+            pipeline_args["strength"] = float(pipeline_args.get("strength", 1.0))
            task = "image2image"
 
+        log_args = {k: v for k, v in pipeline_args.items() if k not in ['generator', 'image']}
+        if pipeline_args.get("generator"): log_args['generator'] = f"<torch.Generator(seed={seed})>"
+        if pipeline_args.get("image"): log_args['image'] = "<PIL Image object>"
+
+        ASCIIColors.cyan("--- Generating Image with Settings ---")
+        try:
+            print(json.dumps(log_args, indent=2, default=str))
+        except Exception as e:
+            ASCIIColors.warning(f"Could not print all settings: {e}")
+            print(log_args)
+        ASCIIColors.cyan("------------------------------------")
+
        future = Future()
        manager.queue.put((future, task, pipeline_args))
        result_bytes = future.result()
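
The rewritten handler builds `pipeline_args` by layering three sources, lowest precedence first: the manager's session config, the per-request parameters, and finally the explicitly coerced prompt/size/steps/guidance fields. A toy illustration of that precedence (values made up):

session_config = {"width": 1024, "num_inference_steps": 25, "guidance_scale": 7.0}
request_params = {"width": 768, "seed": 42}

pipeline_args = session_config.copy()  # base settings
pipeline_args.update(request_params)   # per-request overrides win
pipeline_args["width"] = int(pipeline_args.get("width", 1024))  # type coercion last

assert pipeline_args["width"] == 768               # request overrode the session value
assert pipeline_args["num_inference_steps"] == 25  # session default preserved
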
@@ -789,17 +750,20 @@ async def edit_image(request: EditRequestJSON):
     manager = None
     temp_config = None
     try:
-        params = request.params
-
-        if "model_name" in params and params["model_name"]:
+        if "model_name" in request.params and request.params["model_name"]:
             temp_config = state.config.copy()
-            temp_config["model_name"] = params.pop("model_name")
+            temp_config["model_name"] = request.params.pop("model_name")
             manager = state.registry.get_manager(temp_config, state.models_path)
             ASCIIColors.info(f"Using per-request model: {temp_config['model_name']}")
         else:
             manager = state.get_active_manager()
             ASCIIColors.info(f"Using session-configured model: {manager.config.get('model_name')}")
 
+        # Start with manager's config, then override with request params
+        pipeline_args = manager.config.copy()
+        pipeline_args.update(request.params)
+
+        pipeline_args["prompt"] = request.prompt
        model_name = manager.config.get("model_name", "")
 
        pil_images = []
@@ -810,27 +774,38 @@ async def edit_image(request: EditRequestJSON):
 
        if not pil_images: raise HTTPException(status_code=400, detail="No valid images provided.")
 
-        pipeline_args = {"prompt": request.prompt}
-        seed = int(params.get("seed", -1))
+        seed = int(pipeline_args.get("seed", -1))
+        pipeline_args["generator"] = None
        if seed != -1: pipeline_args["generator"] = torch.Generator(device=manager.config["device"]).manual_seed(seed)
 
-        if "mask_image" in params and params["mask_image"]:
-            b64_mask = params["mask_image"]
+        if "mask_image" in pipeline_args and pipeline_args["mask_image"]:
+            b64_mask = pipeline_args["mask_image"]
            b64_data = b64_mask.split(";base64,")[1] if ";base64," in b64_mask else b64_mask
            mask_bytes = base64.b64decode(b64_data)
            pipeline_args["mask_image"] = Image.open(BytesIO(mask_bytes)).convert("L")
 
-        task = "inpainting" if "mask_image" in pipeline_args else "image2image"
+        task = "inpainting" if "mask_image" in pipeline_args and pipeline_args["mask_image"] else "image2image"
 
        if "Qwen-Image-Edit-2509" in model_name:
            task = "image2image"
            pipeline_args.update({"true_cfg_scale": 4.0, "guidance_scale": 1.0, "num_inference_steps": 40, "negative_prompt": " "})
-            edit_mode = params.get("edit_mode", "fusion")
+            edit_mode = pipeline_args.get("edit_mode", "fusion")
            if edit_mode == "fusion": pipeline_args["image"] = pil_images
            else:
                pipeline_args.update({"image": pil_images[0], "strength": 0.8, "guidance_scale": 7.5, "num_inference_steps": 25})
 
-        pipeline_args.update(params)
+        log_args = {k: v for k, v in pipeline_args.items() if k not in ['generator', 'image', 'mask_image']}
+        if pipeline_args.get("generator"): log_args['generator'] = f"<torch.Generator(seed={seed})>"
+        if 'image' in pipeline_args: log_args['image'] = f"[<{len(pil_images)} PIL Image(s)>]"
+        if 'mask_image' in pipeline_args and pipeline_args['mask_image']: log_args['mask_image'] = "<PIL Mask Image>"
+
+        ASCIIColors.cyan("--- Editing Image with Settings ---")
+        try:
+            print(json.dumps(log_args, indent=2, default=str))
+        except Exception as e:
+            ASCIIColors.warning(f"Could not print all settings: {e}")
+            print(log_args)
+        ASCIIColors.cyan("---------------------------------")
 
        future = Future(); manager.queue.put((future, task, pipeline_args))
        return Response(content=future.result(), media_type="image/png")
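
Both endpoints now log their effective settings before queuing work, swapping values that `json.dumps` cannot serialize (the torch generator, PIL images) for readable placeholders so that logging itself cannot raise. The same sanitize-then-dump pattern in isolation:

import json

pipeline_args = {"prompt": "a cat", "generator": object(), "image": object()}

# Drop non-serializable values, then re-add them as placeholder strings.
log_args = {k: v for k, v in pipeline_args.items() if k not in ("generator", "image")}
if pipeline_args.get("generator") is not None:
    log_args["generator"] = "<torch.Generator>"
if pipeline_args.get("image") is not None:
    log_args["image"] = "<PIL Image object>"

print(json.dumps(log_args, indent=2, default=str))
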
@@ -963,4 +938,4 @@ if __name__ == "__main__":
963
938
  else:
964
939
  ASCIIColors.info(f"Detected device: {state.config['device']}")
965
940
 
966
- uvicorn.run(app, host=args.host, port=args.port, reload=False)
941
+ uvicorn.run(app, host=args.host, port=args.port, reload=False)

--- lollms_client/tts_bindings/xtts/__init__.py
+++ lollms_client/tts_bindings/xtts/__init__.py
@@ -6,13 +6,6 @@ import time
 from pathlib import Path
 from typing import Optional, List
 
-# Ensure pipmaster is available.
-try:
-    import pipmaster as pm
-except ImportError:
-    print("FATAL: pipmaster is not installed. Please install it using: pip install pipmaster")
-    sys.exit(1)
-
 # Ensure filelock is available for process-safe server startup.
 try:
     from filelock import FileLock, Timeout
@@ -39,7 +32,7 @@ class XTTSClientBinding(LollmsTTSBinding):
 
        self.config = kwargs
        self.host = kwargs.get("host", "localhost")
-        self.port = kwargs.get("port", 8081)
+        self.port = kwargs.get("port", 9633)
        self.auto_start_server = kwargs.get("auto_start_server", True)
        self.server_process = None
        self.base_url = f"http://{self.host}:{self.port}"
@@ -76,7 +69,7 @@ class XTTSClientBinding(LollmsTTSBinding):
            return
 
        try:
-            with lock.acquire(timeout=60):
+            with lock.acquire(timeout=10):
                if not self.is_server_running():
                    ASCIIColors.yellow("Lock acquired. Starting dedicated XTTS server...")
                    self.start_server()
@@ -85,7 +78,7 @@ class XTTSClientBinding(LollmsTTSBinding):
                ASCIIColors.green("Server was started by another process while we waited. Connected successfully.")
        except Timeout:
            ASCIIColors.yellow("Could not acquire lock, another process is starting the server. Waiting...")
-            self._wait_for_server(timeout=180)
+            self._wait_for_server(timeout=60)
 
        if not self.is_server_running():
            raise RuntimeError("Failed to start or connect to the XTTS server after all attempts.")
@@ -97,6 +90,12 @@ class XTTSClientBinding(LollmsTTSBinding):
        using pipmaster, which handles complex packages like PyTorch.
        """
        ASCIIColors.info(f"Setting up virtual environment in: {self.venv_dir}")
+        # Ensure pipmaster is available.
+        try:
+            import pipmaster as pm
+        except ImportError:
+            print("FATAL: pipmaster is not installed. Please install it using: pip install pipmaster")
+            raise Exception("pipmaster not found")
        pm_v = pm.PackageManager(venv_path=str(self.venv_dir))
 
        requirements_file = self.server_dir / "requirements.txt"
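
Moving the pipmaster import from module scope into the venv-setup method is a lazy-import pattern: the optional dependency is only needed when this code path actually runs, and a missing package now raises an exception instead of killing the interpreter with sys.exit(1) at import time. In outline (hypothetical function name; `PackageManager(venv_path=...)` is the signature the diff itself uses):

def setup_venv(venv_dir: str):
    try:
        import pipmaster as pm  # optional dependency, imported on demand
    except ImportError as exc:
        # Fail this call, not the whole process.
        raise RuntimeError("pipmaster not found; run: pip install pipmaster") from exc
    return pm.PackageManager(venv_path=venv_dir)
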
@@ -141,7 +140,7 @@ class XTTSClientBinding(LollmsTTSBinding):
        self.server_process = subprocess.Popen(command, creationflags=creationflags)
        ASCIIColors.info("XTTS server process launched in the background.")
 
-    def _wait_for_server(self, timeout=120):
+    def _wait_for_server(self, timeout=10):
        """Waits for the server to become responsive."""
        ASCIIColors.info("Waiting for XTTS server to become available...")
        start_time = time.time()

--- lollms_client/tts_bindings/xtts/server/main.py
+++ lollms_client/tts_bindings/xtts/server/main.py
@@ -272,4 +272,4 @@ except Exception as e:
    from ascii_colors import ASCIIColors
    ASCIIColors.red(f"Server: CRITICAL ERROR during startup: {e}")
    import traceback
-    ASCIIColors.red(f"Server: Traceback:\n{traceback.format_exc()}")```
+    ASCIIColors.red(f"Server: Traceback:\n{traceback.format_exc()}")

--- lollms_client-1.6.6.dist-info/METADATA
+++ lollms_client-1.6.10.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 1.6.6
+Version: 1.6.10
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache License

--- lollms_client-1.6.6.dist-info/RECORD
+++ lollms_client-1.6.10.dist-info/RECORD
@@ -1,15 +1,15 @@
-lollms_client/__init__.py,sha256=51YtCHNJCmroyA9htiIgjui1ZSFfkn_zhhe0USpE8nc,1146
-lollms_client/lollms_agentic.py,sha256=pQiMEuB_XkG29-SW6u4KTaMFPr6eKqacInggcCuCW3k,13914
+lollms_client/__init__.py,sha256=0D-nwmkSe8qYbyPSuKdYU09t7x5P2BYwfpI4mbzEZlU,1147
+lollms_client/lollms_agentic.py,sha256=ljalnmeSU-sbzH19-c9TzrJ-HhEeo4mxXmpJGkXj720,14094
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=Un74iLbnnn2yZYH6HBNRz1mTZ454NEMBEndS4nvh3ZI,244887
-lollms_client/lollms_discussion.py,sha256=LZc9jYbUMRTovehiFJKEp-NXuCl_WnrqUtT3t4Nzayk,123922
+lollms_client/lollms_core.py,sha256=PGHPu_V5rKnO-032EjgMw8M5T0SLpNOWrzSUlWKBBgE,253052
+lollms_client/lollms_discussion.py,sha256=4uzXLqGz72xZcXEtamWGudTOR54cYwuo6k8JY37scqY,124574
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
 lollms_client/lollms_llm_binding.py,sha256=_6d0q9g9lk8FRZ1oYnLpuqG7Y_WLyBJBn4ANdk-C8gU,25020
 lollms_client/lollms_mcp_binding.py,sha256=psb27A23VFWDfZsR2WUbQXQxiZDW5yfOak6ZtbMfszI,10222
 lollms_client/lollms_mcp_security.py,sha256=FhVTDhSBjksGEZnopVnjFmEF5dv7D8bBTqoaj4BiF0E,3562
 lollms_client/lollms_personality.py,sha256=kGuFwmgA9QDLcQlLQ9sKeceMujdEo0Aw28fN5H8MpjI,8847
 lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
-lollms_client/lollms_stt_binding.py,sha256=jAUhLouEhh2hmm1bK76ianfw_6B59EHfY3FmLv6DU-g,5111
+lollms_client/lollms_stt_binding.py,sha256=WkREwu0uc0UzeCv5Z9ok8AFG42iBq20ZOQJnydSTE0s,7505
 lollms_client/lollms_tti_binding.py,sha256=MhDntyXVNoZeqMH0YpoNtiLijXPL8Y--if2qjQAS0-w,8520
 lollms_client/lollms_ttm_binding.py,sha256=FjVVSNXOZXK1qvcKEfxdiX6l2b4XdGOSNnZ0utAsbDg,4167
 lollms_client/lollms_tts_binding.py,sha256=k13rNq4YmuR50kkAEacwADW7COoDUOMLGAcnm27xjO4,5150
@@ -49,11 +49,11 @@ lollms_client/mcp_bindings/remote_mcp/__init__.py,sha256=YpSclbNJDYVUe2W0H5Xki4g
 lollms_client/mcp_bindings/standard_mcp/__init__.py,sha256=wJQofr4zS5RIS9V5_WuMMFsJxSDJgXDW3PQPX1hlx6g,31519
 lollms_client/stt_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lollms_client/stt_bindings/lollms/__init__.py,sha256=9Vmn1sQQZKLGLe7nZnc-0LnNeSY8r9xw3pYZF-wVtPo,5889
-lollms_client/stt_bindings/whisper/__init__.py,sha256=1Ej67GdRKBy1bba14jMaYDYHiZkxJASkWm5eF07ztDQ,15363
-lollms_client/stt_bindings/whispercpp/__init__.py,sha256=xSAQRjAhljak3vWCpkP0Vmdb6WmwTzPjXyaIB85KLGU,21439
+lollms_client/stt_bindings/whisper/__init__.py,sha256=HVVYRGIPkTTwNw5uhvxvRkSYeAv6nNRZp_geS8SwKZ4,15428
+lollms_client/stt_bindings/whispercpp/__init__.py,sha256=5YQKFy3UaN-S-HGZiFCIcuPGTJTELPgqqct1AcTqz-Q,21595
 lollms_client/tti_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client/tti_bindings/diffusers/__init__.py,sha256=esrcyy_z_6HVCFKMVXl1h_qY_pX3kMHwO81M2C8hSIg,17706
-lollms_client/tti_bindings/diffusers/server/main.py,sha256=PQ3WXhkQzEzyT100k7nu1ZHQtkGphvpWNGl7Bcg26eY,49593
+lollms_client/tti_bindings/diffusers/__init__.py,sha256=_Nd3OotY1kBlEmHBuxVsNwIc_vvUy8sBo1Ug9_lUOzI,17705
+lollms_client/tti_bindings/diffusers/server/main.py,sha256=-Eo9vrpsK_LXRFJWkplriSdUX8VnkcL6tfFdxontNnM,48136
 lollms_client/tti_bindings/gemini/__init__.py,sha256=eYGz6gnOxWGdJu2O0H-EwGG-Hg7Yo3Hzsgn4neqx29Q,12963
 lollms_client/tti_bindings/leonardo_ai/__init__.py,sha256=pUbF1rKPZib1x0Kn2Bk1A7sTFWmZzNG02kmW6Iu1j2w,5885
 lollms_client/tti_bindings/lollms/__init__.py,sha256=5Tnsn4b17djvieQkcjtIDBm3qf0pg5ZWWov-4_2wmo0,8762
@@ -76,13 +76,13 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=7LQUuWV8I3IEdacc65NRHmDf
 lollms_client/tts_bindings/piper_tts/server/install_piper.py,sha256=g71Ne2T18wAytOPipfQ9DNeTAOD9PrII5qC-vr9DtLA,3256
 lollms_client/tts_bindings/piper_tts/server/main.py,sha256=DMozfSR1aCbrlmOXltRFjtXhYhXajsGcNKQjsWgRwZk,17402
 lollms_client/tts_bindings/piper_tts/server/setup_voices.py,sha256=UdHaPa5aNcw8dR-aRGkZr2OfSFFejH79lXgfwT0P3ss,1964
-lollms_client/tts_bindings/xtts/__init__.py,sha256=lTlExBPZ97FPaf9DoqxE4ilwwO5y88dPOHeRaR5BCnc,8002
-lollms_client/tts_bindings/xtts/server/main.py,sha256=JYKUzg4qFOGW8O_QDb9ChEdhcPRSccdwOlR3q-kJX7I,12306
+lollms_client/tts_bindings/xtts/__init__.py,sha256=q5xuNUYz4l9ajmZo4yvcTgxPuv9HT6T16u-weh3lzC8,8073
+lollms_client/tts_bindings/xtts/server/main.py,sha256=feTAX4eAo2HY6PpcDTrgRMak5AXocO7UIhKPuGuWpxY,12303
 lollms_client/tts_bindings/xtts/server/setup_voices.py,sha256=UdHaPa5aNcw8dR-aRGkZr2OfSFFejH79lXgfwT0P3ss,1964
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-1.6.6.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-lollms_client-1.6.6.dist-info/METADATA,sha256=i6Gb5wKrXNF6OPUCz41s5YbpBY5HEvLdAD5a6ONZV84,76835
-lollms_client-1.6.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lollms_client-1.6.6.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
-lollms_client-1.6.6.dist-info/RECORD,,
+lollms_client-1.6.10.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-1.6.10.dist-info/METADATA,sha256=Mqk7RqLL6F5By0K_XIJemPSNoQhDcaW9JBT5-H9Tewg,76836
+lollms_client-1.6.10.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-1.6.10.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+lollms_client-1.6.10.dist-info/RECORD,,