lollms-client 1.1.0__tar.gz → 1.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

Files changed (76)
  1. {lollms_client-1.1.0/src/lollms_client.egg-info → lollms_client-1.1.1}/PKG-INFO +1 -1
  2. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/__init__.py +1 -1
  3. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/ollama/__init__.py +1 -1
  4. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_llm_binding.py +2 -1
  5. lollms_client-1.1.1/src/lollms_client/tti_bindings/diffusers/__init__.py +613 -0
  6. {lollms_client-1.1.0 → lollms_client-1.1.1/src/lollms_client.egg-info}/PKG-INFO +1 -1
  7. lollms_client-1.1.0/src/lollms_client/tti_bindings/diffusers/__init__.py +0 -480
  8. {lollms_client-1.1.0 → lollms_client-1.1.1}/LICENSE +0 -0
  9. {lollms_client-1.1.0 → lollms_client-1.1.1}/README.md +0 -0
  10. {lollms_client-1.1.0 → lollms_client-1.1.1}/pyproject.toml +0 -0
  11. {lollms_client-1.1.0 → lollms_client-1.1.1}/setup.cfg +0 -0
  12. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/__init__.py +0 -0
  13. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/azure_openai/__init__.py +0 -0
  14. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/claude/__init__.py +0 -0
  15. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/gemini/__init__.py +0 -0
  16. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/grok/__init__.py +0 -0
  17. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/groq/__init__.py +0 -0
  18. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +0 -0
  19. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/litellm/__init__.py +0 -0
  20. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  21. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/lollms/__init__.py +0 -0
  22. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/lollms_webui/__init__.py +0 -0
  23. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/mistral/__init__.py +0 -0
  24. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/open_router/__init__.py +0 -0
  25. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/openai/__init__.py +0 -0
  26. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  27. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  28. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  29. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  30. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  31. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_config.py +0 -0
  32. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_core.py +0 -0
  33. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_discussion.py +0 -0
  34. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_js_analyzer.py +0 -0
  35. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_mcp_binding.py +0 -0
  36. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_mcp_security.py +0 -0
  37. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_personality.py +0 -0
  38. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_python_analyzer.py +0 -0
  39. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_stt_binding.py +0 -0
  40. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_tti_binding.py +0 -0
  41. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_ttm_binding.py +0 -0
  42. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_tts_binding.py +0 -0
  43. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_ttv_binding.py +0 -0
  44. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_types.py +0 -0
  45. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/lollms_utilities.py +0 -0
  46. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
  47. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
  48. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
  49. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
  50. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
  51. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
  52. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
  53. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/stt_bindings/__init__.py +0 -0
  54. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  55. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  56. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  57. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/tti_bindings/__init__.py +0 -0
  58. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/tti_bindings/dalle/__init__.py +0 -0
  59. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/tti_bindings/gemini/__init__.py +0 -0
  60. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  61. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/ttm_bindings/__init__.py +0 -0
  62. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  63. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/ttm_bindings/bark/__init__.py +0 -0
  64. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  65. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/tts_bindings/__init__.py +0 -0
  66. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/tts_bindings/bark/__init__.py +0 -0
  67. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  68. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  69. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  70. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/ttv_bindings/__init__.py +0 -0
  71. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  72. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client.egg-info/SOURCES.txt +0 -0
  73. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client.egg-info/dependency_links.txt +0 -0
  74. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client.egg-info/requires.txt +0 -0
  75. {lollms_client-1.1.0 → lollms_client-1.1.1}/src/lollms_client.egg-info/top_level.txt +0 -0
  76. {lollms_client-1.1.0 → lollms_client-1.1.1}/test/test_lollms_discussion.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 1.1.0
+ Version: 1.1.1
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
src/lollms_client/__init__.py
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
  from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
 
- __version__ = "1.1.0" # Updated version
+ __version__ = "1.1.1" # Updated version
 
  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [
src/lollms_client/llm_bindings/ollama/__init__.py
@@ -434,7 +434,7 @@ class OllamaBinding(LollmsLLMBinding):
              list: List of individual characters.
          """
          ## Since ollama has no endpoints to tokenize the text, we use tiktoken to have a rough estimate
-         return tiktoken.model.encoding_for_model("gpt-3.5-turbo").encode(text)
+         return tiktoken.model.encoding_for_model("gpt-3.5-turbo").encode(text, disallowed_special=())
 
      def detokenize(self, tokens: list) -> str:
          """
src/lollms_client/lollms_llm_binding.py
@@ -373,7 +373,8 @@ class LollmsLLMBinding(ABC):
          if messages[-1]["content"]=="":
              del messages[-1]
          return messages
- 
+     def ps(self):
+         return []
 
 
 
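
The ps() addition gives the abstract base a safe default for listing running models, so callers can query any binding without guarding against AttributeError. A fragment-level sketch (here `binding` stands for any concrete LollmsLLMBinding instance; it is not a name from this diff):

    running = binding.ps()  # -> [] unless a concrete binding overrides ps()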
src/lollms_client/tti_bindings/diffusers/__init__.py (new file)
@@ -0,0 +1,613 @@
+ # lollms_client/tti_bindings/diffusers/__init__.py
+ import os
+ import importlib
+ from io import BytesIO
+ from typing import Optional, List, Dict, Any, Union
+ from pathlib import Path
+ import pipmaster as pm
+ # --- Concurrency Imports ---
+ import threading
+ import queue
+ from concurrent.futures import Future
+ import time
+ import hashlib
+ import re
+ # -------------------------
+ # --- Download Imports ---
+ import requests
+ from tqdm import tqdm
+ # --------------------
+
+ pm.ensure_packages(["torch","torchvision"],index_url="https://download.pytorch.org/whl/cu126")
+ pm.ensure_packages(["diffusers","pillow","transformers","safetensors", "requests", "tqdm"])
+
+ # Attempt to import core dependencies and set availability flag
+ try:
+     import torch
+     from diffusers import AutoPipelineForText2Image, DiffusionPipeline, StableDiffusionPipeline
+     from diffusers.utils import load_image
+     from PIL import Image
+     DIFFUSERS_AVAILABLE = True
+ except ImportError:
+     torch = None
+     AutoPipelineForText2Image = None
+     DiffusionPipeline = None
+     StableDiffusionPipeline = None
+     Image = None
+     load_image = None
+     DIFFUSERS_AVAILABLE = False
+
+ from lollms_client.lollms_tti_binding import LollmsTTIBinding
+ from ascii_colors import trace_exception, ASCIIColors
+ import json
+ import shutil
+
+ # Defines the binding name for the manager
+ BindingName = "DiffusersTTIBinding_Impl"
+
+ # --- START: Civitai Model Definitions ---
+ # Expanded list of popular Civitai models (as single .safetensors files)
+ CIVITAI_MODELS = {
+     # Photorealistic
+     "realistic-vision-v6": {
+         "display_name": "Realistic Vision V6.0",
+         "url": "https://civitai.com/api/download/models/130072",
+         "filename": "realisticVisionV60_v60B1.safetensors",
+         "description": "One of the most popular photorealistic models.",
+         "owned_by": "civitai"
+     },
+     "absolute-reality": {
+         "display_name": "Absolute Reality",
+         "url": "https://civitai.com/api/download/models/132760",
+         "filename": "absolutereality_v181.safetensors",
+         "description": "A top-tier model for generating realistic images.",
+         "owned_by": "civitai"
+     },
+     # Artistic / General Purpose
+     "dreamshaper-8": {
+         "display_name": "DreamShaper 8",
+         "url": "https://civitai.com/api/download/models/128713",
+         "filename": "dreamshaper_8.safetensors",
+         "description": "A very popular and versatile general-purpose model.",
+         "owned_by": "civitai"
+     },
+     "juggernaut-xl": {
+         "display_name": "Juggernaut XL",
+         "url": "https://civitai.com/api/download/models/133005",
+         "filename": "juggernautXL_version6Rundiffusion.safetensors",
+         "description": "High-quality artistic model, great for cinematic styles (SDXL-based).",
+         "owned_by": "civitai"
+     },
+     # Anime
+     "anything-v5": {
+         "display_name": "Anything V5",
+         "url": "https://civitai.com/api/download/models/9409",
+         "filename": "anythingV5_PrtRE.safetensors",
+         "description": "A classic and highly popular model for anime-style generation.",
+         "owned_by": "civitai"
+     },
+     "lyriel-v1.6": {
+         "display_name": "Lyriel v1.6",
+         "url": "https://civitai.com/api/download/models/92407",
+         "filename": "lyriel_v16.safetensors",
+         "description": "A popular artistic model for fantasy and stylized images.",
+         "owned_by": "civitai"
+     }
+ }
+ # --- END: Civitai Model Definitions ---
+
+ # Helper for torch.dtype string conversion
+ TORCH_DTYPE_MAP_STR_TO_OBJ = {
+     "float16": getattr(torch, 'float16', 'float16'),
+     "bfloat16": getattr(torch, 'bfloat16', 'bfloat16'),
+     "float32": getattr(torch, 'float32', 'float32'),
+     "auto": "auto"
+ }
+ TORCH_DTYPE_MAP_OBJ_TO_STR = {v: k for k, v in TORCH_DTYPE_MAP_STR_TO_OBJ.items()}
+ if torch:
+     TORCH_DTYPE_MAP_OBJ_TO_STR[None] = "None"
+
+ # Common Schedulers mapping
+ SCHEDULER_MAPPING = {
+     "default": None, "ddim": "DDIMScheduler", "ddpm": "DDPMScheduler", "deis_multistep": "DEISMultistepScheduler",
+     "dpm_multistep": "DPMSolverMultistepScheduler", "dpm_multistep_karras": "DPMSolverMultistepScheduler",
+     "dpm_single": "DPMSolverSinglestepScheduler", "dpm_adaptive": "DPMSolverPlusPlusScheduler",
+     "dpm++_2m": "DPMSolverMultistepScheduler", "dpm++_2m_karras": "DPMSolverMultistepScheduler",
+     "dpm++_2s_ancestral": "DPMSolverAncestralDiscreteScheduler", "dpm++_2s_ancestral_karras": "DPMSolverAncestralDiscreteScheduler",
+     "dpm++_sde": "DPMSolverSDEScheduler", "dpm++_sde_karras": "DPMSolverSDEScheduler",
+     "euler_ancestral_discrete": "EulerAncestralDiscreteScheduler", "euler_discrete": "EulerDiscreteScheduler",
+     "heun_discrete": "HeunDiscreteScheduler", "heun_karras": "HeunDiscreteScheduler",
+     "lms_discrete": "LMSDiscreteScheduler", "lms_karras": "LMSDiscreteScheduler",
+     "pndm": "PNDMScheduler", "unipc_multistep": "UniPCMultistepScheduler",
+ }
+ SCHEDULER_USES_KARRAS_SIGMAS = [
+     "dpm_multistep_karras", "dpm++_2m_karras", "dpm++_2s_ancestral_karras",
+     "dpm++_sde_karras", "heun_karras", "lms_karras"
+ ]
+
+ # --- START: Concurrency and Singleton Management ---
+
+ class ModelManager:
+     """
+     Manages a single pipeline instance, its generation queue, and a worker thread.
+     This ensures all interactions with a specific model are thread-safe.
+     """
+     def __init__(self, config: Dict[str, Any], models_path: Path):
+         self.config = config
+         self.models_path = models_path
+         self.pipeline: Optional[DiffusionPipeline] = None
+         self.ref_count = 0
+         self.lock = threading.Lock()
+         self.queue = queue.Queue()
+         self.worker_thread = threading.Thread(target=self._generation_worker, daemon=True)
+         self._stop_event = threading.Event()
+         self.is_loaded = False
+
+         self.worker_thread.start()
+
+     def acquire(self):
+         with self.lock:
+             self.ref_count += 1
+             return self
+
+     def release(self):
+         with self.lock:
+             self.ref_count -= 1
+             return self.ref_count
+
+     def stop(self):
+         self._stop_event.set()
+         self.queue.put(None)
+         self.worker_thread.join(timeout=5)
+
+     def _load_pipeline(self):
+         if self.pipeline:
+             return
+
+         model_name = self.config.get("model_name", "")
+         if not model_name:
+             raise ValueError("Model name cannot be empty for loading.")
+
+         ASCIIColors.info(f"Loading Diffusers model: {model_name}")
+         model_path = self._resolve_model_path(model_name)
+         torch_dtype = TORCH_DTYPE_MAP_STR_TO_OBJ.get(self.config["torch_dtype_str"].lower())
+
+         try:
+             if str(model_path).endswith(".safetensors"):
+                 ASCIIColors.info(f"Loading from single safetensors file: {model_path}")
+                 try:
+                     # Modern, preferred method for newer diffusers versions
+                     self.pipeline = AutoPipelineForText2Image.from_single_file(
+                         model_path,
+                         torch_dtype=torch_dtype,
+                         cache_dir=self.config.get("hf_cache_path")
+                     )
+                 except AttributeError:
+                     # Fallback for older diffusers versions
+                     ASCIIColors.warning("AutoPipelineForText2Image.from_single_file not found. Falling back to StableDiffusionPipeline.")
+                     ASCIIColors.warning("Consider updating diffusers for better compatibility: pip install --upgrade diffusers")
+                     self.pipeline = StableDiffusionPipeline.from_single_file(
+                         model_path,
+                         torch_dtype=torch_dtype,
+                         cache_dir=self.config.get("hf_cache_path")
+                     )
+             else:
+                 ASCIIColors.info(f"Loading from pretrained folder/repo: {model_path}")
+                 load_args = {
+                     "torch_dtype": torch_dtype, "use_safetensors": self.config["use_safetensors"],
+                     "token": self.config["hf_token"], "local_files_only": self.config["local_files_only"],
+                 }
+                 if self.config["hf_variant"]: load_args["variant"] = self.config["hf_variant"]
+                 if not self.config["safety_checker_on"]: load_args["safety_checker"] = None
+                 if self.config.get("hf_cache_path"): load_args["cache_dir"] = str(self.config["hf_cache_path"])
+                 self.pipeline = AutoPipelineForText2Image.from_pretrained(model_path, **load_args)
+
+         except Exception as e:
+             error_str = str(e).lower()
+             if "401" in error_str or "gated" in error_str or "authorization" in error_str:
+                 auth_error_msg = (
+                     f"AUTHENTICATION FAILED for model '{model_name}'. This is likely a 'gated' model on Hugging Face.\n"
+                     "Please ensure you have accepted its license and provided a valid HF Access Token in the settings."
+                 )
+                 raise RuntimeError(auth_error_msg) from e
+             else:
+                 raise e
+
+         self._set_scheduler()
+         self.pipeline.to(self.config["device"])
+
+         if self.config["enable_xformers"]:
+             try:
+                 self.pipeline.enable_xformers_memory_efficient_attention()
+             except Exception as e:
+                 ASCIIColors.warning(f"Could not enable xFormers: {e}.")
+
+         if self.config["enable_cpu_offload"] and self.config["device"] != "cpu":
+             self.pipeline.enable_model_cpu_offload()
+         elif self.config["enable_sequential_cpu_offload"] and self.config["device"] != "cpu":
+             self.pipeline.enable_sequential_cpu_offload()
+
+         self.is_loaded = True
+         ASCIIColors.green(f"Model '{model_name}' loaded successfully on '{self.config['device']}'.")
+
+     def _unload_pipeline(self):
+         if self.pipeline:
+             del self.pipeline
+             self.pipeline = None
+             if torch and torch.cuda.is_available():
+                 torch.cuda.empty_cache()
+         self.is_loaded = False
+         ASCIIColors.info(f"Model '{self.config.get('model_name')}' unloaded.")
+
+     def _generation_worker(self):
+         while not self._stop_event.is_set():
+             try:
+                 job = self.queue.get(timeout=1)
+                 if job is None:
+                     break
+                 future, pipeline_args = job
+                 try:
+                     with self.lock:
+                         if not self.pipeline:
+                             self._load_pipeline()
+                         with torch.no_grad():
+                             pipeline_output = self.pipeline(**pipeline_args)
+                         pil_image: Image.Image = pipeline_output.images[0]
+                         img_byte_arr = BytesIO()
+                         pil_image.save(img_byte_arr, format="PNG")
+                         future.set_result(img_byte_arr.getvalue())
+                 except Exception as e:
+                     trace_exception(e)
+                     future.set_exception(e)
+                 finally:
+                     self.queue.task_done()
+             except queue.Empty:
+                 continue
+
+     def _download_civitai_model(self, model_key: str):
+         model_info = CIVITAI_MODELS[model_key]
+         url = model_info["url"]
+         filename = model_info["filename"]
+         dest_path = self.models_path / filename
+         temp_path = dest_path.with_suffix(".temp")
+
+         ASCIIColors.cyan(f"Downloading '{filename}' from Civitai...")
+         try:
+             with requests.get(url, stream=True) as r:
+                 r.raise_for_status()
+                 total_size = int(r.headers.get('content-length', 0))
+                 with open(temp_path, 'wb') as f, tqdm(
+                     total=total_size, unit='iB', unit_scale=True, desc=f"Downloading {filename}"
+                 ) as bar:
+                     for chunk in r.iter_content(chunk_size=8192):
+                         f.write(chunk)
+                         bar.update(len(chunk))
+
+             shutil.move(temp_path, dest_path)
+             ASCIIColors.green(f"Model '{filename}' downloaded successfully.")
+         except Exception as e:
+             if temp_path.exists():
+                 temp_path.unlink()
+             raise Exception(f"Failed to download model {filename}: {e}") from e
+
+     def _resolve_model_path(self, model_name: str) -> Union[str, Path]:
+         path_obj = Path(model_name)
+         if path_obj.is_absolute() and path_obj.exists():
+             return model_name
+
+         if model_name in CIVITAI_MODELS:
+             filename = CIVITAI_MODELS[model_name]["filename"]
+             local_path = self.models_path / filename
+             if not local_path.exists():
+                 self._download_civitai_model(model_name)
+             return local_path
+
+         local_path = self.models_path / model_name
+         if local_path.exists():
+             return local_path
+
+         return model_name
+
+     def _set_scheduler(self):
+         if not self.pipeline: return
+         scheduler_name_key = self.config["scheduler_name"].lower()
+         if scheduler_name_key == "default": return
+
+         scheduler_class_name = SCHEDULER_MAPPING.get(scheduler_name_key)
+         if scheduler_class_name:
+             try:
+                 SchedulerClass = getattr(importlib.import_module("diffusers.schedulers"), scheduler_class_name)
+                 scheduler_config = self.pipeline.scheduler.config
+                 scheduler_config["use_karras_sigmas"] = scheduler_name_key in SCHEDULER_USES_KARRAS_SIGMAS
+                 self.pipeline.scheduler = SchedulerClass.from_config(scheduler_config)
+             except Exception as e:
+                 ASCIIColors.warning(f"Could not switch scheduler to {scheduler_name_key}: {e}. Using current default.")
+
+ class PipelineRegistry:
+     _instance = None
+     _lock = threading.Lock()
+
+     def __new__(cls, *args, **kwargs):
+         with cls._lock:
+             if cls._instance is None:
+                 cls._instance = super().__new__(cls)
+                 cls._instance._managers = {}
+                 cls._instance._registry_lock = threading.Lock()
+             return cls._instance
+
+     def _get_config_key(self, config: Dict[str, Any]) -> str:
+         critical_keys = [
+             "model_name", "device", "torch_dtype_str", "use_safetensors",
+             "safety_checker_on", "hf_variant", "enable_cpu_offload",
+             "enable_sequential_cpu_offload", "enable_xformers",
+             "local_files_only", "hf_cache_path"
+         ]
+         key_data = tuple(sorted((k, config.get(k)) for k in critical_keys))
+         return hashlib.sha256(str(key_data).encode('utf-8')).hexdigest()
+
+     def get_manager(self, config: Dict[str, Any], models_path: Path) -> ModelManager:
+         key = self._get_config_key(config)
+         with self._registry_lock:
+             if key not in self._managers:
+                 self._managers[key] = ModelManager(config.copy(), models_path)
+             return self._managers[key].acquire()
+
+     def release_manager(self, config: Dict[str, Any]):
+         key = self._get_config_key(config)
+         with self._registry_lock:
+             if key in self._managers:
+                 manager = self._managers[key]
+                 ref_count = manager.release()
+                 if ref_count == 0:
+                     ASCIIColors.info(f"Reference count for model '{config.get('model_name')}' is zero. Cleaning up.")
+                     manager.stop()
+                     manager._unload_pipeline()
+                     del self._managers[key]
+
+ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
+     DEFAULT_CONFIG = {
+         "model_name": "", "device": "auto", "torch_dtype_str": "auto", "use_safetensors": True,
+         "scheduler_name": "default", "safety_checker_on": True, "num_inference_steps": 25,
+         "guidance_scale": 7.0, "default_width": 512, "default_height": 512, "seed": -1,
+         "enable_cpu_offload": False, "enable_sequential_cpu_offload": False, "enable_xformers": False,
+         "hf_variant": None, "hf_token": None, "hf_cache_path": None, "local_files_only": False,
+     }
+
+     def __init__(self, **kwargs):
+         super().__init__(binding_name=BindingName)
+
+         if not DIFFUSERS_AVAILABLE:
+             raise ImportError(
+                 "Diffusers or its dependencies not installed. "
+                 "Please run: pip install torch torchvision diffusers Pillow transformers safetensors requests tqdm"
+             )
+
+         self.config = {**self.DEFAULT_CONFIG, **kwargs}
+         self.model_name = self.config.get("model_name", "")
+         self.models_path = Path(kwargs.get("models_path", Path(__file__).parent / "models"))
+         self.models_path.mkdir(parents=True, exist_ok=True)
+
+         self.registry = PipelineRegistry()
+         self.manager: Optional[ModelManager] = None
+
+         self._resolve_device_and_dtype()
+         if self.model_name:
+             self._acquire_manager()
+
+     def _acquire_manager(self):
+         if self.manager:
+             self.registry.release_manager(self.manager.config)
+         self.manager = self.registry.get_manager(self.config, self.models_path)
+         ASCIIColors.info(f"Binding instance acquired manager for '{self.config['model_name']}'.")
+
+     def _resolve_device_and_dtype(self):
+         if self.config["device"].lower() == "auto":
+             self.config["device"] = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
+
+         if self.config["torch_dtype_str"].lower() == "auto":
+             self.config["torch_dtype_str"] = "float16" if self.config["device"] != "cpu" else "float32"
+
+     def list_safetensor_models(self) -> List[str]:
+         if not self.models_path.exists(): return []
+         return sorted([f.name for f in self.models_path.iterdir() if f.is_file() and f.suffix == ".safetensors"])
+
+     def listModels(self) -> list:
+         # Start with hardcoded Civitai and Hugging Face models
+         civitai_list = [
+             {'model_name': key, 'display_name': info['display_name'], 'description': info['description'], 'owned_by': info['owned_by']}
+             for key, info in CIVITAI_MODELS.items()
+         ]
+         hf_default_list = [
+             # SDXL Models (1024x1024 native)
+             {'model_name': "stabilityai/stable-diffusion-xl-base-1.0", 'display_name': "Stable Diffusion XL 1.0", 'description': "Official SDXL base model from Stability AI. Native resolution is 1024x1024.", 'owned_by': 'HuggingFace'},
+             {'model_name': "playgroundai/playground-v2.5-1024px-aesthetic", 'display_name': "Playground v2.5", 'description': "Known for high aesthetic quality. Native resolution is 1024x1024.", 'owned_by': 'HuggingFace'},
+             # SD 1.5 Models (512x512 native)
+             {'model_name': "runwayml/stable-diffusion-v1-5", 'display_name': "Stable Diffusion 1.5", 'description': "A popular and versatile open-access text-to-image model.", 'owned_by': 'HuggingFace'},
+             {'model_name': "dataautogpt3/OpenDalleV1.1", 'display_name': "OpenDalle v1.1", 'description': "An open-source reproduction of DALL-E 3, good for prompt adherence.", 'owned_by': 'HuggingFace'},
+             {'model_name': "stabilityai/stable-diffusion-2-1-base", 'display_name': "Stable Diffusion 2.1 (512px)", 'description': "A 512x512 resolution model from Stability AI.", 'owned_by': 'HuggingFace'},
+             {'model_name': "CompVis/stable-diffusion-v1-4", 'display_name': "Stable Diffusion 1.4 (Gated)", 'description': "Original SD v1.4. Requires accepting license on Hugging Face and an HF token.", 'owned_by': 'HuggingFace'}
+         ]
+
+         # Discover local .safetensors files
+         custom_local_models = []
+         civitai_filenames = {info['filename'] for info in CIVITAI_MODELS.values()}
+         local_safetensors = self.list_safetensor_models()
+
+         for filename in local_safetensors:
+             if filename not in civitai_filenames:
+                 custom_local_models.append({
+                     'model_name': filename,
+                     'display_name': filename,
+                     'description': 'Local safetensors file from your models folder.',
+                     'owned_by': 'local_user'
+                 })
+
+         return civitai_list + hf_default_list + custom_local_models
+
+     def load_model(self):
+         ASCIIColors.info("load_model() called. Loading is now automatic.")
+         if self.model_name and not self.manager:
+             self._acquire_manager()
+
+     def unload_model(self):
+         if self.manager:
+             ASCIIColors.info(f"Binding instance releasing manager for '{self.manager.config['model_name']}'.")
+             self.registry.release_manager(self.manager.config)
+             self.manager = None
+
+     def generate_image(self, prompt: str, negative_prompt: str = "", width: int = None, height: int = None, **kwargs) -> bytes:
+         if not self.model_name:
+             raise RuntimeError("No model_name configured. Please select a model in settings.")
+
+         if not self.manager:
+             self._acquire_manager()
+
+         _width = width or self.config["default_width"]
+         _height = height or self.config["default_height"]
+         _num_inference_steps = kwargs.get("num_inference_steps", self.config["num_inference_steps"])
+         _guidance_scale = kwargs.get("guidance_scale", self.config["guidance_scale"])
+         _seed = kwargs.get("seed", self.config["seed"])
+
+         generator = torch.Generator(device=self.config["device"]).manual_seed(_seed) if _seed != -1 else None
+
+         pipeline_args = {
+             "prompt": prompt, "negative_prompt": negative_prompt or None, "width": _width,
+             "height": _height, "num_inference_steps": _num_inference_steps,
+             "guidance_scale": _guidance_scale, "generator": generator,
+         }
+
+         future = Future()
+         self.manager.queue.put((future, pipeline_args))
+         ASCIIColors.info(f"Job for prompt '{prompt[:50]}...' queued. Waiting...")
+
+         try:
+             image_bytes = future.result()
+             ASCIIColors.green("Image generated successfully.")
+             return image_bytes
+         except Exception as e:
+             raise Exception(f"Image generation failed: {e}") from e
+
+     def list_local_models(self) -> List[str]:
+         if not self.models_path.exists(): return []
+
+         folders = [
+             d.name for d in self.models_path.iterdir()
+             if d.is_dir() and ((d / "model_index.json").exists() or (d / "unet" / "config.json").exists())
+         ]
+         safetensors = self.list_safetensor_models()
+         return sorted(folders + safetensors)
+
+     def list_available_models(self) -> List[str]:
+         discoverable_models = [m['model_name'] for m in self.listModels()]
+         local_models = self.list_local_models()
+
+         combined_list = sorted(list(set(local_models + discoverable_models)))
+         return combined_list
+
+     def list_services(self, **kwargs) -> List[Dict[str, str]]:
+         models = self.list_available_models()
+         local_models = self.list_local_models()
+
+         if not models:
+             return [{"name": "diffusers_no_models", "caption": "No models found", "help": f"Place models in '{self.models_path.resolve()}'."}]
+
+         services = []
+         for m in models:
+             help_text = "Hugging Face model ID"
+             if m in local_models:
+                 help_text = f"Local model from: {self.models_path.resolve()}"
+             elif m in CIVITAI_MODELS:
+                 filename = CIVITAI_MODELS[m]['filename']
+                 help_text = f"Civitai model (downloads as {filename})"
+
+             services.append({"name": m, "caption": f"Diffusers: {m}", "help": help_text})
+         return services
+
+     def get_settings(self, **kwargs) -> List[Dict[str, Any]]:
+         available_models = self.list_available_models()
+         return [
+             {"name": "model_name", "type": "str", "value": self.model_name, "description": "Local, Civitai, or Hugging Face model.", "options": available_models},
+             {"name": "device", "type": "str", "value": self.config["device"], "description": f"Inference device. Resolved: {self.config['device']}", "options": ["auto", "cuda", "mps", "cpu"]},
+             {"name": "torch_dtype_str", "type": "str", "value": self.config["torch_dtype_str"], "description": f"Torch dtype. Resolved: {self.config['torch_dtype_str']}", "options": ["auto", "float16", "bfloat16", "float32"]},
+             {"name": "hf_variant", "type": "str", "value": self.config["hf_variant"], "description": "HF model variant (e.g., 'fp16')."},
+             {"name": "use_safetensors", "type": "bool", "value": self.config["use_safetensors"], "description": "Prefer .safetensors when loading from Hugging Face."},
+             {"name": "scheduler_name", "type": "str", "value": self.config["scheduler_name"], "description": "Scheduler for diffusion.", "options": list(SCHEDULER_MAPPING.keys())},
+             {"name": "safety_checker_on", "type": "bool", "value": self.config["safety_checker_on"], "description": "Enable the safety checker."},
+             {"name": "enable_cpu_offload", "type": "bool", "value": self.config["enable_cpu_offload"], "description": "Enable model CPU offload (saves VRAM, slower)."},
+             {"name": "enable_sequential_cpu_offload", "type": "bool", "value": self.config["enable_sequential_cpu_offload"], "description": "Enable sequential CPU offload (more VRAM savings, much slower)."},
+             {"name": "enable_xformers", "type": "bool", "value": self.config["enable_xformers"], "description": "Enable xFormers memory efficient attention."},
+             {"name": "default_width", "type": "int", "value": self.config["default_width"], "description": "Default image width. Note: SDXL models prefer 1024."},
+             {"name": "default_height", "type": "int", "value": self.config["default_height"], "description": "Default image height. Note: SDXL models prefer 1024."},
+             {"name": "num_inference_steps", "type": "int", "value": self.config["num_inference_steps"], "description": "Default inference steps."},
+             {"name": "guidance_scale", "type": "float", "value": self.config["guidance_scale"], "description": "Default guidance scale (CFG)."},
+             {"name": "seed", "type": "int", "value": self.config["seed"], "description": "Default seed (-1 for random)."},
+             {"name": "hf_token", "type": "str", "value": self.config["hf_token"], "description": "HF API token (for private/gated models).", "is_secret": True},
+             {"name": "hf_cache_path", "type": "str", "value": self.config["hf_cache_path"], "description": "Path to HF cache."},
+             {"name": "local_files_only", "type": "bool", "value": self.config["local_files_only"], "description": "Do not download from Hugging Face."},
+         ]
+
+     def set_settings(self, settings: Union[Dict[str, Any], List[Dict[str, Any]]], **kwargs) -> bool:
+         parsed_settings = settings if isinstance(settings, dict) else \
+             {item["name"]: item["value"] for item in settings if "name" in item and "value" in item}
+
+         # Bug fix: the released code read `self.registry._get_config_key({}).__self__.critical_keys`,
+         # which raises AttributeError (the method returns a plain hex digest string).
+         # The list below mirrors the keys hashed in PipelineRegistry._get_config_key.
+         critical_keys = [
+             "model_name", "device", "torch_dtype_str", "use_safetensors",
+             "safety_checker_on", "hf_variant", "enable_cpu_offload",
+             "enable_sequential_cpu_offload", "enable_xformers",
+             "local_files_only", "hf_cache_path"
+         ]
+         needs_manager_swap = False
+
+         for key, value in parsed_settings.items():
+             if self.config.get(key) != value:
+                 ASCIIColors.info(f"Setting '{key}' changed to: {value}")
+                 self.config[key] = value
+                 if key == "model_name": self.model_name = value
+                 if key in critical_keys: needs_manager_swap = True
+
+         if needs_manager_swap and self.model_name:
+             ASCIIColors.info("Critical settings changed. Swapping model manager...")
+             self._resolve_device_and_dtype()
+             self._acquire_manager()
+
+         if not needs_manager_swap and self.manager:
+             self.manager.config.update(parsed_settings)
+             if 'scheduler_name' in parsed_settings and self.manager.pipeline:
+                 with self.manager.lock:
+                     self.manager._set_scheduler()
+
+         return True
+
+     def __del__(self):
+         self.unload_model()
+
+ # Example Usage
+ if __name__ == '__main__':
+     ASCIIColors.magenta("--- Diffusers TTI Binding Test ---")
+
+     if not DIFFUSERS_AVAILABLE:
+         ASCIIColors.error("Diffusers not available. Cannot run test.")
+         exit(1)
+
+     temp_paths_dir = Path(__file__).parent / "temp_lollms_paths_diffusers"
+     temp_models_path = temp_paths_dir / "models"
+
+     if temp_paths_dir.exists(): shutil.rmtree(temp_paths_dir)
+     temp_models_path.mkdir(parents=True, exist_ok=True)
+
+     try:
+         ASCIIColors.cyan("\n--- Test: Loading a Hugging Face model ---")
+         # Using a very small model for fast testing
+         binding_config = {"models_path": str(temp_models_path), "model_name": "hf-internal-testing/tiny-stable-diffusion-torch"}
+         binding = DiffusersTTIBinding_Impl(**binding_config)
+
+         img_bytes = binding.generate_image("a tiny robot", width=64, height=64, num_inference_steps=2)
+         assert len(img_bytes) > 1000, "Image generation from HF model should succeed."
+         ASCIIColors.green("HF model loading and generation successful.")
+
+         del binding
+         time.sleep(0.1)
+
+     except Exception as e:
+         trace_exception(e)
+         ASCIIColors.error(f"Diffusers binding test failed: {e}")
+     finally:
+         ASCIIColors.cyan("\nCleaning up temporary directories...")
+         if temp_paths_dir.exists():
+             shutil.rmtree(temp_paths_dir)
+     ASCIIColors.magenta("--- Diffusers TTI Binding Test Finished ---")
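
For reference, the new binding can be exercised the same way the __main__ block above does; a minimal usage sketch (the tiny hf-internal-testing checkpoint keeps the download small, and the models path and output filename are arbitrary choices, not part of this diff):

    from pathlib import Path
    from lollms_client.tti_bindings.diffusers import DiffusersTTIBinding_Impl

    binding = DiffusersTTIBinding_Impl(
        models_path="./models",  # where Civitai downloads and local .safetensors live
        model_name="hf-internal-testing/tiny-stable-diffusion-torch",
    )
    png_bytes = binding.generate_image("a tiny robot", width=64, height=64, num_inference_steps=2)
    Path("robot.png").write_bytes(png_bytes)
    binding.unload_model()  # drops the ref count; the shared pipeline unloads at zero

Because PipelineRegistry keys managers by the critical load settings, two binding instances configured identically share one pipeline and one generation queue, so concurrent generate_image() calls are serialized through the manager's worker thread.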
src/lollms_client.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 1.1.0
+ Version: 1.1.1
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License