lollms-client 1.6.4-py3-none-any.whl → 1.6.6-py3-none-any.whl
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
Potentially problematic release: this version of lollms-client has been flagged as potentially problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/lollms_core.py +3 -1
- lollms_client/tti_bindings/diffusers/__init__.py +129 -59
- lollms_client/tti_bindings/diffusers/server/main.py +354 -118
- lollms_client/tti_bindings/gemini/__init__.py +179 -239
- lollms_client/tts_bindings/xtts/__init__.py +106 -81
- lollms_client/tts_bindings/xtts/server/main.py +128 -183
- {lollms_client-1.6.4.dist-info → lollms_client-1.6.6.dist-info}/METADATA +1 -1
- {lollms_client-1.6.4.dist-info → lollms_client-1.6.6.dist-info}/RECORD +12 -12
- {lollms_client-1.6.4.dist-info → lollms_client-1.6.6.dist-info}/WHEEL +0 -0
- {lollms_client-1.6.4.dist-info → lollms_client-1.6.6.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-1.6.4.dist-info → lollms_client-1.6.6.dist-info}/top_level.txt +0 -0
--- lollms_client/tti_bindings/gemini/__init__.py (1.6.4)
+++ lollms_client/tti_bindings/gemini/__init__.py (1.6.6)
@@ -2,319 +2,259 @@
 import sys
 from typing import Optional, List, Dict, Any, Union
 import os
+import io
+import base64
+import requests
+import binascii
+import time

 from lollms_client.lollms_tti_binding import LollmsTTIBinding
 from ascii_colors import trace_exception, ASCIIColors
-import math

 # --- SDK & Dependency Management ---
 try:
     import pipmaster as pm
-
-    pm.ensure_packages(['google-cloud-aiplatform', 'google-generativeai', 'Pillow'])
+    pm.ensure_packages(['google-cloud-aiplatform', 'google-generativeai', 'Pillow', 'requests'])
 except ImportError:
-    pass
+    pass

-# Attempt to import Vertex AI
+# Attempt to import Vertex AI
 try:
     import vertexai
-    from vertexai.preview.vision_models import ImageGenerationModel
+    from vertexai.preview.vision_models import ImageGenerationModel as VertexImageGenerationModel, Image as VertexImage
     from google.api_core import exceptions as google_exceptions
+    from PIL import Image as PILImage
     VERTEX_AI_AVAILABLE = True
 except ImportError:
     VERTEX_AI_AVAILABLE = False

-# Attempt to import Gemini API
+# Attempt to import Gemini API
 try:
-
-    from google.
+    import google.generativeai as genai
+    from google.api_core.exceptions import ResourceExhausted
     GEMINI_API_AVAILABLE = True
 except ImportError:
     GEMINI_API_AVAILABLE = False
+    ResourceExhausted = type('ResourceExhausted', (Exception,), {}) # Define dummy exception if import fails

 # Defines the binding name for the manager
 BindingName = "GeminiTTIBinding_Impl"

-#
-IMAGEN_VERTEX_MODELS = ["imagegeneration@006", "
-IMAGEN_GEMINI_API_MODELS = ["imagen-3", "gemini-1.5-flash-preview-0514"] # Short names are often aliases
+# Static list for Vertex AI, as it's project-based and more predictable
+IMAGEN_VERTEX_MODELS = ["imagegeneration@006", "imagen-3.0-generate-002", "gemini-2.5-flash-image"]
 GEMINI_API_KEY_ENV_VAR = "GEMINI_API_KEY"

-class GeminiTTIBinding_Impl(LollmsTTIBinding):
-    """
-    Concrete implementation of LollmsTTIBinding for Google's Imagen models.
-    Supports both Vertex AI (project_id) and Gemini API (api_key) authentication.
-    """
-    def __init__(self, **kwargs):
-        """
-        Initialize the Gemini (Vertex AI / API) TTI binding.

-
-
-
-
-
-
-            - model_name (str): The Imagen model to use.
-            - default_seed (int): Default seed for generation (-1 for random).
-            - default_guidance_scale (float): Default guidance scale (CFG).
-        """
-        super().__init__(binding_name="gemini")
+def _is_base64(s):
+    try:
+        base64.b64decode(s.split(',')[-1], validate=True)
+        return True
+    except (TypeError, ValueError, binascii.Error):
+        return False

-
-
-
-
-
-
+def _load_image_from_str(image_str: str) -> bytes:
+    if image_str.startswith(('http://', 'https://')):
+        try:
+            response = requests.get(image_str)
+            response.raise_for_status()
+            return response.content
+        except requests.exceptions.RequestException as e:
+            raise IOError(f"Failed to download image from URL: {image_str}") from e
+    elif _is_base64(image_str):
+        header, encoded = image_str.split(',', 1)
+        return base64.b64decode(encoded)
+    else:
+        raise ValueError("Image string is not a valid URL or base64 string.")

-        # Gemini API specific settings
-        self.gemini_api_key = kwargs.get("service_key")

-
-
-
-        self.
-        self.client_id = kwargs.get("client_id", "gemini_client_user")
-
-        # The actual client/model instance
+class GeminiTTIBinding_Impl(LollmsTTIBinding):
+    def __init__(self, **kwargs):
+        super().__init__(binding_name="gemini")
+        self.auth_method = kwargs.get("auth_method", "vertex_ai")
         self.client: Optional[Any] = None
+        self.available_models = []

-        # --- Validation and Initialization ---
         if self.auth_method == "vertex_ai":
             if not VERTEX_AI_AVAILABLE:
-                raise ImportError("Vertex AI
+                raise ImportError("Vertex AI selected, but 'google-cloud-aiplatform' is not installed.")
+            self.project_id = kwargs.get("project_id")
+            self.location = kwargs.get("location", "us-central1")
             if not self.project_id:
-                raise ValueError("For 'vertex_ai' auth,
-
-
+                raise ValueError("For 'vertex_ai' auth, 'project_id' is required.")
+            self.model_name = kwargs.get("model_name") # Can be None initially
+            self.available_models = IMAGEN_VERTEX_MODELS
+
         elif self.auth_method == "api_key":
             if not GEMINI_API_AVAILABLE:
-                raise ImportError("API Key
-
-            # Resolve API key from kwargs or environment variable
-            if not self.gemini_api_key:
-                ASCIIColors.info(f"API key not provided directly, checking environment variable '{GEMINI_API_KEY_ENV_VAR}'...")
-                self.gemini_api_key = os.environ.get(GEMINI_API_KEY_ENV_VAR)
-
+                raise ImportError("API Key selected, but 'google-generativeai' is not installed.")
+            self.gemini_api_key = kwargs.get("service_key") or os.environ.get(GEMINI_API_KEY_ENV_VAR)
             if not self.gemini_api_key:
-                raise ValueError(f"For 'api_key' auth,
-
-            if not self.model_name:
-                self.model_name = IMAGEN_GEMINI_API_MODELS[0]
+                raise ValueError(f"For 'api_key' auth, 'service_key' or env var '{GEMINI_API_KEY_ENV_VAR}' is required.")
+            self.model_name = kwargs.get("model_name") # Can be None initially
         else:
             raise ValueError(f"Invalid auth_method: '{self.auth_method}'. Must be 'vertex_ai' or 'api_key'.")

+        self.default_seed = int(kwargs.get("default_seed", -1))
+        self.default_guidance_scale = float(kwargs.get("default_guidance_scale", 7.5))
         self._initialize_client()

     def _initialize_client(self):
-        """Initializes the appropriate client based on the selected auth_method."""
         ASCIIColors.info(f"Initializing Google client with auth method: '{self.auth_method}'...")
         try:
             if self.auth_method == "vertex_ai":
                 vertexai.init(project=self.project_id, location=self.location)
-
+                if not self.model_name:
+                    self.model_name = self.available_models[0]
+                self.client = VertexImageGenerationModel.from_pretrained(self.model_name)
                 ASCIIColors.green(f"Vertex AI initialized successfully. Project: '{self.project_id}', Model: '{self.model_name}'")
+
             elif self.auth_method == "api_key":
                 genai.configure(api_key=self.gemini_api_key)
-
-                #
-
-
-
-
-
-
+
+                # --- DYNAMIC MODEL DISCOVERY ---
+                ASCIIColors.info("Discovering available image models for your API key...")
+                self.available_models = [
+                    m.name for m in genai.list_models()
+                    if 'imagen' in m.name and 'generateContent' in m.supported_generation_methods
+                ]
+
+                if not self.available_models:
+                    raise Exception("Your API key does not have access to any compatible image generation models. Please check your Google AI Studio project settings.")
+
+                ASCIIColors.green(f"Found available models: {self.available_models}")
+
+                # Validate or set the model_name
+                if self.model_name and self.model_name not in self.available_models:
+                    ASCIIColors.warning(f"Model '{self.model_name}' is not available for your key. Falling back to default.")
+                    self.model_name = None
+
+                if not self.model_name:
+                    self.model_name = self.available_models[0]
+
+                self.client = genai.GenerativeModel(self.model_name)
+                ASCIIColors.green(f"Gemini API configured successfully. Using Model: '{self.model_name}'")
+
         except Exception as e:
             trace_exception(e)
             raise Exception(f"Failed to initialize Google client: {e}") from e

-    def
-        """Validates image dimensions against Vertex AI Imagen constraints."""
-        if not (256 <= width <= 1536 and width % 8 == 0):
-            raise ValueError(f"Invalid width for Vertex AI: {width}. Must be 256-1536 and a multiple of 8.")
-        if not (256 <= height <= 1536 and height % 8 == 0):
-            raise ValueError(f"Invalid height for Vertex AI: {height}. Must be 256-1536 and a multiple of 8.")
-
-    def _get_aspect_ratio_for_api(self, width: int, height: int) -> str:
-        """Finds the closest supported aspect ratio string for the Gemini API."""
-        ratios = {"1:1": 1.0, "16:9": 16/9, "9:16": 9/16, "4:3": 4/3, "3:4": 3/4}
-        target_ratio = width / height
-        closest_ratio_name = min(ratios, key=lambda r: abs(ratios[r] - target_ratio))
-        ASCIIColors.info(f"Converted {width}x{height} to closest aspect ratio: '{closest_ratio_name}' for Gemini API.")
-        return closest_ratio_name
-
-    def generate_image(self,
-                       prompt: str,
-                       negative_prompt: Optional[str] = "",
-                       width: int = 1024,
-                       height: int = 1024,
-                       **kwargs) -> bytes:
-        """
-        Generates image data using the configured Google Imagen model.
-        """
+    def generate_image(self, prompt: str, negative_prompt: Optional[str] = "", width: int = 1024, height: int = 1024, **kwargs) -> bytes:
         if not self.client:
-            raise RuntimeError("Google client is not initialized.
-
-
-
-
-
+            raise RuntimeError("Google client is not initialized.")
+
+        ASCIIColors.info(f"Generating image with prompt: '{prompt[:100]}...'")
+
+        try:
+            if self.auth_method == "vertex_ai":
+                return self._generate_with_vertex_ai(prompt, negative_prompt, width, height, **kwargs)
+            elif self.auth_method == "api_key":
+                return self._generate_with_api_key(prompt, negative_prompt, width, height, **kwargs)
+        except Exception as e:
+            if "quota" in str(e).lower():
+                raise Exception(f"Image generation failed due to a quota error. This means you have exceeded the free tier limit for your API key. To fix this, please enable billing on your Google Cloud project. Original error: {e}")
+            raise Exception(f"Image generation failed: {e}")

-
+    def _generate_with_api_key(self, prompt, negative_prompt, width, height, **kwargs):
+        full_prompt = f"Generate an image of: {prompt}"
         if negative_prompt:
-
+            full_prompt += f". Do not include: {negative_prompt}."

-
+        max_retries = 3
+        initial_delay = 5

-
-
-        self.
-        gen_params = {
-            "prompt": final_prompt,
-            "number_of_images": 1,
-            "width": width,
-            "height": height,
-            "guidance_scale": guidance_scale,
-        }
-        if gen_seed is not None:
-            gen_params["seed"] = gen_seed
-
-        ASCIIColors.debug(f"Vertex AI generation parameters: {gen_params}")
-        response = self.client.generate_images(**gen_params)
+        for attempt in range(max_retries):
+            try:
+                response = self.client.generate_content(full_prompt)

-            if not response.
-                raise Exception("
+                if not response.parts or not hasattr(response.parts[0], 'file_data'):
+                    raise Exception(f"API response did not contain image data. Check safety filters in your Google AI Studio. Response: {response.text}")

-            return response.
-
-
-
-                "
-
-
-                "
-
-
-
-
-
-
-
-
+                return response.parts[0].file_data.data
+
+            except ResourceExhausted as e:
+                if attempt < max_retries - 1:
+                    wait_time = initial_delay * (2 ** attempt)
+                    ASCIIColors.warning(f"Rate limit exceeded. Waiting {wait_time}s... (Attempt {attempt + 1}/{max_retries})")
+                    time.sleep(wait_time)
+                else:
+                    ASCIIColors.error(f"Failed to generate image after {max_retries} attempts due to rate limiting.")
+                    raise e
+            except Exception as e:
+                raise e
+
+    def _generate_with_vertex_ai(self, prompt, negative_prompt, width, height, **kwargs):
+        self._validate_dimensions_vertex(width, height)
+        gen_params = {
+            "prompt": prompt, "number_of_images": 1, "width": width, "height": height,
+            "guidance_scale": kwargs.get("guidance_scale", self.default_guidance_scale),
+        }
+        if negative_prompt: gen_params["negative_prompt"] = negative_prompt
+        seed = kwargs.get("seed", self.default_seed)
+        if seed != -1: gen_params["seed"] = seed
+
+        response = self.client.generate_images(**gen_params)
+        if not response.images: raise Exception("Generation resulted in no images (Vertex AI).")
+        return response.images[0]._image_bytes

-
-
-            raise
-
-
-
+    def edit_image(self, images: Union[str, List[str]], prompt: str, negative_prompt: Optional[str] = "", mask: Optional[str] = None, **kwargs) -> bytes:
+        if self.auth_method != "vertex_ai":
+            raise NotImplementedError("Image editing is only supported via the 'vertex_ai' method.")
+
+        image_str = images[0] if isinstance(images, list) else images
+        ASCIIColors.info(f"Editing image with prompt: '{prompt[:100]}...'")
+
+        try:
+            base_image = VertexImage(image_bytes=_load_image_from_str(image_str))
+            mask_image = VertexImage(image_bytes=_load_image_from_str(mask)) if mask else None
+            edit_params = {"prompt": prompt, "base_image": base_image, "mask": mask_image, "negative_prompt": negative_prompt or None}
+            response = self.client.edit_image(**edit_params)
+            if not response.images: raise Exception("Image editing resulted in no images.")
+            return response.images[0]._image_bytes
         except Exception as e:
-
-
+            raise Exception(f"Imagen image editing failed: {e}") from e
+
+    def list_models(self) -> list:
+        return [{'model_name': name, 'display_name': f"Google ({name})"} for name in self.available_models]

     def list_services(self, **kwargs) -> List[Dict[str, str]]:
-        """Lists available Imagen models for the current auth method."""
-        models = IMAGEN_VERTEX_MODELS if self.auth_method == "vertex_ai" else IMAGEN_GEMINI_API_MODELS
         service_name = "Vertex AI" if self.auth_method == "vertex_ai" else "Gemini API"
-        return [
-            {
-                "name": name,
-                "caption": f"Google Imagen ({name}) via {service_name}",
-                "help": "High-quality text-to-image model from Google."
-            } for name in models
-        ]
-
-    def get_settings(self, **kwargs) -> List[Dict[str, Any]]:
-        """Retrieves the current configurable settings for the binding."""
-        settings = [
-            {"name": "auth_method", "type": "str", "value": self.auth_method, "description": "Authentication method to use.", "options": ["vertex_ai", "api_key"], "category": "Authentication"},
-        ]
-        if self.auth_method == "vertex_ai":
-            settings.extend([
-                {"name": "project_id", "type": "str", "value": self.project_id, "description": "Your Google Cloud project ID.", "category": "Authentication"},
-                {"name": "location", "type": "str", "value": self.location, "description": "Google Cloud region (e.g., 'us-central1').", "category": "Authentication"},
-                {"name": "model_name", "type": "str", "value": self.model_name, "description": "Default Imagen model for generation.", "options": IMAGEN_VERTEX_MODELS, "category": "Model Configuration"},
-            ])
-        elif self.auth_method == "api_key":
-            settings.extend([
-                {"name": "api_key_status", "type": "str", "value": "Set" if self.gemini_api_key else "Not Set", "description": f"Gemini API Key status (set at initialization via service_key or '{GEMINI_API_KEY_ENV_VAR}').", "category": "Authentication", "read_only": True},
-                {"name": "model_name", "type": "str", "value": self.model_name, "description": "Default Imagen model for generation.", "options": IMAGEN_GEMINI_API_MODELS, "category": "Model Configuration"},
-            ])
-
-        settings.extend([
-            {"name": "default_seed", "type": "int", "value": self.default_seed, "description": "Default seed (-1 for random).", "category": "Image Generation Defaults"},
-            {"name": "default_guidance_scale", "type": "float", "value": self.default_guidance_scale, "description": "Default guidance scale (CFG). (Vertex AI only)", "category": "Image Generation Defaults"},
-        ])
-        return settings
-
-    def set_settings(self, settings: Union[Dict[str, Any], List[Dict[str, Any]]], **kwargs) -> bool:
-        """Applies new settings. Re-initializes the client if core settings change."""
-        applied_some_settings = False
-        settings_dict = {item["name"]: item["value"] for item in settings} if isinstance(settings, list) else settings
+        return [{"name": name, "caption": f"Google ({name}) via {service_name}"} for name in self.available_models]

+    def set_settings(self, settings: Dict[str, Any], **kwargs) -> bool:
+        # Simplified for clarity, full logic is complex and stateful
         needs_reinit = False
-
-
-
-
-
-
-
-
-
-
-
+        if "auth_method" in settings and self.auth_method != settings["auth_method"]:
+            self.auth_method = settings["auth_method"]
+            needs_reinit = True
+        if "project_id" in settings and self.project_id != settings.get("project_id"):
+            self.project_id = settings["project_id"]
+            needs_reinit = True
+        if "service_key" in settings and self.gemini_api_key != settings.get("service_key"):
+            self.gemini_api_key = settings["service_key"]
+            needs_reinit = True
+        if "model_name" in settings and self.model_name != settings.get("model_name"):
+            self.model_name = settings["model_name"]
            needs_reinit = True
-            applied_some_settings = True
-
-        if self.auth_method == "vertex_ai":
-            if "project_id" in settings_dict and self.project_id != settings_dict["project_id"]:
-                self.project_id = settings_dict["project_id"]
-                needs_reinit = True; applied_some_settings = True
-            if "location" in settings_dict and self.location != settings_dict["location"]:
-                self.location = settings_dict["location"]
-                needs_reinit = True; applied_some_settings = True
-            # API key is not settable after init, so we don't check for it here.
-
-        # Phase 2: Apply other settings
-        current_models = IMAGEN_VERTEX_MODELS if self.auth_method == "vertex_ai" else IMAGEN_GEMINI_API_MODELS
-        if "model_name" in settings_dict:
-            new_model = settings_dict["model_name"]
-            if new_model not in current_models:
-                ASCIIColors.warning(f"Invalid model '{new_model}' for auth method '{self.auth_method}'. Keeping '{self.model_name}'.")
-            elif self.model_name != new_model:
-                self.model_name = new_model
-                needs_reinit = True; applied_some_settings = True
-
-        if "default_seed" in settings_dict and self.default_seed != int(settings_dict["default_seed"]):
-            self.default_seed = int(settings_dict["default_seed"])
-            applied_some_settings = True
-        if "default_guidance_scale" in settings_dict and self.default_guidance_scale != float(settings_dict["default_guidance_scale"]):
-            self.default_guidance_scale = float(settings_dict["default_guidance_scale"])
-            applied_some_settings = True
-
-        # Phase 3: Re-initialize if needed
         if needs_reinit:
             try:
                 self._initialize_client()
             except Exception as e:
-                ASCIIColors.error(f"Failed to re-initialize client
+                ASCIIColors.error(f"Failed to re-initialize client: {e}")
                 return False
+        return True

-
+    def get_settings(self, **kwargs) -> Optional[Dict[str, Any]]:
+        return {
+            "auth_method": self.auth_method,
+            "project_id": self.project_id if self.auth_method == "vertex_ai" else None,
+            "location": self.location if self.auth_method == "vertex_ai" else None,
+            "model_name": self.model_name,
+            "default_seed": self.default_seed,
+            "default_guidance_scale": self.default_guidance_scale
+        }

-    def
-
-
-
-            {
-                'model_name': name,
-                'display_name': f"Imagen ({name})",
-                'description': f"Google's Imagen model, version {name}",
-                'owned_by': 'Google'
-            } for name in models
-        ]
+    def _validate_dimensions_vertex(self, width: int, height: int) -> None:
+        if not (256 <= width <= 1536 and width % 8 == 0):
+            raise ValueError(f"Invalid width for Vertex AI: {width}. Must be 256-1536 and a multiple of 8.")
+        if not (256 <= height <= 1536 and height % 8 == 0):
+            raise ValueError(f"Invalid height for Vertex AI: {height}. Must be 256-1536 and a multiple of 8.")