isa-model 0.3.3__py3-none-any.whl → 0.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- isa_model/config/__init__.py +9 -0
- isa_model/config/config_manager.py +213 -0
- isa_model/core/model_manager.py +5 -0
- isa_model/core/model_registry.py +39 -6
- isa_model/core/storage/supabase_storage.py +344 -0
- isa_model/core/vision_models_init.py +116 -0
- isa_model/deployment/cloud/__init__.py +9 -0
- isa_model/deployment/cloud/modal/__init__.py +10 -0
- isa_model/deployment/cloud/modal/isa_vision_doc_service.py +612 -0
- isa_model/deployment/cloud/modal/isa_vision_ui_service.py +305 -0
- isa_model/inference/ai_factory.py +238 -14
- isa_model/inference/providers/modal_provider.py +109 -0
- isa_model/inference/providers/yyds_provider.py +108 -0
- isa_model/inference/services/__init__.py +2 -1
- isa_model/inference/services/base_service.py +0 -38
- isa_model/inference/services/llm/base_llm_service.py +32 -0
- isa_model/inference/services/llm/llm_adapter.py +73 -3
- isa_model/inference/services/llm/ollama_llm_service.py +104 -3
- isa_model/inference/services/llm/openai_llm_service.py +67 -15
- isa_model/inference/services/llm/yyds_llm_service.py +254 -0
- isa_model/inference/services/stacked/__init__.py +26 -0
- isa_model/inference/services/stacked/base_stacked_service.py +269 -0
- isa_model/inference/services/stacked/config.py +426 -0
- isa_model/inference/services/stacked/doc_analysis_service.py +640 -0
- isa_model/inference/services/stacked/flux_professional_service.py +579 -0
- isa_model/inference/services/stacked/ui_analysis_service.py +1319 -0
- isa_model/inference/services/vision/base_image_gen_service.py +0 -34
- isa_model/inference/services/vision/base_vision_service.py +46 -2
- isa_model/inference/services/vision/isA_vision_service.py +402 -0
- isa_model/inference/services/vision/openai_vision_service.py +151 -9
- isa_model/inference/services/vision/replicate_image_gen_service.py +166 -38
- isa_model/inference/services/vision/replicate_vision_service.py +693 -0
- isa_model/serving/__init__.py +19 -0
- isa_model/serving/api/__init__.py +10 -0
- isa_model/serving/api/fastapi_server.py +84 -0
- isa_model/serving/api/middleware/__init__.py +9 -0
- isa_model/serving/api/middleware/request_logger.py +88 -0
- isa_model/serving/api/routes/__init__.py +5 -0
- isa_model/serving/api/routes/health.py +82 -0
- isa_model/serving/api/routes/llm.py +19 -0
- isa_model/serving/api/routes/ui_analysis.py +223 -0
- isa_model/serving/api/routes/vision.py +19 -0
- isa_model/serving/api/schemas/__init__.py +17 -0
- isa_model/serving/api/schemas/common.py +33 -0
- isa_model/serving/api/schemas/ui_analysis.py +78 -0
- {isa_model-0.3.3.dist-info → isa_model-0.3.5.dist-info}/METADATA +1 -1
- {isa_model-0.3.3.dist-info → isa_model-0.3.5.dist-info}/RECORD +49 -17
- {isa_model-0.3.3.dist-info → isa_model-0.3.5.dist-info}/WHEEL +0 -0
- {isa_model-0.3.3.dist-info → isa_model-0.3.5.dist-info}/top_level.txt +0 -0
@@ -44,10 +44,6 @@ class ReplicateImageGenService(BaseImageGenService):
         # Set API token
         os.environ["REPLICATE_API_TOKEN"] = self.api_token
 
-        # Directory for generated images
-        self.output_dir = "generated_images"
-        os.makedirs(self.output_dir, exist_ok=True)
-
         # Statistics
         self.last_generation_count = 0
         self.total_generation_count = 0
@@ -133,6 +129,161 @@ class ReplicateImageGenService(BaseImageGenService):
 
         return await self._generate_internal(input_data)
 
+    async def instant_id_generation(
+        self,
+        prompt: str,
+        face_image: Union[str, Any],
+        negative_prompt: Optional[str] = None,
+        num_inference_steps: int = 30,
+        guidance_scale: float = 5.0,
+        seed: Optional[int] = None,
+        identitynet_strength_ratio: float = 0.8,
+        adapter_strength_ratio: float = 0.8
+    ) -> Dict[str, Any]:
+        """InstantID face-consistent generation"""
+
+        if "instant-id" in self.model_name:
+            input_data = {
+                "prompt": prompt,
+                "image": face_image,
+                "guidance_scale": guidance_scale,
+                "num_inference_steps": num_inference_steps,
+                "identitynet_strength_ratio": identitynet_strength_ratio,
+                "adapter_strength_ratio": adapter_strength_ratio
+            }
+
+            if negative_prompt:
+                input_data["negative_prompt"] = negative_prompt
+            if seed:
+                input_data["seed"] = seed
+        else:
+            # Default InstantID parameters
+            input_data = {
+                "prompt": prompt,
+                "face_image": face_image,
+                "negative_prompt": negative_prompt or "",
+                "num_inference_steps": num_inference_steps,
+                "guidance_scale": guidance_scale,
+                "identitynet_strength_ratio": identitynet_strength_ratio,
+                "adapter_strength_ratio": adapter_strength_ratio
+            }
+
+            if seed:
+                input_data["seed"] = seed
+
+        return await self._generate_internal(input_data)
+
+    async def consistent_character_generation(
+        self,
+        subject: Union[str, Any],
+        prompt: Optional[str] = None,
+        negative_prompt: Optional[str] = None,
+        number_of_images: int = 4,
+        disable_safety_checker: bool = False
+    ) -> Dict[str, Any]:
+        """Consistent character generation - the same character in multiple poses and expressions"""
+
+        if "consistent-character" in self.model_name:
+            input_data = {
+                "subject": subject,
+                "number_of_images": number_of_images,
+                "disable_safety_checker": disable_safety_checker
+            }
+
+            if prompt:
+                input_data["prompt"] = prompt
+            if negative_prompt:
+                input_data["negative_prompt"] = negative_prompt
+        else:
+            # Default consistent-character parameters
+            input_data = {
+                "subject_image": subject,
+                "prompt": prompt or "portrait, different poses and expressions",
+                "negative_prompt": negative_prompt or "low quality, blurry",
+                "num_images": number_of_images
+            }
+
+        return await self._generate_internal(input_data)
+
+    async def flux_lora_generation(
+        self,
+        prompt: str,
+        lora_scale: float = 1.0,
+        num_outputs: int = 1,
+        aspect_ratio: str = "1:1",
+        output_format: str = "jpg",
+        guidance_scale: float = 3.5,
+        output_quality: int = 90,
+        num_inference_steps: int = 28,
+        disable_safety_checker: bool = False
+    ) -> Dict[str, Any]:
+        """FLUX LoRA generation - uses pretrained LoRA weights"""
+
+        if any(lora in self.model_name for lora in ["flux-dev-lora", "flux-lora"]):
+            input_data = {
+                "prompt": prompt,
+                "lora_scale": lora_scale,
+                "num_outputs": num_outputs,
+                "aspect_ratio": aspect_ratio,
+                "output_format": output_format,
+                "guidance_scale": guidance_scale,
+                "output_quality": output_quality,
+                "num_inference_steps": num_inference_steps,
+                "disable_safety_checker": disable_safety_checker
+            }
+        else:
+            # Default LoRA parameters
+            input_data = {
+                "prompt": prompt,
+                "lora_strength": lora_scale,
+                "num_images": num_outputs,
+                "guidance_scale": guidance_scale,
+                "num_inference_steps": num_inference_steps
+            }
+
+        return await self._generate_internal(input_data)
+
+    async def ultimate_upscale(
+        self,
+        image: Union[str, Any],
+        scale: int = 4,
+        scheduler: str = "K_EULER_ANCESTRAL",
+        num_inference_steps: int = 20,
+        guidance_scale: float = 10.0,
+        strength: float = 0.55,
+        hdr: float = 0.0,
+        seed: Optional[int] = None
+    ) -> Dict[str, Any]:
+        """Ultimate SD Upscaler - professional super-resolution"""
+
+        if "ultimate" in self.model_name or "upscal" in self.model_name:
+            input_data = {
+                "image": image,
+                "scale": scale,
+                "scheduler": scheduler,
+                "num_inference_steps": num_inference_steps,
+                "guidance_scale": guidance_scale,
+                "strength": strength,
+                "hdr": hdr
+            }
+
+            if seed:
+                input_data["seed"] = seed
+        else:
+            # Default upscaling parameters
+            input_data = {
+                "image": image,
+                "upscale_factor": scale,
+                "num_inference_steps": num_inference_steps,
+                "guidance_scale": guidance_scale,
+                "denoising_strength": strength
+            }
+
+            if seed:
+                input_data["seed"] = seed
+
+        return await self._generate_internal(input_data)
+
     async def _generate_internal(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
         """Internal generation method"""
         try:
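The hunk above adds four model-specific helpers (`instant_id_generation`, `consistent_character_generation`, `flux_lora_generation`, `ultimate_upscale`); each simply assembles a Replicate `input_data` dict keyed to the configured `model_name` and delegates to `_generate_internal`. The sketch below is a hypothetical caller of the InstantID helper, not part of the package: it assumes an already-constructed `ReplicateImageGenService` bound to an instant-id model (construction and factory wiring are outside this diff), and the face image URL is a placeholder.

```python
# Hypothetical usage sketch for the instant_id_generation helper added in 0.3.5.
# Assumes `service` is an already-configured ReplicateImageGenService instance.
async def demo(service) -> None:
    result = await service.instant_id_generation(
        prompt="studio portrait, soft lighting",
        face_image="https://example.com/face.jpg",  # placeholder reference image URL
        negative_prompt="low quality, blurry",
        num_inference_steps=30,
        guidance_scale=5.0,
        identitynet_strength_ratio=0.8,
        adapter_strength_ratio=0.8,
    )
    # As of 0.3.5 the internal generator normalizes Replicate FileOutput objects
    # to plain URL strings, so the "urls" entry should hold str values.
    print(result.get("urls"))

# Run with asyncio.run(demo(service)) or inside the application's event loop.
```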
@@ -141,11 +292,19 @@ class ReplicateImageGenService(BaseImageGenService):
             # Call the Replicate API
             output = await replicate.async_run(self.model_name, input=input_data)
 
-            # Process output
+            # Process output - convert FileOutput objects to URL strings
             if isinstance(output, list):
-
+                raw_urls = output
             else:
-
+                raw_urls = [output]
+
+            # Convert to string URLs
+            urls = []
+            for url in raw_urls:
+                if hasattr(url, 'url'):
+                    urls.append(str(url.url))  # type: ignore
+                else:
+                    urls.append(str(url))
 
             # Update statistics
             self.last_generation_count = len(urls)
@@ -224,37 +383,6 @@ class ReplicateImageGenService(BaseImageGenService):
             results.append(result)
         return results
 
-    async def generate_image_to_file(
-        self,
-        prompt: str,
-        output_path: str,
-        negative_prompt: Optional[str] = None,
-        width: int = 512,
-        height: int = 512,
-        num_inference_steps: int = 4,
-        guidance_scale: float = 7.5,
-        seed: Optional[int] = None
-    ) -> Dict[str, Any]:
-        """Generate an image and save it to a file"""
-        result = await self.generate_image(
-            prompt, negative_prompt, width, height,
-            num_inference_steps, guidance_scale, seed
-        )
-
-        # Save the first image
-        if result.get("urls"):
-            url = result["urls"][0]
-            url_str = str(url) if hasattr(url, "__str__") else url
-            await self._download_image(url_str, output_path)
-
-            return {
-                "file_path": output_path,
-                "cost_usd": result.get("cost_usd", 0.0),
-                "model": self.model_name
-            }
-        else:
-            raise ValueError("No image generated")
-
     async def _download_image(self, url: str, save_path: str) -> None:
         """Download an image and save it"""
         try: