xinference 1.10.1__py3-none-any.whl → 1.11.0.post1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries; it is provided for informational purposes only.
Potentially problematic release.
- xinference/_version.py +3 -3
- xinference/api/restful_api.py +462 -3
- xinference/client/restful/async_restful_client.py +158 -5
- xinference/client/restful/restful_client.py +131 -0
- xinference/core/supervisor.py +12 -0
- xinference/model/audio/model_spec.json +20 -20
- xinference/model/image/model_spec.json +159 -159
- xinference/model/llm/__init__.py +2 -2
- xinference/model/llm/llm_family.json +843 -180
- xinference/model/llm/mlx/distributed_models/core.py +41 -0
- xinference/model/llm/mlx/distributed_models/qwen2.py +1 -2
- xinference/model/llm/sglang/core.py +20 -6
- xinference/model/llm/tool_parsers/qwen_tool_parser.py +29 -4
- xinference/model/llm/transformers/chatglm.py +3 -0
- xinference/model/llm/transformers/core.py +93 -16
- xinference/model/llm/transformers/multimodal/minicpmv45.py +340 -0
- xinference/model/llm/transformers/utils.py +3 -0
- xinference/model/llm/utils.py +37 -24
- xinference/model/llm/vllm/core.py +128 -69
- xinference/model/utils.py +74 -31
- xinference/thirdparty/audiotools/core/audio_signal.py +6 -6
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/text.py +1 -1
- xinference/thirdparty/melo/text/chinese_mix.py +2 -2
- xinference/types.py +9 -0
- xinference/ui/web/ui/build/asset-manifest.json +3 -3
- xinference/ui/web/ui/build/index.html +1 -1
- xinference/ui/web/ui/build/static/js/{main.d192c4f3.js → main.e4d9a9e1.js} +3 -3
- xinference/ui/web/ui/build/static/js/{main.d192c4f3.js.map → main.e4d9a9e1.js.map} +1 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/e6770a05771952175c9fbf48fce283c9bb1bc8b5763e39edc36d099d1fe16b4a.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/ea2a26361204e70cf1018d6990fb6354bed82b3ac69690391e0f100385e7abb7.json +1 -0
- {xinference-1.10.1.dist-info → xinference-1.11.0.post1.dist-info}/METADATA +8 -5
- {xinference-1.10.1.dist-info → xinference-1.11.0.post1.dist-info}/RECORD +37 -36
- xinference/ui/web/ui/node_modules/.cache/babel-loader/7275b67c78ec76ce38a686bb8a576d8c9cecf54e1573614c84859d538efb9be5.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/f995a2425dfb0822fd07127f66ffe9b026883bc156b402eb8bd0b83d52460a93.json +0 -1
- /xinference/ui/web/ui/build/static/js/{main.d192c4f3.js.LICENSE.txt → main.e4d9a9e1.js.LICENSE.txt} +0 -0
- {xinference-1.10.1.dist-info → xinference-1.11.0.post1.dist-info}/WHEEL +0 -0
- {xinference-1.10.1.dist-info → xinference-1.11.0.post1.dist-info}/entry_points.txt +0 -0
- {xinference-1.10.1.dist-info → xinference-1.11.0.post1.dist-info}/licenses/LICENSE +0 -0
- {xinference-1.10.1.dist-info → xinference-1.11.0.post1.dist-info}/top_level.txt +0 -0
xinference/_version.py
CHANGED

@@ -8,11 +8,11 @@ import json
 
 version_json = '''
 {
- "date": "2025-…",
+ "date": "2025-10-20T18:17:30+0800",
  "dirty": false,
  "error": null,
- "full-revisionid": "…",
- "version": "1.10.1"
+ "full-revisionid": "378b99185de5a7623f75798df7e4391f4ff39e35",
+ "version": "1.11.0.post1"
 }
 ''' # END VERSION_JSON
 
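The embedded JSON above is what versioneer's generated `get_versions()` helper parses and returns at runtime, so the bump can be verified from an installed environment. A minimal check, assuming the 1.11.0.post1 wheel is installed:

```python
# Assumes the xinference 1.11.0.post1 wheel is installed; _version.py is
# generated by versioneer and exposes the embedded metadata as a dict.
from xinference._version import get_versions

info = get_versions()
print(info["version"])          # "1.11.0.post1"
print(info["full-revisionid"])  # "378b99185de5a7623f75798df7e4391f4ff39e35"
```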
xinference/api/restful_api.py
CHANGED

@@ -739,6 +739,18 @@ class RESTfulAPI(CancelMixin):
                 else None
             ),
         )
+        self._router.add_api_route(
+            "/v1/images/edits",
+            self.create_image_edits,
+            methods=["POST"],
+            response_model=ImageList,
+            dependencies=(
+                [Security(self._auth_service, scopes=["models:read"])]
+                if self.is_authenticated()
+                else None
+            ),
+        )
+
         # SD WebUI API
         self._router.add_api_route(
             "/sdapi/v1/options",
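Because the new route follows OpenAI's `images/edits` contract, it can be driven by the stock OpenAI Python client pointed at an Xinference server. A minimal sketch, assuming a server on `localhost:9997` with an image model supporting `image2image` or `inpainting` launched under the placeholder UID `my-image-model`:

```python
# Hypothetical client call against the new /v1/images/edits route.
# The host, port, and model UID are placeholders; any non-empty api_key
# works when authentication is disabled.
import openai

client = openai.OpenAI(base_url="http://localhost:9997/v1", api_key="not-used")

with open("photo.png", "rb") as image:
    result = client.images.edit(
        model="my-image-model",  # omit to let the server pick a capable image model
        image=image,
        prompt="replace the sky with a sunset",
        n=1,
        size="1024x1024",  # the handler also accepts "original" to keep input dimensions
        response_format="b64_json",
    )

print(result.data[0].b64_json[:64])
```

If `model` is omitted, the handler below falls back to the first loaded image model that advertises `image2image` or `inpainting` ability.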
@@ -2299,6 +2311,453 @@ class RESTfulAPI(CancelMixin):
             self.handle_request_limit_error(e)
             raise HTTPException(status_code=500, detail=str(e))
 
+    async def create_image_edits(
+        self,
+        request: Request,
+        prompt: str = Form(...),
+        mask: Optional[UploadFile] = File(None, media_type="application/octet-stream"),
+        model: Optional[str] = Form(None),
+        n: Optional[int] = Form(1),
+        size: Optional[str] = Form("original"),
+        response_format: Optional[str] = Form("url"),
+        stream: Optional[bool] = Form(False),
+    ) -> Response:
+        """OpenAI-compatible image edit endpoint."""
+        import io
+
+        # Parse multipart form data to handle files
+        content_type = request.headers.get("content-type", "")
+
+        if "multipart/form-data" in content_type:
+            # Try manual multipart parsing for better duplicate field handling
+            try:
+                image_files, manual_mask = await self._parse_multipart_manual(request)
+                # Use manually parsed mask if available, otherwise keep the original
+                if manual_mask is not None:
+                    mask = manual_mask
+            except Exception as e:
+                logger.error(f"Manual parsing failed, falling back to FastAPI: {e}")
+                # Fallback to FastAPI form parsing
+                form = await request.form()
+                multipart_files: dict[str, list] = {}
+                for key, value in form.items():
+                    if hasattr(value, "filename") and value.filename:
+                        if key not in multipart_files:
+                            multipart_files[key] = []
+                        multipart_files[key].append(value)
+
+                image_files = multipart_files.get("image", [])
+                if not image_files:
+                    image_files = multipart_files.get("image[]", [])
+                if not image_files:
+                    image_files = multipart_files.get("images", [])
+
+        else:
+            # Fallback to FastAPI form parsing
+            form = await request.form()
+            fallback_files: dict[str, list] = {}
+            for key, value in form.items():
+                if hasattr(value, "filename") and value.filename:
+                    if key not in fallback_files:
+                        fallback_files[key] = []
+                    fallback_files[key].append(value)
+
+            image_files = fallback_files.get("image", [])
+            if not image_files:
+                image_files = fallback_files.get("image[]", [])
+            if not image_files:
+                image_files = fallback_files.get("images", [])
+
+        all_file_keys = []
+        if "multipart/form-data" in content_type:
+            all_file_keys = [f"image[] (x{len(image_files)})"] if image_files else []
+        else:
+            # Fallback to FastAPI form parsing
+            form = await request.form()
+            debug_files: dict[str, list] = {}
+            for key, value in form.items():
+                if hasattr(value, "filename") and value.filename:
+                    if key not in debug_files:
+                        debug_files[key] = []
+                    debug_files[key].append(value)
+
+            # Get image files
+            image_files = debug_files.get("image", [])
+            if not image_files:
+                image_files = debug_files.get("image[]", [])
+            if not image_files:
+                image_files = debug_files.get("images", [])
+
+        logger.info(f"Total image files found: {len(image_files)}")
+
+        if not image_files:
+            # Debug: log all received file fields
+            logger.warning(
+                f"No image files found. Available file fields: {all_file_keys}"
+            )
+            raise HTTPException(
+                status_code=400, detail="At least one image file is required"
+            )
+
+        # Validate response format
+        if response_format not in ["url", "b64_json"]:
+            raise HTTPException(
+                status_code=400, detail="response_format must be 'url' or 'b64_json'"
+            )
+
+        # Get default model if not specified
+        if not model:
+            try:
+                models = await (await self._get_supervisor_ref()).list_models()
+                image_models = [
+                    name
+                    for name, info in models.items()
+                    if info["model_type"] == "image"
+                    and info.get("model_ability", [])
+                    and (
+                        "image2image" in info["model_ability"]
+                        or "inpainting" in info["model_ability"]
+                    )
+                ]
+                if not image_models:
+                    raise HTTPException(
+                        status_code=400, detail="No available image models found"
+                    )
+                model = image_models[0]
+            except Exception as e:
+                logger.error(f"Failed to get available models: {e}", exc_info=True)
+                raise HTTPException(
+                    status_code=500, detail="Failed to get available models"
+                )
+
+        model_uid = model
+        try:
+            model_ref = await (await self._get_supervisor_ref()).get_model(model_uid)
+        except ValueError as ve:
+            logger.error(str(ve), exc_info=True)
+            await self._report_error_event(model_uid, str(ve))
+            raise HTTPException(status_code=400, detail=str(ve))
+        except Exception as e:
+            logger.error(e, exc_info=True)
+            await self._report_error_event(model_uid, str(e))
+            raise HTTPException(status_code=500, detail=str(e))
+
+        request_id = None
+        try:
+            self._add_running_task(request_id)
+
+            # Read and process all images (needed for both streaming and non-streaming)
+            images = []
+            for i, img in enumerate(image_files):
+                image_content = await img.read()
+                image_file = io.BytesIO(image_content)
+                pil_image = Image.open(image_file)
+
+                # Debug: save the received image for inspection
+                debug_filename = f"/tmp/received_image_{i}_{pil_image.mode}_{pil_image.size[0]}x{pil_image.size[1]}.png"
+                pil_image.save(debug_filename)
+                logger.info(f"Saved received image {i} to {debug_filename}")
+
+                # Convert to RGB format to avoid channel mismatch errors
+                if pil_image.mode == "RGBA":
+                    logger.info(f"Converting RGBA image {i} to RGB")
+                    # Create white background for RGBA images
+                    background = Image.new("RGB", pil_image.size, (255, 255, 255))
+                    background.paste(pil_image, mask=pil_image.split()[3])
+                    pil_image = background
+                elif pil_image.mode != "RGB":
+                    logger.info(f"Converting {pil_image.mode} image {i} to RGB")
+                    pil_image = pil_image.convert("RGB")
+
+                # Debug: save the converted image
+                converted_filename = f"/tmp/converted_image_{i}_RGB_{pil_image.size[0]}x{pil_image.size[1]}.png"
+                pil_image.save(converted_filename)
+                logger.info(f"Saved converted image {i} to {converted_filename}")
+
+                images.append(pil_image)
+
+            # Debug: log image summary
+            logger.info(f"Processing {len(images)} images:")
+            for i, img in enumerate(images):
+                logger.info(
+                    f"  Image {i}: mode={img.mode}, size={img.size}, filename={image_files[i].filename if hasattr(image_files[i], 'filename') else 'unknown'}"
+                )
+
+            # Handle streaming if requested
+            if stream:
+                return EventSourceResponse(
+                    self._stream_image_edit(
+                        model_ref,
+                        images,  # Pass processed images instead of raw files
+                        mask,
+                        prompt,
+                        (
+                            size.replace("x", "*") if size else ""
+                        ),  # Convert size format for streaming
+                        response_format,
+                        n,
+                    )
+                )
+
+            # Use the first image as primary, others as reference
+            primary_image = images[0]
+            reference_images = images[1:] if len(images) > 1 else []
+
+            # Prepare model parameters
+            # If size is "original", use empty string to let model determine original dimensions
+            if size == "original":
+                model_size = ""
+            else:
+                model_size = size.replace("x", "*") if size else ""
+
+            model_params = {
+                "prompt": prompt,
+                "n": n or 1,
+                "size": model_size,
+                "response_format": response_format,
+                "denoising_strength": 0.75,  # Default strength for image editing
+                "reference_images": reference_images,  # Pass reference images
+                "negative_prompt": " ",  # Space instead of empty string to prevent filtering
+            }
+
+            # Generate the image
+            if mask:
+                # Use inpainting for masked edits
+                mask_content = await mask.read()
+                mask_image = Image.open(io.BytesIO(mask_content))
+                result = await model_ref.inpainting(
+                    image=primary_image,
+                    mask_image=mask_image,
+                    **model_params,
+                )
+            else:
+                # Use image-to-image for general edits
+                result = await model_ref.image_to_image(
+                    image=primary_image, **model_params
+                )
+
+            # Return the result directly (should be ImageList format)
+            return Response(content=result, media_type="application/json")
+
+        except asyncio.CancelledError:
+            err_str = f"The request has been cancelled: {request_id or 'unknown'}"
+            logger.error(err_str)
+            await self._report_error_event(model_uid, err_str)
+            raise HTTPException(status_code=409, detail=err_str)
+        except Exception as e:
+            e = await self._get_model_last_error(model_ref.uid, e)
+            logger.error(e, exc_info=True)
+            await self._report_error_event(model_uid, str(e))
+            self.handle_request_limit_error(e)
+            raise HTTPException(status_code=500, detail=str(e))
+
+    async def _parse_multipart_manual(self, request: Request):
+        """Manually parse multipart form data to handle duplicate field names"""
+        import io
+
+        class FileWrapper:
+            """Wrapper for BytesIO to add filename and content_type attributes"""
+
+            def __init__(self, data, filename, content_type="application/octet-stream"):
+                self._file = io.BytesIO(data)
+                self.filename = filename
+                self.content_type = content_type
+
+            def read(self, *args, **kwargs):
+                return self._file.read(*args, **kwargs)
+
+            def seek(self, *args, **kwargs):
+                return self._file.seek(*args, **kwargs)
+
+            def tell(self, *args, **kwargs):
+                return self._file.tell(*args, **kwargs)
+
+        from multipart.multipart import parse_options_header
+
+        content_type = request.headers.get("content-type", "")
+        if not content_type:
+            return [], None
+
+        # Parse content type and boundary
+        content_type, options = parse_options_header(content_type.encode("utf-8"))
+        if content_type != b"multipart/form-data":
+            return [], None
+
+        boundary = options.get(b"boundary")
+        if not boundary:
+            return [], None
+
+        # Get the raw body
+        body = await request.body()
+
+        # Parse multipart data manually
+        image_files = []
+        mask_file = None
+        try:
+            # Import multipart parser
+            from multipart.multipart import MultipartParser
+
+            # Parse the multipart data
+            parser = MultipartParser(
+                io.BytesIO(body),
+                boundary.decode("utf-8") if isinstance(boundary, bytes) else boundary,
+            )
+
+            for part in parser:
+                # Check if this part is an image file
+                field_name = part.name
+                filename = part.filename or ""
+
+                # Look for image fields with different naming conventions
+                if field_name in ["image", "image[]", "images"] and filename:
+                    # Create a file-like object from the part data
+                    file_obj = FileWrapper(
+                        part.data,
+                        filename,
+                        part.content_type or "application/octet-stream",
+                    )
+                    image_files.append(file_obj)
+                elif field_name == "mask" and filename:
+                    # Handle mask file
+                    mask_file = FileWrapper(
+                        part.data,
+                        filename,
+                        part.content_type or "application/octet-stream",
+                    )
+                    logger.info(f"Manual multipart parsing found mask file: {filename}")
+
+            logger.info(
+                f"Manual multipart parsing found {len(image_files)} image files and mask: {mask_file is not None}"
+            )
+
+        except Exception as e:
+            logger.error(f"Manual multipart parsing failed: {e}")
+            # Return empty list to trigger fallback
+            return [], None
+
+        return image_files, mask_file
+
+    async def _stream_image_edit(
+        self, model_ref, images, mask, prompt, size, response_format, n
+    ):
+        """Stream image editing progress and results"""
+        import io
+        import json
+        from datetime import datetime
+
+        try:
+            # Send start event
+            yield {
+                "event": "start",
+                "data": json.dumps(
+                    {
+                        "type": "image_edit_started",
+                        "timestamp": datetime.now().isoformat(),
+                        "prompt": prompt,
+                        "image_count": len(images),
+                    }
+                ),
+            }
+
+            # Images are already processed in the main method, just use them directly
+            image_objects = images
+            logger.info(f"Streaming: Using {len(image_objects)} pre-processed images")
+
+            # Debug: log streaming image summary
+            logger.info(f"Streaming: Processing {len(image_objects)} images:")
+            for i, img in enumerate(image_objects):
+                logger.info(f"  Streaming Image {i}: mode={img.mode}, size={img.size}")
+
+            # Use the first image as primary, others as reference
+            primary_image = image_objects[0]
+            reference_images = image_objects[1:] if len(image_objects) > 1 else []
+
+            # Send processing event
+            yield {
+                "event": "processing",
+                "data": json.dumps(
+                    {
+                        "type": "images_loaded",
+                        "timestamp": datetime.now().isoformat(),
+                        "primary_image_size": primary_image.size,
+                        "reference_images_count": len(reference_images),
+                    }
+                ),
+            }
+
+            # Prepare model parameters
+            # If size is "original", use empty string to let model determine original dimensions
+            if size == "original":
+                model_size = ""
+            else:
+                model_size = size
+
+            model_params = {
+                "prompt": prompt,
+                "n": n or 1,
+                "size": model_size,
+                "response_format": response_format,
+                "denoising_strength": 0.75,
+                "reference_images": reference_images,
+                "negative_prompt": " ",  # Space instead of empty string to prevent filtering
+            }
+
+            # Generate the image
+            if mask:
+                mask_content = await mask.read()
+                mask_image = Image.open(io.BytesIO(mask_content))
+                yield {
+                    "event": "processing",
+                    "data": json.dumps(
+                        {
+                            "type": "mask_loaded",
+                            "timestamp": datetime.now().isoformat(),
+                            "mask_size": mask_image.size,
+                        }
+                    ),
+                }
+                result = await model_ref.inpainting(
+                    image=primary_image,
+                    mask_image=mask_image,
+                    **model_params,
+                )
+            else:
+                yield {
+                    "event": "processing",
+                    "data": json.dumps(
+                        {
+                            "type": "starting_generation",
+                            "timestamp": datetime.now().isoformat(),
+                        }
+                    ),
+                }
+                result = await model_ref.image_to_image(
+                    image=primary_image, **model_params
+                )
+
+            # Parse the result and send final event in OpenAI format
+            result_data = json.loads(result)
+
+            # Send completion event with OpenAI-compatible format
+            yield {
+                "event": "complete",
+                "data": json.dumps(
+                    result_data
+                ),  # Direct send the result in OpenAI format
+            }
+
+        except Exception as e:
+            yield {
+                "event": "error",
+                "data": json.dumps(
+                    {
+                        "type": "image_edit_error",
+                        "timestamp": datetime.now().isoformat(),
+                        "error": str(e),
+                    }
+                ),
+            }
+
     async def create_flexible_infer(self, request: Request) -> Response:
         payload = await request.json()
 
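The manual parser above exists because the standard `request.form()` mapping collapses duplicate field names, so a request carrying several parts all named `image` would otherwise surface only one file. On the client side, duplicate fields are sent as a sequence of tuples; a sketch with `httpx` (server address and model UID are placeholders):

```python
# Hypothetical multi-image edit request: several multipart parts share the
# field name "image", which is exactly the case _parse_multipart_manual handles.
import httpx

files = [
    ("image", ("base.png", open("base.png", "rb"), "image/png")),  # primary image
    ("image", ("ref1.png", open("ref1.png", "rb"), "image/png")),  # reference image
    ("mask", ("mask.png", open("mask.png", "rb"), "image/png")),   # optional: triggers inpainting
]
data = {"prompt": "blend the reference style into the base photo", "model": "my-image-model"}

resp = httpx.post(
    "http://localhost:9997/v1/images/edits", data=data, files=files, timeout=None
)
resp.raise_for_status()
first = resp.json()["data"][0]
print(first.get("url") or "b64_json payload returned")
```

Per the handler, the first `image` part becomes the primary image and the rest are passed through as `reference_images`; a `mask` part routes the request to inpainting.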
@@ -2355,7 +2814,7 @@ class RESTfulAPI(CancelMixin):
             )
             return Response(content=video_list, media_type="application/json")
         except asyncio.CancelledError:
-            err_str = f"The request has been cancelled: {request_id}"
+            err_str = f"The request has been cancelled: {request_id or 'unknown'}"
             logger.error(err_str)
             await self._report_error_event(model_uid, err_str)
             raise HTTPException(status_code=409, detail=err_str)
@@ -2404,7 +2863,7 @@ class RESTfulAPI(CancelMixin):
             )
             return Response(content=video_list, media_type="application/json")
         except asyncio.CancelledError:
-            err_str = f"The request has been cancelled: {request_id}"
+            err_str = f"The request has been cancelled: {request_id or 'unknown'}"
             logger.error(err_str)
             await self._report_error_event(model_uid, err_str)
             raise HTTPException(status_code=409, detail=err_str)
@@ -2455,7 +2914,7 @@ class RESTfulAPI(CancelMixin):
             )
             return Response(content=video_list, media_type="application/json")
         except asyncio.CancelledError:
-            err_str = f"The request has been cancelled: {request_id}"
+            err_str = f"The request has been cancelled: {request_id or 'unknown'}"
             logger.error(err_str)
             await self._report_error_event(model_uid, err_str)
             raise HTTPException(status_code=409, detail=err_str)
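When `stream=true` is included in the form, the endpoint responds with server-sent events (`start`, `processing`, `complete`, `error`) emitted by `_stream_image_edit` instead of a single JSON body. A sketch of a consumer, again with placeholder host and model UID:

```python
# Hypothetical SSE consumer for the streaming variant of /v1/images/edits.
# The event names mirror those yielded by _stream_image_edit above.
import json

import httpx

data = {
    "prompt": "replace the sky with a sunset",
    "model": "my-image-model",
    "stream": "true",
}
files = {"image": ("photo.png", open("photo.png", "rb"), "image/png")}

with httpx.stream(
    "POST", "http://localhost:9997/v1/images/edits", data=data, files=files, timeout=None
) as resp:
    event = None
    for line in resp.iter_lines():
        if line.startswith("event:"):
            event = line.split(":", 1)[1].strip()
        elif line.startswith("data:"):
            payload = json.loads(line.split(":", 1)[1].strip())
            if event == "complete":
                # payload is the usual ImageList: {"created": ..., "data": [...]}
                print("done:", len(payload["data"]), "image(s)")
                break
            print(event, payload.get("type"))
```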