vectorvein 0.2.85__py3-none-any.whl → 0.2.87__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vectorvein/chat_clients/openai_compatible_client.py +10 -28
- vectorvein/chat_clients/utils.py +53 -97
- vectorvein/utilities/media_processing.py +62 -3
- {vectorvein-0.2.85.dist-info → vectorvein-0.2.87.dist-info}/METADATA +1 -1
- {vectorvein-0.2.85.dist-info → vectorvein-0.2.87.dist-info}/RECORD +7 -7
- {vectorvein-0.2.85.dist-info → vectorvein-0.2.87.dist-info}/WHEEL +0 -0
- {vectorvein-0.2.85.dist-info → vectorvein-0.2.87.dist-info}/entry_points.txt +0 -0
vectorvein/chat_clients/openai_compatible_client.py
CHANGED

@@ -99,7 +99,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
             return AzureOpenAI(
                 azure_endpoint=self.endpoint.api_base,
                 api_key=self.endpoint.api_key,
-                api_version="2025-
+                api_version="2025-04-01-preview",
                 http_client=self.http_client,
             )
         else:
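Both the synchronous client here and the asynchronous client later in this file now pin the Azure OpenAI API version to 2025-04-01-preview. For reference, a minimal sketch of an equivalent client built directly with the OpenAI SDK; the endpoint and key below are placeholders, not values from the package:

```python
import httpx
from openai import AzureOpenAI

client = AzureOpenAI(
    azure_endpoint="https://my-resource.openai.azure.com",  # placeholder endpoint
    api_key="AZURE_API_KEY",                                # placeholder key
    api_version="2025-04-01-preview",                       # the version pinned in 0.2.87
    http_client=httpx.Client(),
)
```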
@@ -327,9 +327,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
         if not max_tokens and not max_completion_tokens:
             max_output_tokens = self.model_setting.max_output_tokens
             native_multimodal = self.model_setting.native_multimodal
-            token_counts = get_message_token_counts(
-                messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal
-            )
+            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal)
             if max_output_tokens is not None:
                 max_tokens = self.model_setting.context_length - token_counts - 64
                 max_tokens = min(max(max_tokens, 1), max_output_tokens)
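When neither max_tokens nor max_completion_tokens is given, the client budgets the output from the context window (minus a 64-token safety margin) and clamps it into [1, max_output_tokens]. A worked example with made-up numbers:

```python
# Illustrative values only, not taken from any real model_setting.
context_length = 128_000
max_output_tokens = 8_192
token_counts = 121_500  # prompt + tools, as get_message_token_counts would report

max_tokens = context_length - token_counts - 64          # 6_436 tokens left in the window
max_tokens = min(max(max_tokens, 1), max_output_tokens)  # stays 6_436: above 1, below the cap
print(max_tokens)  # 6436
```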
@@ -373,9 +371,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
                 extra_body=extra_body,
                 timeout=timeout,
                 stream_options=stream_options,
-                response_format=response_format
-                if response_format and self.model_setting.response_format_available
-                else OPENAI_NOT_GIVEN,
+                response_format=response_format if response_format and self.model_setting.response_format_available else OPENAI_NOT_GIVEN,
                 **tools_params,  # type: ignore
             )
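The conditional now sits on one line: the response format is forwarded only when the caller supplied one and the model settings flag it as supported; otherwise the SDK sentinel is passed so the field is omitted from the request. A minimal sketch of that selection logic, using the OpenAI SDK's NOT_GIVEN sentinel (OPENAI_NOT_GIVEN in this file is presumably an alias of the same idea):

```python
from openai import NOT_GIVEN  # sentinel the OpenAI SDK treats as "omit this field"


def pick_response_format(response_format, response_format_available: bool):
    # Mirrors the inlined conditional from the diff above.
    return response_format if response_format and response_format_available else NOT_GIVEN


print(pick_response_format({"type": "json_object"}, True))   # {'type': 'json_object'}
print(pick_response_format({"type": "json_object"}, False))  # NOT_GIVEN
```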
@@ -501,9 +497,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
                 extra_body=extra_body,
                 timeout=timeout,
                 stream_options=stream_options,
-                response_format=response_format
-                if response_format and self.model_setting.response_format_available
-                else OPENAI_NOT_GIVEN,
+                response_format=response_format if response_format and self.model_setting.response_format_available else OPENAI_NOT_GIVEN,
                 **tools_params,  # type: ignore
             )
@@ -524,10 +518,7 @@ class OpenAICompatibleChatClient(BaseChatClient):

         if tools:
             if self.model_setting.function_call_available and response.choices[0].message.tool_calls:
-                result["tool_calls"] = [
-                    {**tool_call.model_dump(), "type": "function"}
-                    for tool_call in response.choices[0].message.tool_calls
-                ]
+                result["tool_calls"] = [{**tool_call.model_dump(), "type": "function"} for tool_call in response.choices[0].message.tool_calls]
             else:
                 tool_call_content_processor = ToolCallContentProcessor(result["content"])
                 tool_call_data = tool_call_content_processor.tool_calls
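The list comprehension produces the same entries as before, just on one line: each SDK tool-call object is dumped to a plain dict and explicitly tagged as a function call. Roughly, an entry in result["tool_calls"] looks like the following; all field values below are illustrative placeholders:

```python
# Illustrative shape only; ids, names and arguments are placeholders.
tool_call_entry = {
    "id": "call_abc123",
    "type": "function",  # set explicitly by the comprehension
    "function": {
        "name": "get_weather",
        "arguments": '{"city": "Paris"}',  # JSON-encoded argument string
    },
}
```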
@@ -581,7 +572,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             return AsyncAzureOpenAI(
                 azure_endpoint=self.endpoint.api_base,
                 api_key=self.endpoint.api_key,
-                api_version="2025-
+                api_version="2025-04-01-preview",
                 http_client=self.http_client,
             )
         else:
@@ -809,9 +800,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
         if not max_tokens and not max_completion_tokens:
             max_output_tokens = self.model_setting.max_output_tokens
             native_multimodal = self.model_setting.native_multimodal
-            token_counts = get_message_token_counts(
-                messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal
-            )
+            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal)
             if max_output_tokens is not None:
                 max_tokens = self.model_setting.context_length - token_counts - 64
                 max_tokens = min(max(max_tokens, 1), max_output_tokens)
@@ -855,9 +844,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
                 extra_body=extra_body,
                 timeout=timeout,
                 stream_options=stream_options,
-                response_format=response_format
-                if response_format and self.model_setting.response_format_available
-                else OPENAI_NOT_GIVEN,
+                response_format=response_format if response_format and self.model_setting.response_format_available else OPENAI_NOT_GIVEN,
                 **tools_params,  # type: ignore
             )
@@ -983,9 +970,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
                 extra_body=extra_body,
                 timeout=timeout,
                 stream_options=stream_options,
-                response_format=response_format
-                if response_format and self.model_setting.response_format_available
-                else OPENAI_NOT_GIVEN,
+                response_format=response_format if response_format and self.model_setting.response_format_available else OPENAI_NOT_GIVEN,
                 **tools_params,  # type: ignore
             )
@@ -1006,10 +991,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):

         if tools:
             if self.model_setting.function_call_available and response.choices[0].message.tool_calls:
-                result["tool_calls"] = [
-                    {**tool_call.model_dump(), "type": "function"}
-                    for tool_call in response.choices[0].message.tool_calls
-                ]
+                result["tool_calls"] = [{**tool_call.model_dump(), "type": "function"} for tool_call in response.choices[0].message.tool_calls]
             else:
                 tool_call_content_processor = ToolCallContentProcessor(result["content"])
                 tool_call_data = tool_call_content_processor.tool_calls
vectorvein/chat_clients/utils.py
CHANGED
@@ -103,18 +103,8 @@ def convert_type(value, value_type):

 def get_token_counts(text: str | dict, model: str = "", use_token_server_first: bool = True) -> int:
     if use_token_server_first and settings.token_server is not None:
-        base_url = (
-            settings.token_server.url
-            if settings.token_server.url is not None
-            else f"http://{settings.token_server.host}:{settings.token_server.port}"
-        )
-        _, response = (
-            Retry(httpx.post)
-            .args(url=f"{base_url}/count_tokens", json={"text": text, "model": model}, timeout=None)
-            .retry_times(5)
-            .sleep_time(1)
-            .run()
-        )
+        base_url = settings.token_server.url if settings.token_server.url is not None else f"http://{settings.token_server.host}:{settings.token_server.port}"
+        _, response = Retry(httpx.post).args(url=f"{base_url}/count_tokens", json={"text": text, "model": model}, timeout=None).retry_times(5).sleep_time(1).run()
         if response is not None:
             try:
                 result = response.json()
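The collapsed one-liners use the library's own Retry helper (vectorvein/utilities/retry.py): configure the call, set retry count and sleep, then run it. Judging from the call sites, run() returns a pair whose second element is the httpx response or None; the first element, discarded here, presumably flags success. A hedged sketch of the same pattern against a hypothetical local token server:

```python
import httpx

from vectorvein.utilities.retry import Retry  # assumed public import path for the helper

# Hypothetical token-server URL for illustration only.
base_url = "http://127.0.0.1:8000"

# Retry up to 5 times with a 1-second pause, as get_token_counts does.
_, response = (
    Retry(httpx.post)
    .args(url=f"{base_url}/count_tokens", json={"text": "hello", "model": "gpt-4o"}, timeout=None)
    .retry_times(5)
    .sleep_time(1)
    .run()
)

if response is not None:
    print(response.json())
```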
@@ -147,13 +137,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
             ],
         }

-        _, response = (
-            Retry(httpx.post)
-            .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
         if response is None:
             return 1000
         result = response.json()
@@ -174,13 +158,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 {"role": "user", "content": text},
             ],
         }
-        _, response = (
-            Retry(httpx.post)
-            .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
         if response is None:
             return 1000
         result = response.json()
@@ -194,11 +172,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
             endpoint_id = endpoint_id["endpoint_id"]
         endpoint = settings.get_endpoint(endpoint_id)

-        api_base = (
-            endpoint.api_base.removesuffix("/openai/")
-            if endpoint.api_base
-            else "https://generativelanguage.googleapis.com/v1beta"
-        )
+        api_base = endpoint.api_base.removesuffix("/openai/") if endpoint.api_base else "https://generativelanguage.googleapis.com/v1beta"
         base_url = f"{api_base}/models/{backend_setting.id}:countTokens"
         params = {"key": endpoint.api_key}
         request_body = {
@@ -209,13 +183,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 ],
             },
         }
-        _, response = (
-            Retry(httpx.post)
-            .args(base_url, json=request_body, params=params, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(base_url, json=request_body, params=params, timeout=None).retry_times(5).sleep_time(10).run()
         if response is None:
             return 1000
         result = response.json()
@@ -230,12 +198,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 endpoint_id = endpoint_choice
             endpoint = settings.get_endpoint(endpoint_id)

-            if (
-                endpoint.is_vertex
-                or endpoint.is_bedrock
-                or endpoint.endpoint_type == "anthropic_vertex"
-                or endpoint.endpoint_type == "anthropic_bedrock"
-            ):
+            if endpoint.is_vertex or endpoint.is_bedrock or endpoint.endpoint_type == "anthropic_vertex" or endpoint.endpoint_type == "anthropic_bedrock":
                 continue
             elif endpoint.endpoint_type in ("default", "anthropic"):
                 return (
@@ -277,13 +240,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 {"role": "user", "content": text},
             ],
         }
-        _, response = (
-            Retry(httpx.post)
-            .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
         if response is None:
             return 1000
         result = response.json()
@@ -306,13 +263,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 {"role": "user", "content": text},
             ],
         }
-        _, response = (
-            Retry(httpx.post)
-            .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
         if response is None:
             return 1000
         result = response.json()
@@ -453,48 +404,52 @@ def cutoff_messages(
         return system_message + messages


-def format_image_message(image: str, backend: BackendType = BackendType.OpenAI) -> dict:
+def format_image_message(image: str, backend: BackendType = BackendType.OpenAI, process_image: bool = True) -> dict:
+    if process_image:
+        from ..utilities.media_processing import ImageProcessor

+        image_processor = ImageProcessor(image_source=image)
+        if backend == BackendType.OpenAI:
+            return {
+                "type": "image_url",
+                "image_url": {"url": image_processor.data_url},
+            }
+        elif backend == BackendType.Anthropic:
+            return {
+                "type": "image",
+                "source": {
+                    "type": "base64",
+                    "media_type": image_processor.mime_type,
+                    "data": image_processor.base64_image,
+                },
+            }
+        else:
+            return {
+                "type": "image_url",
+                "image_url": {"url": image_processor.data_url},
+            }
     else:
+        if backend == BackendType.Anthropic:
+            return {"type": "image", "source": {"type": "base64", "media_type": "image/jpeg", "data": image}}
+        else:
+            return {"type": "image_url", "image_url": {"url": image}}


 def format_workflow_messages(message: dict, content: str, backend: BackendType):
     formatted_messages = []

     # Tool-call message
-    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral, BackendType.Yi, BackendType.Gemini):
+    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral, BackendType.Yi, BackendType.Gemini, BackendType.DeepSeek):
         tool_call_message = {
-            "content":
+            "content": content,
             "role": "assistant",
             "tool_calls": [
                 {
-                    "id": message["metadata"]["selected_workflow"]["tool_call_id"],
+                    "id": message["metadata"]["selected_workflow"]["tool_call_id"] or message["metadata"]["record_id"],
                     "type": "function",
                     "function": {
                         "name": message["metadata"]["selected_workflow"]["function_name"],
-                        "arguments": json.dumps(
-                            message["metadata"]["selected_workflow"]["params"], ensure_ascii=False
-                        ),
+                        "arguments": json.dumps(message["metadata"]["selected_workflow"]["params"], ensure_ascii=False),
                     },
                 }
             ],
@@ -527,10 +482,10 @@ def format_workflow_messages(message: dict, content: str, backend: BackendType):
         formatted_messages.append(tool_call_message)

     # Tool-call result message
-    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral, BackendType.Yi, BackendType.Gemini):
+    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral, BackendType.Yi, BackendType.Gemini, BackendType.DeepSeek):
         tool_call_result_message = {
             "role": "tool",
-            "tool_call_id": message["metadata"]["selected_workflow"]["tool_call_id"],
+            "tool_call_id": message["metadata"]["selected_workflow"]["tool_call_id"] or message["metadata"]["record_id"],
             "name": message["metadata"]["selected_workflow"]["function_name"],
             "content": message["metadata"].get("workflow_result", ""),
         }
@@ -558,8 +513,8 @@ def format_workflow_messages(message: dict, content: str, backend: BackendType):
         }
         formatted_messages.append(tool_call_result_message)

-    if content and backend not in (BackendType.Mistral, BackendType.Anthropic):
-        formatted_messages.append({"role": "assistant", "content": content})
+    # if content and backend not in (BackendType.Mistral, BackendType.Anthropic):
+    #     formatted_messages.append({"role": "assistant", "content": content})

     return formatted_messages

@@ -569,6 +524,7 @@ def transform_from_openai_message(
     backend: BackendType,
     native_multimodal: bool = False,
     function_call_available: bool = False,
+    process_image: bool = True,
 ):
     role = message.get("role", "user")
     content = message.get("content", "")
@@ -593,7 +549,7 @@ def transform_from_openai_message(
                 formatted_content.append({"type": "text", "text": item})
             elif isinstance(item, dict) and "type" in item:
                 if item["type"] == "image_url":
-                    formatted_content.append(format_image_message(item["image_url"]["url"], backend))
+                    formatted_content.append(format_image_message(item["image_url"]["url"], backend, process_image))
                 else:
                     formatted_content.append(item)
         if tool_calls:
@@ -674,6 +630,7 @@ def format_messages(
     backend: BackendType = BackendType.OpenAI,
     native_multimodal: bool = False,
     function_call_available: bool = False,
+    process_image: bool = True,
 ) -> list:
     """Convert messages serialized in VectorVein or OpenAI format into the format supported by each model backend

@@ -681,6 +638,8 @@ def format_messages(
         messages (list): VectorVein Or OpenAI messages list.
         backend (str, optional): Messages format target backend. Defaults to BackendType.OpenAI.
         native_multimodal (bool, optional): Use native multimodal ability. Defaults to False.
+        function_call_available (bool, optional): Use function call ability. Defaults to False.
+        process_image (bool, optional): Process image. Defaults to True.

     Returns:
         list: The converted message list
@@ -697,17 +656,13 @@ def format_messages(
             content = message["content"]["text"]
             if message["content_type"] == "TXT":
                 role = "user" if message["author_type"] == "U" else "assistant"
-                formatted_message = format_text_message(
-                    content, role, message.get("attachments", []), backend, native_multimodal
-                )
+                formatted_message = format_text_message(content, role, message.get("attachments", []), backend, native_multimodal, process_image)
                 formatted_messages.append(formatted_message)
             elif message["content_type"] == "WKF" and message["status"] in ("S", "R"):
                 formatted_messages.extend(format_workflow_messages(message, content, backend))
         else:
             # Handle messages in OpenAI format
-            formatted_message = transform_from_openai_message(
-                message, backend, native_multimodal, function_call_available
-            )
+            formatted_message = transform_from_openai_message(message, backend, native_multimodal, function_call_available, process_image)
             formatted_messages.append(formatted_message)

     return formatted_messages
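format_messages threads process_image down through format_text_message and transform_from_openai_message, so a caller that already holds remote image URLs can skip the download-and-encode step. A usage sketch, assuming OpenAI-style input messages; the BackendType import path is an assumption and should be adjusted to wherever the enum lives in the installed package:

```python
from vectorvein.chat_clients.utils import format_messages
from vectorvein.types.enums import BackendType  # assumed import path for BackendType

messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What is in this picture?"},
            {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},  # placeholder URL
        ],
    }
]

# process_image=False forwards the URL as-is instead of fetching and re-encoding it.
formatted = format_messages(messages, backend=BackendType.Anthropic, native_multimodal=True, process_image=False)
```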
@@ -719,6 +674,7 @@ def format_text_message(
     attachments: list,
     backend: BackendType,
     native_multimodal: bool,
+    process_image: bool = True,
 ):
     images_extensions = ("jpg", "jpeg", "png", "bmp")
     has_images = any(attachment.lower().endswith(images_extensions) for attachment in attachments)
@@ -733,7 +689,7 @@ def format_text_message(
             "content": [
                 {"type": "text", "text": content},
                 *[
-                    format_image_message(image=attachment, backend=backend)
+                    format_image_message(image=attachment, backend=backend, process_image=process_image)
                     for attachment in attachments
                     if attachment.lower().endswith(images_extensions)
                 ],
vectorvein/utilities/media_processing.py
CHANGED

@@ -3,6 +3,7 @@
 import base64
 from io import BytesIO
 from pathlib import Path
+from typing import Literal
 from functools import cached_property

 import httpx
@@ -93,6 +94,66 @@ class ImageProcessor:
         if scale_factor < 0.1:
             return img_bytes_resized

+    def _clear_cache(self):
+        self._cached_bytes = None
+        self._cached_base64_image = None
+
+    def convert_format(self, target_format: Literal["JPEG", "PNG", "GIF", "BMP", "WEBP", "TIFF"]):
+        """
+        Convert the image to another format.
+
+        This method converts the image to the specified format. Supported formats include: 'JPEG', 'PNG', 'GIF', 'BMP', 'WEBP', etc.
+
+        Parameters:
+        -----
+        target_format : str
+            String representation of the target format, case-insensitive, e.g. 'png', 'jpeg', 'gif'.
+
+        Returns:
+        -----
+        self
+            Returns the class instance, so calls can be chained.
+
+        Raises:
+        -----
+        ValueError
+            If the specified format is not supported.
+
+        Examples:
+        -----
+        >>> img.convert_format('png')  # convert the image to PNG format
+        >>> img.convert_format('JPEG')  # convert the image to JPEG format
+        """
+
+        # Check whether the format is supported
+        supported_formats = ["JPEG", "PNG", "GIF", "BMP", "WEBP", "TIFF"]
+        if target_format not in supported_formats:
+            raise ValueError(f"不支持的格式: {target_format}。支持的格式有: {', '.join(supported_formats)}")
+
+        # If the current format already matches the target format, skip the conversion
+        if self._image.format == target_format:
+            return self
+
+        # Create a new byte stream
+        img_bytes = BytesIO()
+
+        # Handle the alpha channel
+        if target_format == "JPEG" and self._image.mode in ("RGBA", "LA"):
+            # JPEG does not support transparency, convert to RGB
+            self._image = self._image.convert("RGB")
+
+        # Save in the new format
+        self._image.save(img_bytes, format=target_format, optimize=True)
+        img_bytes.seek(0)
+
+        # Load the image in the new format
+        self._image = Image.open(img_bytes)
+        self._image_format = target_format
+
+        # Clear caches
+        self._clear_cache()
+        return self
+
     @property
     def bytes(self):
         if self._cached_bytes is not None:
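convert_format returns self, so it chains with the existing bytes and base64_image properties. A short sketch with a placeholder input file; note that, as the code is written, the membership check expects the uppercase names from the Literal, so pass 'WEBP' rather than 'webp':

```python
from vectorvein.utilities.media_processing import ImageProcessor

# Placeholder input path; re-encode the image as WEBP and grab the converted bytes.
processor = ImageProcessor(image_source="/tmp/screenshot.png")
webp_bytes = processor.convert_format("WEBP").bytes
```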
@@ -102,9 +163,7 @@ class ImageProcessor:
         img_bytes = BytesIO()

         # Check whether the image has an alpha channel
-        has_transparency = self._image.mode in ("RGBA", "LA") or (
-            self._image.mode == "P" and "transparency" in self._image.info
-        )
+        has_transparency = self._image.mode in ("RGBA", "LA") or (self._image.mode == "P" and "transparency" in self._image.info)

         if has_transparency:
             # If there is an alpha channel, use PNG format
{vectorvein-0.2.85.dist-info → vectorvein-0.2.87.dist-info}/RECORD
CHANGED

@@ -1,6 +1,6 @@
-vectorvein-0.2.
-vectorvein-0.2.
-vectorvein-0.2.
+vectorvein-0.2.87.dist-info/METADATA,sha256=EhNvBt6E7ujl3cd3VcwZ8FH56oZF9bEwbx7yXnGJ4QU,4567
+vectorvein-0.2.87.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+vectorvein-0.2.87.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/api/__init__.py,sha256=lfY-XA46fgD2iIZTU0VYP8i07AwA03Egj4Qua0vUKrQ,738
 vectorvein/api/client.py,sha256=xF-leKDQzVyyy9FnIRaz0k4eElYW1XbbzeRLcpnyk90,33047
@@ -19,11 +19,11 @@ vectorvein/chat_clients/minimax_client.py,sha256=YOILWcsHsN5tihLTMbKJIyJr9TJREMI
 vectorvein/chat_clients/mistral_client.py,sha256=1aKSylzBDaLYcFnaBIL4-sXSzWmXfBeON9Q0rq-ziWw,534
 vectorvein/chat_clients/moonshot_client.py,sha256=gbu-6nGxx8uM_U2WlI4Wus881rFRotzHtMSoYOcruGU,526
 vectorvein/chat_clients/openai_client.py,sha256=Nz6tV45pWcsOupxjnsRsGTicbQNJWIZyxuJoJ5DGMpg,527
-vectorvein/chat_clients/openai_compatible_client.py,sha256=
+vectorvein/chat_clients/openai_compatible_client.py,sha256=0nj5DwnY8llctoxbW-UXsqVjTRiuezAgkISnV_ygnA4,48869
 vectorvein/chat_clients/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
 vectorvein/chat_clients/stepfun_client.py,sha256=zsD2W5ahmR4DD9cqQTXmJr3txrGuvxbRWhFlRdwNijI,519
-vectorvein/chat_clients/utils.py,sha256=
+vectorvein/chat_clients/utils.py,sha256=2p2FJT4YNapJCI_kgWj81h0tgTj9-drSY33IizMoFrE,29723
 vectorvein/chat_clients/xai_client.py,sha256=eLFJJrNRJ-ni3DpshODcr3S1EJQLbhVwxyO1E54LaqM,491
 vectorvein/chat_clients/yi_client.py,sha256=RNf4CRuPJfixrwLZ3-DEc3t25QDe1mvZeb9sku2f8Bc,484
 vectorvein/chat_clients/zhipuai_client.py,sha256=Ys5DSeLCuedaDXr3PfG1EW2zKXopt-awO2IylWSwY0s,519
@@ -38,7 +38,7 @@ vectorvein/types/exception.py,sha256=KtnqZ-1DstHm95SZAyZdHhkGq1bJ4A9Aw3Zfdu-VIFo
 vectorvein/types/llm_parameters.py,sha256=Fo_-_rFkBvl_dUsCZxy2xNMC9Q4qVGLzDBL9jRVbXIQ,8489
 vectorvein/types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/types/settings.py,sha256=5wINBiT9qj3ZwDiE6Kf4r6wD3yY1bKN40xxZ7F6jEfI,4844
-vectorvein/utilities/media_processing.py,sha256=
+vectorvein/utilities/media_processing.py,sha256=TPm73MS_a1WVqpYv0N9BrZcOK-kxWa3QHSFUI_7WgmU,8257
 vectorvein/utilities/rate_limiter.py,sha256=dwolIUVw2wP83Odqpx0AAaE77de1GzxkYDGH4tM_u_4,10300
 vectorvein/utilities/retry.py,sha256=6KFS9R2HdhqM3_9jkjD4F36ZSpEx2YNFGOVlpOsUetM,2208
 vectorvein/workflow/graph/edge.py,sha256=1ckyyjCue_PLm7P1ItUfKOy6AKkemOpZ9m1WJ8UXIHQ,1072
@@ -65,4 +65,4 @@ vectorvein/workflow/utils/analyse.py,sha256=msmvyz35UTYTwqQR5sg9H0sm1vxmGDSmep9X
 vectorvein/workflow/utils/check.py,sha256=B_NdwqIqnc7Ko2HHqFpfOmWVaAu21tPITe0szKfiZKc,11414
 vectorvein/workflow/utils/json_to_code.py,sha256=P8dhhSNgKhTnW17qXNjLO2aLdb0rA8qMAWxhObol2TU,7295
 vectorvein/workflow/utils/layout.py,sha256=j0bRD3uaXu40xCS6U6BGahBI8FrHa5MiF55GbTrZ1LM,4565
-vectorvein-0.2.
+vectorvein-0.2.87.dist-info/RECORD,,
{vectorvein-0.2.85.dist-info → vectorvein-0.2.87.dist-info}/WHEEL
File without changes

{vectorvein-0.2.85.dist-info → vectorvein-0.2.87.dist-info}/entry_points.txt
File without changes