vectorvein 0.2.86__tar.gz → 0.2.87__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vectorvein-0.2.86 → vectorvein-0.2.87}/PKG-INFO +1 -1
- {vectorvein-0.2.86 → vectorvein-0.2.87}/pyproject.toml +1 -1
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/utils.py +53 -97
- {vectorvein-0.2.86 → vectorvein-0.2.87}/README.md +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/api/__init__.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/api/client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/api/exceptions.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/api/models.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/__init__.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/base_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/ernie_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/gemini_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/minimax_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/py.typed +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/xai_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/py.typed +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/server/token_server.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/settings/__init__.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/settings/py.typed +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/types/__init__.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/types/defaults.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/types/enums.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/types/exception.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/types/llm_parameters.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/types/py.typed +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/types/settings.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/utilities/media_processing.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/utilities/rate_limiter.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/utilities/retry.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/graph/edge.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/graph/node.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/graph/port.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/graph/workflow.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/__init__.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/audio_generation.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/control_flows.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/file_processing.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/image_generation.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/llms.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/media_editing.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/media_processing.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/output.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/relational_db.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/text_processing.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/tools.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/triggers.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/vector_db.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/video_generation.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/web_crawlers.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/utils/analyse.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/utils/check.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/utils/json_to_code.py +0 -0
- {vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/workflow/utils/layout.py +0 -0
{vectorvein-0.2.86 → vectorvein-0.2.87}/src/vectorvein/chat_clients/utils.py
@@ -103,18 +103,8 @@ def convert_type(value, value_type):
 
 def get_token_counts(text: str | dict, model: str = "", use_token_server_first: bool = True) -> int:
     if use_token_server_first and settings.token_server is not None:
-        base_url = (
-            settings.token_server.url
-            if settings.token_server.url is not None
-            else f"http://{settings.token_server.host}:{settings.token_server.port}"
-        )
-        _, response = (
-            Retry(httpx.post)
-            .args(url=f"{base_url}/count_tokens", json={"text": text, "model": model}, timeout=None)
-            .retry_times(5)
-            .sleep_time(1)
-            .run()
-        )
+        base_url = settings.token_server.url if settings.token_server.url is not None else f"http://{settings.token_server.host}:{settings.token_server.port}"
+        _, response = Retry(httpx.post).args(url=f"{base_url}/count_tokens", json={"text": text, "model": model}, timeout=None).retry_times(5).sleep_time(1).run()
         if response is not None:
             try:
                 result = response.json()
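The collapsed one-liners above (and in the hunks that follow) all go through the package's fluent Retry helper from src/vectorvein/utilities/retry.py, which is unchanged in this release. As an illustration of the interface only, not the package's actual implementation, a minimal wrapper matching these call sites might look like:

```python
import time


class Retry:
    """Minimal sketch of a fluent retry helper matching the call sites above.

    Illustrative only; the real implementation lives in
    src/vectorvein/utilities/retry.py and is unchanged in this release.
    """

    def __init__(self, func):
        self.func = func
        self._args, self._kwargs = (), {}
        self._times, self._sleep = 3, 1.0

    def args(self, *args, **kwargs):
        self._args, self._kwargs = args, kwargs
        return self

    def retry_times(self, times):
        self._times = times
        return self

    def sleep_time(self, seconds):
        self._sleep = seconds
        return self

    def run(self):
        # Returns (success, result); result is None when every attempt fails,
        # which is why each call site guards with `if response is None`.
        for _ in range(self._times):
            try:
                return True, self.func(*self._args, **self._kwargs)
            except Exception:
                time.sleep(self._sleep)
        return False, None
```

The two-element return explains the `_, response = ...` unpacking at every call site; these hunks only reflow the calls onto single lines without changing the retry behavior.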
@@ -147,13 +137,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
             ],
         }
 
-        _, response = (
-            Retry(httpx.post)
-            .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
         if response is None:
             return 1000
         result = response.json()
@@ -174,13 +158,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 {"role": "user", "content": text},
             ],
         }
-        _, response = (
-            Retry(httpx.post)
-            .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
         if response is None:
             return 1000
         result = response.json()
@@ -194,11 +172,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
             endpoint_id = endpoint_id["endpoint_id"]
         endpoint = settings.get_endpoint(endpoint_id)
 
-        api_base = (
-            endpoint.api_base.removesuffix("/openai/")
-            if endpoint.api_base
-            else "https://generativelanguage.googleapis.com/v1beta"
-        )
+        api_base = endpoint.api_base.removesuffix("/openai/") if endpoint.api_base else "https://generativelanguage.googleapis.com/v1beta"
         base_url = f"{api_base}/models/{backend_setting.id}:countTokens"
         params = {"key": endpoint.api_key}
         request_body = {
@@ -209,13 +183,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 ],
             },
         }
-        _, response = (
-            Retry(httpx.post)
-            .args(base_url, json=request_body, params=params, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(base_url, json=request_body, params=params, timeout=None).retry_times(5).sleep_time(10).run()
         if response is None:
             return 1000
         result = response.json()
@@ -230,12 +198,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
             endpoint_id = endpoint_choice
         endpoint = settings.get_endpoint(endpoint_id)
 
-        if (
-            endpoint.is_vertex
-            or endpoint.is_bedrock
-            or endpoint.endpoint_type == "anthropic_vertex"
-            or endpoint.endpoint_type == "anthropic_bedrock"
-        ):
+        if endpoint.is_vertex or endpoint.is_bedrock or endpoint.endpoint_type == "anthropic_vertex" or endpoint.endpoint_type == "anthropic_bedrock":
             continue
         elif endpoint.endpoint_type in ("default", "anthropic"):
             return (
@@ -277,13 +240,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 {"role": "user", "content": text},
             ],
         }
-        _, response = (
-            Retry(httpx.post)
-            .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
         if response is None:
             return 1000
         result = response.json()
@@ -306,13 +263,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 {"role": "user", "content": text},
             ],
         }
-        _, response = (
-            Retry(httpx.post)
-            .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
        if response is None:
            return 1000
        result = response.json()
@@ -453,48 +404,52 @@ def cutoff_messages(
     return system_message + messages
 
 
-def format_image_message(image: str, backend: BackendType = BackendType.OpenAI) -> dict:
-    from ..utilities.media_processing import ImageProcessor
+def format_image_message(image: str, backend: BackendType = BackendType.OpenAI, process_image: bool = True) -> dict:
+    if process_image:
+        from ..utilities.media_processing import ImageProcessor
 
-    image_processor = ImageProcessor(image_source=image)
-    if backend == BackendType.OpenAI:
-        return {
-            "type": "image_url",
-            "image_url": {"url": image_processor.data_url},
-        }
-    elif backend == BackendType.Anthropic:
-        return {
-            "type": "image",
-            "source": {
-                "type": "base64",
-                "media_type": image_processor.mime_type,
-                "data": image_processor.base64_image,
-            },
-        }
+        image_processor = ImageProcessor(image_source=image)
+        if backend == BackendType.OpenAI:
+            return {
+                "type": "image_url",
+                "image_url": {"url": image_processor.data_url},
+            }
+        elif backend == BackendType.Anthropic:
+            return {
+                "type": "image",
+                "source": {
+                    "type": "base64",
+                    "media_type": image_processor.mime_type,
+                    "data": image_processor.base64_image,
+                },
+            }
+        else:
+            return {
+                "type": "image_url",
+                "image_url": {"url": image_processor.data_url},
+            }
     else:
-        return {
-            "type": "image_url",
-            "image_url": {"url": image_processor.data_url},
-        }
+        if backend == BackendType.Anthropic:
+            return {"type": "image", "source": {"type": "base64", "media_type": "image/jpeg", "data": image}}
+        else:
+            return {"type": "image_url", "image_url": {"url": image}}
 
 
 def format_workflow_messages(message: dict, content: str, backend: BackendType):
     formatted_messages = []
 
     # 工具调用消息
-    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral, BackendType.Yi, BackendType.Gemini):
+    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral, BackendType.Yi, BackendType.Gemini, BackendType.DeepSeek):
         tool_call_message = {
-            "content":
+            "content": content,
             "role": "assistant",
             "tool_calls": [
                 {
-                    "id": message["metadata"]["selected_workflow"]["tool_call_id"],
+                    "id": message["metadata"]["selected_workflow"]["tool_call_id"] or message["metadata"]["record_id"],
                     "type": "function",
                     "function": {
                         "name": message["metadata"]["selected_workflow"]["function_name"],
-                        "arguments": json.dumps(
-                            message["metadata"]["selected_workflow"]["params"], ensure_ascii=False
-                        ),
+                        "arguments": json.dumps(message["metadata"]["selected_workflow"]["params"], ensure_ascii=False),
                     },
                 }
             ],
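The reworked format_image_message gains a process_image flag: with the default True, the source string is run through ImageProcessor to produce a data URL or base64 payload; with False, the string is forwarded untouched, and the Anthropic payload assumes it is already base64 JPEG data. A usage sketch with hypothetical inputs:

```python
# process_image=False forwards the source string as-is.
format_image_message("https://example.com/cat.png", BackendType.OpenAI, process_image=False)
# -> {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}}

# For Anthropic, the raw string must already be base64-encoded JPEG data,
# since the media_type is hardcoded to "image/jpeg" in this branch.
format_image_message("<base64-jpeg-data>", BackendType.Anthropic, process_image=False)
# -> {"type": "image", "source": {"type": "base64", "media_type": "image/jpeg", "data": "<base64-jpeg-data>"}}
```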
@@ -527,10 +482,10 @@ def format_workflow_messages(message: dict, content: str, backend: BackendType):
         formatted_messages.append(tool_call_message)
 
     # 工具调用结果消息
-    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral, BackendType.Yi, BackendType.Gemini):
+    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral, BackendType.Yi, BackendType.Gemini, BackendType.DeepSeek):
         tool_call_result_message = {
             "role": "tool",
-            "tool_call_id": message["metadata"]["selected_workflow"]["tool_call_id"],
+            "tool_call_id": message["metadata"]["selected_workflow"]["tool_call_id"] or message["metadata"]["record_id"],
             "name": message["metadata"]["selected_workflow"]["function_name"],
             "content": message["metadata"].get("workflow_result", ""),
         }
@@ -558,8 +513,8 @@ def format_workflow_messages(message: dict, content: str, backend: BackendType):
         }
         formatted_messages.append(tool_call_result_message)
 
-    if content and backend not in (BackendType.Mistral, BackendType.Anthropic):
-        formatted_messages.append({"role": "assistant", "content": content})
+    # if content and backend not in (BackendType.Mistral, BackendType.Anthropic):
+    #     formatted_messages.append({"role": "assistant", "content": content})
 
     return formatted_messages
 
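These two hunks also harden tool-call pairing: both the assistant tool_call id and the matching tool result's tool_call_id now fall back to the message's record_id when the stored tool_call_id is empty, so the pair keeps matching ids. The fallback in isolation, with field names taken from the diff:

```python
# Empty or missing tool_call_id falls back to the message's record_id,
# keeping the assistant tool_call and its "tool" result message paired.
call_id = message["metadata"]["selected_workflow"]["tool_call_id"] or message["metadata"]["record_id"]
```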
@@ -569,6 +524,7 @@ def transform_from_openai_message(
     backend: BackendType,
     native_multimodal: bool = False,
     function_call_available: bool = False,
+    process_image: bool = True,
 ):
     role = message.get("role", "user")
     content = message.get("content", "")
@@ -593,7 +549,7 @@ def transform_from_openai_message(
                 formatted_content.append({"type": "text", "text": item})
             elif isinstance(item, dict) and "type" in item:
                 if item["type"] == "image_url":
-                    formatted_content.append(format_image_message(item["image_url"]["url"], backend))
+                    formatted_content.append(format_image_message(item["image_url"]["url"], backend, process_image))
                 else:
                     formatted_content.append(item)
         if tool_calls:
@@ -674,6 +630,7 @@ def format_messages(
     backend: BackendType = BackendType.OpenAI,
     native_multimodal: bool = False,
     function_call_available: bool = False,
+    process_image: bool = True,
 ) -> list:
     """将 VectorVein 和 OpenAI 的 Message 序列化后的格式转换为不同模型支持的格式
 
@@ -681,6 +638,8 @@ def format_messages(
         messages (list): VectorVein Or OpenAI messages list.
         backend (str, optional): Messages format target backend. Defaults to BackendType.OpenAI.
         native_multimodal (bool, optional): Use native multimodal ability. Defaults to False.
+        function_call_available (bool, optional): Use function call ability. Defaults to False.
+        process_image (bool, optional): Process image. Defaults to True.
 
     Returns:
         list: 转换后的消息列表
@@ -697,17 +656,13 @@ def format_messages(
             content = message["content"]["text"]
             if message["content_type"] == "TXT":
                 role = "user" if message["author_type"] == "U" else "assistant"
-                formatted_message = format_text_message(
-                    content, role, message.get("attachments", []), backend, native_multimodal
-                )
+                formatted_message = format_text_message(content, role, message.get("attachments", []), backend, native_multimodal, process_image)
                 formatted_messages.append(formatted_message)
             elif message["content_type"] == "WKF" and message["status"] in ("S", "R"):
                 formatted_messages.extend(format_workflow_messages(message, content, backend))
             else:
                 # 处理 OpenAI 格式的消息
-                formatted_message = transform_from_openai_message(
-                    message, backend, native_multimodal, function_call_available
-                )
+                formatted_message = transform_from_openai_message(message, backend, native_multimodal, function_call_available, process_image)
                 formatted_messages.append(formatted_message)
 
     return formatted_messages
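Taken together, the new process_image parameter threads from format_messages down through format_text_message, transform_from_openai_message, and finally format_image_message. A hypothetical call, with the message shape following the OpenAI-style branch above:

```python
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What is in this picture?"},
            {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
        ],
    }
]
# process_image=False (new in 0.2.87) skips ImageProcessor and forwards
# image URLs/strings untouched into the backend-specific payloads.
formatted = format_messages(messages, backend=BackendType.OpenAI, process_image=False)
```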
@@ -719,6 +674,7 @@ def format_text_message(
     attachments: list,
     backend: BackendType,
     native_multimodal: bool,
+    process_image: bool = True,
 ):
     images_extensions = ("jpg", "jpeg", "png", "bmp")
     has_images = any(attachment.lower().endswith(images_extensions) for attachment in attachments)
@@ -733,7 +689,7 @@ def format_text_message(
             "content": [
                 {"type": "text", "text": content},
                 *[
-                    format_image_message(image=attachment, backend=backend)
+                    format_image_message(image=attachment, backend=backend, process_image=process_image)
                     for attachment in attachments
                     if attachment.lower().endswith(images_extensions)
                 ],