vectorvein 0.3.17__py3-none-any.whl → 0.3.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -21,7 +21,6 @@ from ..types.llm_parameters import (
     NOT_GIVEN,
     ToolParam,
     VectorVeinMessage,
-    VectorVeinTextMessage,
     VectorVeinWorkflowMessage,
 )
 
@@ -159,7 +158,19 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
             ],
         }
 
-        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
+        _, response = (
+            Retry(httpx.post)
+            .args(
+                url=tokenize_url,
+                headers=headers,
+                json=request_body,
+                timeout=None,
+                proxy=endpoint.proxy,
+            )
+            .retry_times(5)
+            .sleep_time(10)
+            .run()
+        )
         if response is None:
             return 1000
         result = response.json()
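
The functional change in this and the following token-server hunks is the new `proxy=endpoint.proxy` keyword passed to `httpx.post`; the rest is a cosmetic reformatting of the chained `Retry` call. Note that the per-request `proxy` argument is only accepted by relatively recent httpx releases (roughly 0.26 and later). As a rough, hypothetical illustration of the `Retry(...).args(...).retry_times(...).sleep_time(...).run()` interface the code relies on (not the package's actual implementation), such a helper could look like:

```python
import time
from typing import Any, Callable


class Retry:
    """Minimal sketch of a chained retry helper; the real vectorvein Retry
    utility may differ in details."""

    def __init__(self, func: Callable[..., Any]) -> None:
        self._func = func
        self._args: tuple[Any, ...] = ()
        self._kwargs: dict[str, Any] = {}
        self._times = 3
        self._sleep = 1.0

    def args(self, *args: Any, **kwargs: Any) -> "Retry":
        self._args, self._kwargs = args, kwargs
        return self

    def retry_times(self, times: int) -> "Retry":
        self._times = times
        return self

    def sleep_time(self, seconds: float) -> "Retry":
        self._sleep = seconds
        return self

    def run(self) -> tuple[bool, Any]:
        # Returns (success, result); result is None when every attempt fails,
        # which matches the `if response is None: return 1000` fallback above.
        for attempt in range(self._times):
            try:
                return True, self._func(*self._args, **self._kwargs)
            except Exception:
                if attempt < self._times - 1:
                    time.sleep(self._sleep)
        return False, None
```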
@@ -179,7 +190,19 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 {"role": "user", "content": text},
             ],
         }
-        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
+        _, response = (
+            Retry(httpx.post)
+            .args(
+                url=tokenize_url,
+                headers=headers,
+                json=request_body,
+                timeout=None,
+                proxy=endpoint.proxy,
+            )
+            .retry_times(5)
+            .sleep_time(10)
+            .run()
+        )
         if response is None:
             return 1000
         result = response.json()
@@ -203,7 +226,19 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 ],
             },
         }
-        _, response = Retry(httpx.post).args(base_url, json=request_body, params=params, timeout=None).retry_times(5).sleep_time(10).run()
+        _, response = (
+            Retry(httpx.post)
+            .args(
+                base_url,
+                json=request_body,
+                params=params,
+                timeout=None,
+                proxy=endpoint.proxy,
+            )
+            .retry_times(5)
+            .sleep_time(10)
+            .run()
+        )
         if response is None:
             return 1000
         result = response.json()
@@ -227,10 +262,12 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
         if endpoint.is_vertex or endpoint.is_bedrock or endpoint.endpoint_type == "anthropic_vertex" or endpoint.endpoint_type == "anthropic_bedrock":
             continue
         elif endpoint.endpoint_type in ("default", "anthropic"):
+            http_client = httpx.Client(proxy=endpoint.proxy)
             return (
                 Anthropic(
                     api_key=endpoint.api_key,
                     base_url=endpoint.api_base,
+                    http_client=http_client,
                 )
                 .beta.messages.count_tokens(messages=[{"role": "user", "content": text}], model=model)
                 .input_tokens
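
Here the proxy cannot be passed per request, so a pre-configured `httpx.Client(proxy=endpoint.proxy)` is handed to the Anthropic SDK via its `http_client` parameter, and the count is read from the `input_tokens` field of the response. A minimal standalone sketch of the same pattern, with placeholder credentials, proxy URL, and model name:

```python
import httpx
from anthropic import Anthropic

# Placeholder values; in the package these come from the endpoint settings.
client = Anthropic(
    api_key="sk-ant-...",
    base_url="https://api.anthropic.com",
    http_client=httpx.Client(proxy="http://127.0.0.1:7890"),
)

result = client.beta.messages.count_tokens(
    messages=[{"role": "user", "content": "Hello, world"}],
    model="claude-3-5-sonnet-20241022",
)
print(result.input_tokens)
```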
@@ -265,7 +302,19 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 {"role": "user", "content": text},
             ],
         }
-        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
+        _, response = (
+            Retry(httpx.post)
+            .args(
+                url=tokenize_url,
+                headers=headers,
+                json=request_body,
+                timeout=None,
+                proxy=endpoint.proxy,
+            )
+            .retry_times(5)
+            .sleep_time(10)
+            .run()
+        )
         if response is None:
             return 1000
         result = response.json()
@@ -287,7 +336,19 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 {"role": "user", "content": text},
             ],
         }
-        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
+        _, response = (
+            Retry(httpx.post)
+            .args(
+                url=tokenize_url,
+                headers=headers,
+                json=request_body,
+                timeout=None,
+                proxy=endpoint.proxy,
+            )
+            .retry_times(5)
+            .sleep_time(10)
+            .run()
+        )
         if response is None:
             return 1000
         result = response.json()
@@ -578,27 +639,33 @@ def transform_from_openai_message(
                 formatted_content.append(item)
         if tool_calls:
             for tool_call in tool_calls:
-                formatted_content.append(
-                    {
-                        "type": "tool_use",
-                        "id": tool_call["id"],
-                        "name": tool_call["function"]["name"],
-                        "input": json.loads(tool_call["function"]["arguments"]) if tool_call["function"]["arguments"] else {},
-                    }
-                )
+                if isinstance(tool_call, dict) and "function" in tool_call:
+                    function_info = tool_call["function"]
+                    if isinstance(function_info, dict):
+                        formatted_content.append(
+                            {
+                                "type": "tool_use",
+                                "id": tool_call["id"],
+                                "name": function_info["name"],
+                                "input": json.loads(function_info["arguments"]) if function_info.get("arguments") else {},
+                            }
+                        )
         return {"role": role, "content": formatted_content}
     else:
         if tool_calls:
            formatted_content = [{"type": "text", "text": content}] if content else []
            for tool_call in tool_calls:
-                formatted_content.append(
-                    {
-                        "type": "tool_use",
-                        "id": tool_call["id"],
-                        "name": tool_call["function"]["name"],
-                        "input": json.loads(tool_call["function"]["arguments"]) if tool_call["function"]["arguments"] else {},
-                    }
-                )
+                if isinstance(tool_call, dict) and "function" in tool_call:
+                    function_info = tool_call["function"]
+                    if isinstance(function_info, dict):
+                        formatted_content.append(
+                            {
+                                "type": "tool_use",
+                                "id": tool_call["id"],
+                                "name": function_info["name"],
+                                "input": json.loads(function_info["arguments"]) if function_info.get("arguments") else {},
+                            }
+                        )
            return {"role": role, "content": formatted_content}
        else:
            return {"role": role, "content": content}
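
Both branches of `transform_from_openai_message` now validate a tool call before converting it into an Anthropic-style `tool_use` block: the entry must be a dict with a dict-valued `"function"`, and `arguments` is only passed to `json.loads` when it is non-empty, so empty argument strings become `{}` instead of raising. Extracted from the surrounding function, the guarded conversion behaves roughly like this standalone sketch (the helper name is illustrative, not part of the package):

```python
import json
from typing import Any


def to_tool_use_blocks(tool_calls: list[Any]) -> list[dict[str, Any]]:
    """Sketch of the defensive conversion shown in the diff above."""
    blocks: list[dict[str, Any]] = []
    for tool_call in tool_calls:
        if isinstance(tool_call, dict) and "function" in tool_call:
            function_info = tool_call["function"]
            if isinstance(function_info, dict):
                blocks.append(
                    {
                        "type": "tool_use",
                        "id": tool_call["id"],
                        "name": function_info["name"],
                        # Empty or missing arguments become an empty input dict
                        # instead of failing inside json.loads.
                        "input": json.loads(function_info["arguments"]) if function_info.get("arguments") else {},
                    }
                )
    return blocks


# The malformed second entry is skipped instead of raising.
print(to_tool_use_blocks([
    {"id": "call_1", "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}},
    {"id": "call_2", "function": "not-a-dict"},
]))
```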
@@ -676,24 +743,30 @@ def format_messages(
 
     for message in messages:
         if is_vectorvein_message(message):
-            message = cast(VectorVeinMessage, message)
-            match message["content_type"]:
+            # We know this is a VectorVein message, so we can safely cast it
+            vectorvein_message = message  # type: ignore
+            match vectorvein_message.get("content_type"):
                 case "TXT":
-                    message = cast(VectorVeinTextMessage, message)
+                    # Remove unnecessary cast
+                    pass
                 case "WKF":
-                    message = cast(VectorVeinWorkflowMessage, message)
+                    # Remove unnecessary cast
+                    pass
                 case _:
-                    raise ValueError(f"Unsupported message type: {message['content_type']}")
+                    content_type = vectorvein_message.get("content_type", "unknown")
+                    raise ValueError(f"Unsupported message type: {content_type}")
             # Handle VectorVein-format messages
-            content = message["content"]["text"]
-            if message["content_type"] == "TXT":
-                message = cast(VectorVeinTextMessage, message)
-                role = "user" if message["author_type"] == "U" else "assistant"
-                formatted_message = format_text_message(content, role, message.get("attachments", []), backend, native_multimodal, process_image)
+            content_dict = vectorvein_message.get("content")
+            if isinstance(content_dict, dict) and "text" in content_dict:
+                content = content_dict["text"]
+            else:
+                content = ""
+            if vectorvein_message.get("content_type") == "TXT":
+                role = "user" if vectorvein_message.get("author_type") == "U" else "assistant"
+                formatted_message = format_text_message(content, role, vectorvein_message.get("attachments", []), backend, native_multimodal, process_image)
                 formatted_messages.append(formatted_message)
-            elif message["content_type"] == "WKF" and message["status"] in ("S", "R"):
-                message = cast(VectorVeinWorkflowMessage, message)
-                formatted_messages.extend(format_workflow_messages(message, content, backend))
+            elif vectorvein_message.get("content_type") == "WKF" and vectorvein_message.get("status") in ("S", "R"):
+                formatted_messages.extend(format_workflow_messages(vectorvein_message, content, backend))  # type: ignore
         else:
             # Handle OpenAI-format messages
             message = cast(ChatCompletionMessageParam, message)
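
In `format_messages`, the explicit casts to `VectorVeinTextMessage` / `VectorVeinWorkflowMessage` are dropped (which is why that import disappears in the first hunk) and field access moves from subscripting to `.get()`, so a message missing `content`, `content_type`, `author_type`, or `status` degrades gracefully instead of raising `KeyError`. A small, hypothetical message illustrates the new fallbacks:

```python
# Hypothetical partial VectorVein message with no "content" key.
message = {"content_type": "TXT", "author_type": "U"}

content_dict = message.get("content")
content = content_dict["text"] if isinstance(content_dict, dict) and "text" in content_dict else ""
role = "user" if message.get("author_type") == "U" else "assistant"
print(role, repr(content))  # -> user ''
```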
@@ -250,7 +250,7 @@ class Settings(BaseModel):
             delattr(self, backend_name)
 
         # Set version to 2
-        self.VERSION = "2"
+        self.VERSION = "2"  # type: ignore
 
         return self
 
@@ -90,6 +90,13 @@ MOONSHOT_MODELS: Final[dict[str, ModelSettingDict]] = {
         "response_format_available": True,
         "native_multimodal": False,
     },
+    "kimi-k2-turbo-preview": {
+        "id": "kimi-k2-turbo-preview",
+        "context_length": 131072,
+        "function_call_available": True,
+        "response_format_available": True,
+        "native_multimodal": False,
+    },
 }
 
 # Deepseek models
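
The only change here is a new entry for kimi-k2-turbo-preview: a 131072-token context window, function calling and response-format support, and no native multimodal input. Since `MOONSHOT_MODELS` is a plain dict of model settings, callers can read the new entry like any other; a self-contained sketch (the table is inlined here purely for illustration):

```python
# The new entry, copied from the diff above.
MOONSHOT_MODELS = {
    "kimi-k2-turbo-preview": {
        "id": "kimi-k2-turbo-preview",
        "context_length": 131072,
        "function_call_available": True,
        "response_format_available": True,
        "native_multimodal": False,
    },
}

settings = MOONSHOT_MODELS["kimi-k2-turbo-preview"]
if settings["function_call_available"]:
    print(f"{settings['id']} supports tools within {settings['context_length']} tokens of context")
```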
@@ -729,7 +729,26 @@ class FfmpegProcess(Node):
                 name="input_files",
                 port_type=PortType.FILE,
                 value=[],
-                support_file_types=[".mp4", ".avi", ".mov", ".mkv", ".mp3", ".wav", ".ogg", ".m4a", ".jpg", ".jpeg", ".png", ".gif", ".bmp", ".webp", ".webm", ".flv", ".wmv", ".3gp"],
+                support_file_types=[
+                    ".mp4",
+                    ".avi",
+                    ".mov",
+                    ".mkv",
+                    ".mp3",
+                    ".wav",
+                    ".ogg",
+                    ".m4a",
+                    ".jpg",
+                    ".jpeg",
+                    ".png",
+                    ".gif",
+                    ".bmp",
+                    ".webp",
+                    ".webm",
+                    ".flv",
+                    ".wmv",
+                    ".3gp",
+                ],
                 multiple=True,
                 required=True,
                 show=True,
@@ -26,7 +26,6 @@ class ScheduleTrigger(Node):
             category="triggers",
             task_name="triggers.schedule_trigger",
             node_id=id,
-            has_inputs=False,
             ports={
                 "schedule": InputPort(
                     name="schedule",