vectorvein 0.1.56__py3-none-any.whl → 0.1.59__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

vectorvein/chat_clients/anthropic_client.py

@@ -249,6 +249,8 @@ class AnthropicChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage:
  pass
@@ -265,6 +267,8 @@ class AnthropicChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> Generator[ChatCompletionDeltaMessage, None, None]:
  pass
@@ -281,6 +285,8 @@ class AnthropicChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
  pass
@@ -296,6 +302,8 @@ class AnthropicChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ):
  if model is not None:
@@ -304,10 +312,6 @@ class AnthropicChatClient(BaseChatClient):
  self.stream = stream
  if temperature is not None:
  self.temperature = temperature
- if isinstance(tools, OpenAINotGiven):
- tools = NOT_GIVEN
- if isinstance(tool_choice, OpenAINotGiven):
- tool_choice = NOT_GIVEN

  if self.random_endpoint:
  self.random_endpoint = True
@@ -348,6 +352,8 @@ class AnthropicChatClient(BaseChatClient):
  tool_choice=_tool_choice,
  response_format=response_format,
  stream_options=stream_options,
+ top_p=top_p,
+ skip_cutoff=skip_cutoff,
  **kwargs,
  )
  for chunk in response:
@@ -373,12 +379,20 @@ class AnthropicChatClient(BaseChatClient):
  tools=_tools,
  tool_choice=_tool_choice,
  response_format=response_format,
- stream_options=stream_options,
+ top_p=top_p,
+ skip_cutoff=skip_cutoff,
  **kwargs,
  )

  assert isinstance(self.raw_client, Anthropic | AnthropicVertex)

+ if isinstance(tools, OpenAINotGiven):
+ tools = NOT_GIVEN
+ if isinstance(tool_choice, OpenAINotGiven):
+ tool_choice = NOT_GIVEN
+ if isinstance(top_p, OpenAINotGiven) or top_p is None:
+ top_p = NOT_GIVEN
+
  raw_client = self.raw_client  # self.model_id is assigned after self.raw_client is accessed
  self.model_setting = self.backend_settings.models[self.model]
  if self.model_id is None:
@@ -390,7 +404,7 @@ class AnthropicChatClient(BaseChatClient):
  else:
  system_prompt = ""

- if self.context_length_control == ContextLengthControlType.Latest:
+ if not skip_cutoff and self.context_length_control == ContextLengthControlType.Latest:
  messages = cutoff_messages(
  messages,
  max_count=self.model_setting.context_length,
@@ -424,6 +438,7 @@ class AnthropicChatClient(BaseChatClient):
  max_tokens=max_tokens,
  tools=tools_params,
  tool_choice=tool_choice_param,
+ top_p=top_p,
  **kwargs,
  )

@@ -486,6 +501,7 @@ class AnthropicChatClient(BaseChatClient):
  max_tokens=max_tokens,
  tools=tools_params,
  tool_choice=tool_choice_param,
+ top_p=top_p,
  **kwargs,
  )

@@ -614,6 +630,8 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage:
  pass
@@ -630,6 +648,8 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> AsyncGenerator[ChatCompletionDeltaMessage, Any]:
  pass
@@ -646,6 +666,8 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, Any]:
  pass
@@ -661,6 +683,8 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ):
  if model is not None:
@@ -669,10 +693,6 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
  self.stream = stream
  if temperature is not None:
  self.temperature = temperature
- if isinstance(tools, OpenAINotGiven):
- tools = NOT_GIVEN
- if isinstance(tool_choice, OpenAINotGiven):
- tool_choice = NOT_GIVEN

  if self.random_endpoint:
  self.random_endpoint = True
@@ -714,6 +734,8 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
  tool_choice=_tool_choice,
  response_format=response_format,
  stream_options=stream_options,
+ top_p=top_p,
+ skip_cutoff=skip_cutoff,
  **kwargs,
  )
  async for chunk in response:
@@ -740,11 +762,20 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
  tools=_tools,
  tool_choice=_tool_choice,
  response_format=response_format,
+ top_p=top_p,
+ skip_cutoff=skip_cutoff,
  **kwargs,
  )

  assert isinstance(self.raw_client, AsyncAnthropic | AsyncAnthropicVertex)

+ if isinstance(tools, OpenAINotGiven):
+ tools = NOT_GIVEN
+ if isinstance(tool_choice, OpenAINotGiven):
+ tool_choice = NOT_GIVEN
+ if isinstance(top_p, OpenAINotGiven) or top_p is None:
+ top_p = NOT_GIVEN
+
  raw_client = self.raw_client  # self.model_id is assigned after self.raw_client is accessed
  self.model_setting = self.backend_settings.models[self.model]
  if self.model_id is None:
@@ -756,7 +787,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
  else:
  system_prompt = ""

- if self.context_length_control == ContextLengthControlType.Latest:
+ if not skip_cutoff and self.context_length_control == ContextLengthControlType.Latest:
  messages = cutoff_messages(
  messages,
  max_count=self.model_setting.context_length,
@@ -790,6 +821,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
  max_tokens=max_tokens,
  tools=tools_params,
  tool_choice=tool_choice_param,
+ top_p=top_p,
  **kwargs,
  )

@@ -852,6 +884,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
  max_tokens=max_tokens,
  tools=tools_params,
  tool_choice=tool_choice_param,
+ top_p=top_p,
  **kwargs,
  )

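The practical effect of the two new keyword arguments is easiest to see from the caller's side. The sketch below is illustrative only: the client construction and the message format are assumptions not shown in this diff, and it relies solely on the create_completion signature visible in the hunks above, where top_p is forwarded to the underlying Anthropic request and skip_cutoff=True bypasses the cutoff_messages() trimming.

    from vectorvein.chat_clients.anthropic_client import AnthropicChatClient

    def ask(client: AnthropicChatClient, question: str):
        # Only top_p and skip_cutoff are new in 0.1.59; the other arguments follow
        # the signatures shown in the hunks above.
        return client.create_completion(
            messages=[{"role": "user", "content": question}],  # assumed message format
            temperature=0.7,
            top_p=0.9,          # nucleus-sampling cap, now forwarded to the Anthropic request
            skip_cutoff=True,   # skip the context-length trimming done by cutoff_messages()
        )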

vectorvein/chat_clients/base_client.py

@@ -73,6 +73,8 @@ class BaseChatClient(ABC):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage:
  pass
@@ -90,6 +92,8 @@ class BaseChatClient(ABC):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> Generator[ChatCompletionDeltaMessage, Any, None]:
  pass
@@ -107,6 +111,8 @@ class BaseChatClient(ABC):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
  pass
@@ -123,6 +129,8 @@ class BaseChatClient(ABC):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
  pass
@@ -137,6 +145,7 @@ class BaseChatClient(ABC):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
  **kwargs,
  ) -> Generator[ChatCompletionDeltaMessage, Any, None]:
  return self.create_completion(
@@ -206,6 +215,8 @@ class BaseAsyncChatClient(ABC):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage:
  pass
@@ -223,6 +234,8 @@ class BaseAsyncChatClient(ABC):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> AsyncGenerator[ChatCompletionDeltaMessage, None]:
  pass
@@ -240,6 +253,8 @@ class BaseAsyncChatClient(ABC):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, None]:
  pass
@@ -256,6 +271,8 @@ class BaseAsyncChatClient(ABC):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, None]:
  pass
@@ -270,6 +287,7 @@ class BaseAsyncChatClient(ABC):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
  **kwargs,
  ) -> AsyncGenerator[ChatCompletionDeltaMessage, None]:
  return await self.create_completion(
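Note that in both base classes the streaming helper gains only top_p, not skip_cutoff, and it simply forwards its arguments to create_completion. A condensed, hypothetical rendering of that delegation follows; the full argument list is abridged and the stream=True pin is an assumption, since the forwarded arguments sit outside the hunk.

    # Hypothetical condensation of BaseChatClient.create_stream() as suggested by
    # the hunk above; the point is only the forwarding of the new top_p argument.
    # NOT_GIVEN is the library's own "not provided" sentinel (import path not shown here).
    def create_stream(self, *, messages, top_p=NOT_GIVEN, **kwargs):
        return self.create_completion(
            messages=messages,
            stream=True,      # assumption: the real method pins streaming mode here
            top_p=top_p,      # new in 0.1.59
            **kwargs,         # tools, tool_choice, response_format, stream_options, ...
        )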

vectorvein/chat_clients/gemini_client.py

@@ -65,6 +65,8 @@ class GeminiChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage:
  pass
@@ -81,6 +83,8 @@ class GeminiChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> Generator[ChatCompletionDeltaMessage, None, None]:
  pass
@@ -97,6 +101,8 @@ class GeminiChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
  pass
@@ -112,6 +118,8 @@ class GeminiChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ):
  if model is not None:
@@ -129,7 +137,7 @@ class GeminiChatClient(BaseChatClient):
  else:
  system_prompt = ""

- if self.context_length_control == ContextLengthControlType.Latest:
+ if not skip_cutoff and self.context_length_control == ContextLengthControlType.Latest:
  messages = cutoff_messages(
  messages,
  max_count=self.model_setting.context_length,
@@ -137,16 +145,19 @@ class GeminiChatClient(BaseChatClient):
  model=self.model_setting.id,
  )

+ tools_params = {}
  if tools:
  tools_params = {"tools": [{"function_declarations": [tool["function"] for tool in tools]}]}
- else:
- tools_params = {}

  response_format_params = {}
  if response_format is not None:
  if response_format.get("type") == "json_object":
  response_format_params = {"response_mime_type": "application/json"}

+ top_p_params = {}
+ if top_p:
+ top_p_params = {"top_p": top_p}
+
  if self.random_endpoint:
  self.random_endpoint = True
  endpoint_choice = random.choice(self.backend_settings.models[self.model].endpoints)
@@ -168,6 +179,7 @@ class GeminiChatClient(BaseChatClient):
  "generationConfig": {
  "temperature": self.temperature,
  "maxOutputTokens": max_tokens,
+ **top_p_params,
  **response_format_params,
  },
  **tools_params,
@@ -189,7 +201,7 @@ class GeminiChatClient(BaseChatClient):
  if self.http_client:
  client = self.http_client
  else:
- client = httpx.Client()
+ client = httpx.Client(timeout=300)
  with client.stream("POST", url, headers=headers, params=params, json=request_body) as response:
  for chunk in response.iter_lines():
  message = {"content": "", "tool_calls": []}
@@ -228,8 +240,10 @@ class GeminiChatClient(BaseChatClient):
  if self.http_client:
  client = self.http_client
  else:
- client = httpx.Client()
+ client = httpx.Client(timeout=300)
  response = client.post(url, json=request_body, headers=headers, params=params, timeout=None).json()
+ if "error" in response:
+ raise Exception(response["error"])
  result = {
  "content": "",
  "usage": {
@@ -302,6 +316,8 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage:
  pass
@@ -318,6 +334,8 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> AsyncGenerator[ChatCompletionDeltaMessage, Any]:
  pass
@@ -334,6 +352,8 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, Any]:
  pass
@@ -349,6 +369,8 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ):
  if model is not None:
@@ -366,7 +388,7 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
  else:
  system_prompt = ""

- if self.context_length_control == ContextLengthControlType.Latest:
+ if not skip_cutoff and self.context_length_control == ContextLengthControlType.Latest:
  messages = cutoff_messages(
  messages,
  max_count=self.model_setting.context_length,
@@ -374,16 +396,19 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
  model=self.model_setting.id,
  )

+ tools_params = {}
  if tools:
  tools_params = {"tools": [{"function_declarations": [tool["function"] for tool in tools]}]}
- else:
- tools_params = {}

  response_format_params = {}
  if response_format is not None:
  if response_format.get("type") == "json_object":
  response_format_params = {"response_mime_type": "application/json"}

+ top_p_params = {}
+ if top_p:
+ top_p_params = {"top_p": top_p}
+
  if self.random_endpoint:
  self.random_endpoint = True
  endpoint_choice = random.choice(self.backend_settings.models[self.model].endpoints)
@@ -405,6 +430,7 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
  "generationConfig": {
  "temperature": self.temperature,
  "maxOutputTokens": max_tokens,
+ **top_p_params,
  **response_format_params,
  },
  **tools_params,
@@ -426,7 +452,7 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
  if self.http_client:
  client = self.http_client
  else:
- client = httpx.AsyncClient()
+ client = httpx.AsyncClient(timeout=300)
  async with client.stream("POST", url, headers=headers, params=params, json=request_body) as response:
  async for chunk in response.aiter_lines():
  message = {"content": "", "tool_calls": []}
@@ -465,10 +491,12 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
  if self.http_client:
  client = self.http_client
  else:
- client = httpx.AsyncClient()
+ client = httpx.AsyncClient(timeout=300)
  async with client:
  response = await client.post(url, json=request_body, headers=headers, params=params, timeout=None)
  response = response.json()
+ if "error" in response:
+ raise Exception(response["error"])
  result = {
  "content": "",
  "usage": {
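Taken together, the Gemini changes follow one pattern: optional settings are built as small dicts and spread into generationConfig only when present, the fallback httpx clients get an explicit 300-second timeout, and API-level errors are raised immediately instead of surfacing later as missing fields. Below is a self-contained sketch of that request construction; the function name and argument list are illustrative, not the library's API.

    import httpx

    def gemini_generate(url: str, api_key: str, contents: list, temperature: float,
                        max_tokens: int, top_p: float | None = None,
                        response_format: dict | None = None) -> dict:
        # Optional parameters are only included when set, mirroring the hunks above.
        top_p_params = {"top_p": top_p} if top_p else {}
        response_format_params = (
            {"response_mime_type": "application/json"}
            if response_format and response_format.get("type") == "json_object"
            else {}
        )
        request_body = {
            "contents": contents,
            "generationConfig": {
                "temperature": temperature,
                "maxOutputTokens": max_tokens,
                **top_p_params,             # new in 0.1.59
                **response_format_params,
            },
        }
        client = httpx.Client(timeout=300)  # 0.1.59 sets an explicit 300 s timeout
        response = client.post(url, json=request_body, params={"key": api_key}, timeout=None).json()
        if "error" in response:             # new in 0.1.59: fail fast on API errors
            raise Exception(response["error"])
        return response

One side effect worth noting: because the check is "if top_p:", an explicit top_p=0.0 is treated the same as "not set" and is omitted from the request; the MiniMax client below uses the identical truthiness check.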

vectorvein/chat_clients/minimax_client.py

@@ -91,6 +91,8 @@ class MiniMaxChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage:
  pass
@@ -107,6 +109,8 @@ class MiniMaxChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> Generator[ChatCompletionDeltaMessage, None, None]:
  pass
@@ -123,6 +127,8 @@ class MiniMaxChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
  pass
@@ -138,6 +144,8 @@ class MiniMaxChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ):
  if model is not None:
@@ -162,7 +170,7 @@ class MiniMaxChatClient(BaseChatClient):
  self.endpoint_id = endpoint_choice
  self.endpoint = settings.get_endpoint(self.endpoint_id)

- if self.context_length_control == ContextLengthControlType.Latest:
+ if not skip_cutoff and self.context_length_control == ContextLengthControlType.Latest:
  messages = cutoff_messages(
  messages,
  max_count=self.model_setting.context_length,
@@ -190,6 +198,11 @@ class MiniMaxChatClient(BaseChatClient):
  else:
  tools_params = {}

+ if top_p:
+ top_p_params = {"top_p": top_p}
+ else:
+ top_p_params = {}
+
  if max_tokens is None:
  max_output_tokens = self.model_setting.max_output_tokens
  if max_output_tokens is not None:
@@ -218,6 +231,7 @@ class MiniMaxChatClient(BaseChatClient):
  "temperature": self.temperature,
  "stream": self.stream,
  "mask_sensitive_info": False,
+ **top_p_params,
  **tools_params,
  **kwargs,
  }
@@ -326,6 +340,8 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage:
  pass
@@ -342,6 +358,8 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> AsyncGenerator[ChatCompletionDeltaMessage, Any]:
  pass
@@ -358,6 +376,8 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, Any]:
  pass
@@ -373,6 +393,8 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None = None,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ):
  if model is not None:
@@ -397,7 +419,7 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
  self.endpoint_id = endpoint_choice
  self.endpoint = settings.get_endpoint(self.endpoint_id)

- if self.context_length_control == ContextLengthControlType.Latest:
+ if not skip_cutoff and self.context_length_control == ContextLengthControlType.Latest:
  messages = cutoff_messages(
  messages,
  max_count=self.model_setting.context_length,
@@ -423,6 +445,11 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
  else:
  tools_params = {}

+ if top_p:
+ top_p_params = {"top_p": top_p}
+ else:
+ top_p_params = {}
+
  if max_tokens is None:
  max_output_tokens = self.model_setting.max_output_tokens
  if max_output_tokens is not None:
@@ -451,6 +478,7 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
  "temperature": self.temperature,
  "stream": self.stream,
  "mask_sensitive_info": False,
+ **top_p_params,
  **tools_params,
  **kwargs,
  }

vectorvein/chat_clients/openai_compatible_client.py

@@ -24,6 +24,7 @@ from ..types.llm_parameters import (
  ToolParam,
  ToolChoice,
  OpenAINotGiven,
+ AnthropicNotGiven,
  Usage,
  ChatCompletionMessage,
  ChatCompletionDeltaMessage,
@@ -97,6 +98,8 @@ class OpenAICompatibleChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None | OpenAINotGiven = NOT_GIVEN,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage:
  pass
@@ -113,6 +116,8 @@ class OpenAICompatibleChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None | OpenAINotGiven = NOT_GIVEN,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> Generator[ChatCompletionDeltaMessage, None, None]:
  pass
@@ -129,6 +134,8 @@ class OpenAICompatibleChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None | OpenAINotGiven = NOT_GIVEN,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
  pass
@@ -144,6 +151,8 @@ class OpenAICompatibleChatClient(BaseChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None | OpenAINotGiven = NOT_GIVEN,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ):
  if model is not None:
@@ -152,13 +161,15 @@ class OpenAICompatibleChatClient(BaseChatClient):
  self.stream = stream
  if temperature is not None:
  self.temperature = temperature
+ if isinstance(top_p, AnthropicNotGiven):
+ top_p = NOT_GIVEN

  raw_client = self.raw_client  # self.model_id is assigned after self.raw_client is accessed
  self.model_setting = self.backend_settings.models[self.model]
  if self.model_id is None:
  self.model_id = self.model_setting.id

- if self.context_length_control == ContextLengthControlType.Latest:
+ if not skip_cutoff and self.context_length_control == ContextLengthControlType.Latest:
  messages = cutoff_messages(
  messages,
  max_count=self.model_setting.context_length,
@@ -206,6 +217,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
  stream=True,
  temperature=self.temperature,
  max_tokens=max_tokens,
+ top_p=top_p,
  **_stream_options_params,
  **self.response_format,
  **tools_params,
@@ -260,6 +272,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
  stream=False,
  temperature=self.temperature,
  max_tokens=max_tokens,
+ top_p=top_p,
  **self.response_format,
  **tools_params,
  **kwargs,
@@ -351,6 +364,8 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None | OpenAINotGiven = NOT_GIVEN,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage:
  pass
@@ -367,6 +382,8 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None | OpenAINotGiven = NOT_GIVEN,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> AsyncGenerator[ChatCompletionDeltaMessage, Any]:
  pass
@@ -383,6 +400,8 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None | OpenAINotGiven = NOT_GIVEN,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, Any]:
  pass
@@ -398,6 +417,8 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
  tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
  response_format: dict | None = None,
  stream_options: ChatCompletionStreamOptionsParam | None | OpenAINotGiven = NOT_GIVEN,
+ top_p: float | NotGiven | None = NOT_GIVEN,
+ skip_cutoff: bool = False,
  **kwargs,
  ):
  if model is not None:
@@ -406,13 +427,15 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
  self.stream = stream
  if temperature is not None:
  self.temperature = temperature
+ if isinstance(top_p, AnthropicNotGiven):
+ top_p = NOT_GIVEN

  raw_client = self.raw_client  # self.model_id is assigned after self.raw_client is accessed
  self.model_setting = self.backend_settings.models[self.model]
  if self.model_id is None:
  self.model_id = self.model_setting.id

- if self.context_length_control == ContextLengthControlType.Latest:
+ if not skip_cutoff and self.context_length_control == ContextLengthControlType.Latest:
  messages = cutoff_messages(
  messages,
  max_count=self.model_setting.context_length,
@@ -460,6 +483,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
  stream=self.stream,
  temperature=self.temperature,
  max_tokens=max_tokens,
+ top_p=top_p,
  **_stream_options_params,
  **self.response_format,
  **tools_params,
@@ -515,6 +539,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
  stream=self.stream,
  temperature=self.temperature,
  max_tokens=max_tokens,
+ top_p=top_p,
  **self.response_format,
  **tools_params,
  **kwargs,
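The relocation of the NotGiven checks in the Anthropic client and the new AnthropicNotGiven check here address the same cross-SDK issue: the OpenAI and Anthropic SDKs each define their own NotGiven sentinel, and a value prepared for one backend must be mapped to the other backend's sentinel before the request is built. A minimal illustration, not the library's exact code:

    # Both SDKs expose a "not provided" sentinel; passing one SDK's sentinel to the
    # other would be serialized as a bogus value, so it is normalized first.
    from anthropic import NOT_GIVEN as ANTHROPIC_NOT_GIVEN, NotGiven as AnthropicNotGiven
    from openai import NOT_GIVEN as OPENAI_NOT_GIVEN, NotGiven as OpenAINotGiven

    def normalize_top_p(top_p, target: str):
        # target="openai" mirrors the new AnthropicNotGiven check in this file;
        # target="anthropic" mirrors the check added in the Anthropic client above.
        if target == "openai":
            return OPENAI_NOT_GIVEN if isinstance(top_p, AnthropicNotGiven) else top_p
        if isinstance(top_p, OpenAINotGiven) or top_p is None:
            return ANTHROPIC_NOT_GIVEN
        return top_p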

vectorvein/chat_clients/utils.py

@@ -201,6 +201,10 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
  result = response.json()
  return result["data"]["total_tokens"]
  elif model.startswith("gemini"):
+ # TODO: gemini-exp-1114 is not supported yet; use gemini-1.5-flash as a stand-in
+ if model == "gemini-exp-1114":
+ model = "gemini-1.5-flash"
+
  model_setting = settings.gemini.models[model]
  if len(model_setting.endpoints) == 0:
  return len(get_gpt_35_encoding().encode(text))
@@ -208,13 +212,14 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
  if isinstance(endpoint_id, dict):
  endpoint_id = endpoint_id["endpoint_id"]
  endpoint = settings.get_endpoint(endpoint_id)
+
  base_url = f"{endpoint.api_base}/models/{model_setting.id}:countTokens"
  params = {"key": endpoint.api_key}
  request_body = {
  "contents": {
  "role": "USER",
  "parts": [
- {"text": "TEXT"},
+ {"text": text},
  ],
  },
  }
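The get_token_counts fix is small but meaningful: previously the request body sent the literal string "TEXT" to the countTokens endpoint, so Gemini token counts did not depend on the input at all; 0.1.59 sends the actual text (and temporarily maps gemini-exp-1114 onto the gemini-1.5-flash settings). A standalone sketch of the corrected call follows; the helper name and the totalTokens response field are assumptions based on the public countTokens API, not shown in this diff.

    import httpx

    def gemini_count_tokens(api_base: str, api_key: str, model_id: str, text: str) -> int:
        url = f"{api_base}/models/{model_id}:countTokens"
        request_body = {
            "contents": {
                "role": "USER",
                "parts": [{"text": text}],  # 0.1.59 fix: send the real text, not "TEXT"
            },
        }
        response = httpx.post(url, json=request_body, params={"key": api_key}, timeout=None)
        # Assumption: the endpoint returns a JSON object with a "totalTokens" field.
        return response.json()["totalTokens"]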

vectorvein/types/defaults.py

@@ -609,6 +609,13 @@ MINIMAX_MODELS: Final[Dict[str, Dict[str, Any]]] = {
  "function_call_available": True,
  "response_format_available": True,
  },
+ "abab7-preview": {
+ "id": "abab7-preview",
+ "context_length": 245760,
+ "max_output_tokens": 245760,
+ "function_call_available": True,
+ "response_format_available": True,
+ },
  }

  # Gemini models
@@ -616,7 +623,8 @@ GEMINI_DEFAULT_MODEL: Final[str] = "gemini-1.5-pro"
  GEMINI_MODELS: Final[Dict[str, Dict[str, Any]]] = {
  "gemini-1.5-pro": {
  "id": "gemini-1.5-pro",
- "context_length": 1048576,
+ "context_length": 2097152,
+ "max_output_tokens": 8192,
  "function_call_available": True,
  "response_format_available": True,
  "native_multimodal": True,
@@ -624,6 +632,22 @@ GEMINI_MODELS: Final[Dict[str, Dict[str, Any]]] = {
  "gemini-1.5-flash": {
  "id": "gemini-1.5-flash",
  "context_length": 1048576,
+ "max_output_tokens": 8192,
+ "function_call_available": True,
+ "response_format_available": True,
+ "native_multimodal": True,
+ },
+ "gemini-exp-1114": {
+ "id": "gemini-exp-1114",
+ "context_length": 32767,
+ "function_call_available": True,
+ "response_format_available": True,
+ "native_multimodal": True,
+ },
+ "gemini-1.5-flash-8b": {
+ "id": "gemini-1.5-flash-8b",
+ "context_length": 1048576,
+ "max_output_tokens": 8192,
  "function_call_available": True,
  "response_format_available": True,
  "native_multimodal": True,
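The defaults.py changes register one new MiniMax model (abab7-preview) and two new Gemini entries (gemini-exp-1114 and gemini-1.5-flash-8b), raise the gemini-1.5-pro context window to 2,097,152 tokens, and add explicit max_output_tokens values. A quick way to inspect the new entries, assuming vectorvein.types.defaults is importable as listed in the RECORD below:

    from vectorvein.types import defaults

    print(defaults.GEMINI_MODELS["gemini-1.5-pro"]["context_length"])          # 2097152 in 0.1.59
    print(defaults.GEMINI_MODELS["gemini-1.5-flash-8b"]["max_output_tokens"])  # 8192
    print(defaults.MINIMAX_MODELS["abab7-preview"]["context_length"])          # 245760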

dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: vectorvein
- Version: 0.1.56
+ Version: 0.1.59
  Summary: VectorVein python SDK
  Author-Email: Anderson <andersonby@163.com>
  License: MIT

dist-info/RECORD

@@ -1,24 +1,24 @@
- vectorvein-0.1.56.dist-info/METADATA,sha256=3YTghMyf-ZK9VzwhaqGwf_xuhKmVI5IA7SKI1O73Sww,633
- vectorvein-0.1.56.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
- vectorvein-0.1.56.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+ vectorvein-0.1.59.dist-info/METADATA,sha256=GCU-0oftBD1QtnzFXvknbCMq1mdXwBiduQT_B6y95eI,633
+ vectorvein-0.1.59.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+ vectorvein-0.1.59.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
  vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  vectorvein/chat_clients/__init__.py,sha256=Oev7Lv1DIEWCMD-2Pm7e2cwzX7JFQTnIK-j6o4iUuyQ,17725
- vectorvein/chat_clients/anthropic_client.py,sha256=G68JGM98E0pYyi8Tjvo4VQtnug9ncugFbb4d0DrPVQo,36122
+ vectorvein/chat_clients/anthropic_client.py,sha256=phDFgXPV-eNla7ZFPdcZx1fPOWlAFWc3C_mP9VcNkO0,37439
  vectorvein/chat_clients/baichuan_client.py,sha256=CVMvpgjdrZGv0BWnTOBD-f2ufZ3wq3496wqukumsAr4,526
- vectorvein/chat_clients/base_client.py,sha256=0Uj0e-JR0a68sRS_WfUMVd91Av7lzJh6-DukjutlaD0,9497
+ vectorvein/chat_clients/base_client.py,sha256=N1Swm6b9Gos7zLSH-qCSxgnDRCHPmuWZcw_H9zVnGJs,10297
  vectorvein/chat_clients/deepseek_client.py,sha256=3qWu01NlJAP2N-Ff62d5-CZXZitlizE1fzb20LNetig,526
- vectorvein/chat_clients/gemini_client.py,sha256=DlQI5p8qG5erThMb17ojB2gofzTkwVy3veFC8_Cbpc4,19902
+ vectorvein/chat_clients/gemini_client.py,sha256=e7xZdZm0-W2iXy3S-J5b1bO9YqhGxcv0Y5HPYcQnDds,21098
  vectorvein/chat_clients/groq_client.py,sha256=Uow4pgdmFi93ZQSoOol2-0PhhqkW-S0XuSldvppz5U4,498
  vectorvein/chat_clients/local_client.py,sha256=55nOsxzqUf79q3Y14MKROA71zxhsT7p7FsDZ89rts2M,422
- vectorvein/chat_clients/minimax_client.py,sha256=3HetFZbmgvEzWFaH_Gbj99lsh9DmIOhfpVdbV4HxrSI,19116
+ vectorvein/chat_clients/minimax_client.py,sha256=uNYz3ccJOhTAgzkDNvWfM4_LaBaOpHjrne1YNieba28,20122
  vectorvein/chat_clients/mistral_client.py,sha256=1aKSylzBDaLYcFnaBIL4-sXSzWmXfBeON9Q0rq-ziWw,534
  vectorvein/chat_clients/moonshot_client.py,sha256=gbu-6nGxx8uM_U2WlI4Wus881rFRotzHtMSoYOcruGU,526
  vectorvein/chat_clients/openai_client.py,sha256=Nz6tV45pWcsOupxjnsRsGTicbQNJWIZyxuJoJ5DGMpg,527
- vectorvein/chat_clients/openai_compatible_client.py,sha256=D2VmhpDVct4w2y58s87An7x22koOdkxSOKR2-so9lJI,22044
+ vectorvein/chat_clients/openai_compatible_client.py,sha256=hbSggW5itvTRk3Qb3rejNTt3vd0r6R95irLTjeSzm2g,23102
  vectorvein/chat_clients/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
  vectorvein/chat_clients/stepfun_client.py,sha256=zsD2W5ahmR4DD9cqQTXmJr3txrGuvxbRWhFlRdwNijI,519
- vectorvein/chat_clients/utils.py,sha256=UqDV4ljwZRbndZdkyHkIBis1ciRt5gjPFOJ0vnR2Uas,26669
+ vectorvein/chat_clients/utils.py,sha256=6rjXUv39XMIEKKmA-vG8HzOHntCVeFvlrirZcduKCIA,26828
  vectorvein/chat_clients/xai_client.py,sha256=eLFJJrNRJ-ni3DpshODcr3S1EJQLbhVwxyO1E54LaqM,491
  vectorvein/chat_clients/yi_client.py,sha256=RNf4CRuPJfixrwLZ3-DEc3t25QDe1mvZeb9sku2f8Bc,484
  vectorvein/chat_clients/zhipuai_client.py,sha256=Ys5DSeLCuedaDXr3PfG1EW2zKXopt-awO2IylWSwY0s,519
@@ -26,11 +26,11 @@ vectorvein/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  vectorvein/server/token_server.py,sha256=36F9PKSNOX8ZtYBXY_l-76GQTpUSmQ2Y8EMy1H7wtdQ,1353
  vectorvein/settings/__init__.py,sha256=g01y74x0k2JEAqNpRGG0PDs0NTULjOAZV6HRhydPX1c,3874
  vectorvein/settings/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- vectorvein/types/defaults.py,sha256=Kf7kdY2-dbcWR_oQkqoHfBLtr2kg-M_Va3-37TlZU-o,22941
+ vectorvein/types/defaults.py,sha256=DC0fJ2MtXYNXiWkDdnpGYXuwCoSOcuB6PseI_y-VDo0,23730
  vectorvein/types/enums.py,sha256=7KTJSVtQueImmbr1fSwv3rQVtc0RyMWXJmoE2tDOaso,1667
  vectorvein/types/exception.py,sha256=gnW4GnJ76jND6UGnodk9xmqkcbeS7Cz2rvncA2HpD5E,69
  vectorvein/types/llm_parameters.py,sha256=5o-C_yXxxQWZy_e8OWowB2107GTS-Eawx4Mvb1q55Co,5256
  vectorvein/types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  vectorvein/utilities/media_processing.py,sha256=CTRq-lGlFkFgP_FSRhNwF_qUgmOrXPf2_1Ok9HY42_g,5887
  vectorvein/utilities/retry.py,sha256=6KFS9R2HdhqM3_9jkjD4F36ZSpEx2YNFGOVlpOsUetM,2208
- vectorvein-0.1.56.dist-info/RECORD,,
+ vectorvein-0.1.59.dist-info/RECORD,,