vectorvein-0.1.34-py3-none-any.whl → vectorvein-0.1.36-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
```diff
--- a/vectorvein/chat_clients/anthropic_client.py
+++ b/vectorvein/chat_clients/anthropic_client.py
@@ -25,6 +25,7 @@ from ..settings import settings
 from ..types import defaults as defs
 from .utils import cutoff_messages, get_message_token_counts
 from .base_client import BaseChatClient, BaseAsyncChatClient
+from .openai_compatible_client import OpenAICompatibleChatClient, AsyncOpenAICompatibleChatClient
 from ..types.enums import ContextLengthControlType, BackendType
 from ..types.llm_parameters import (
     Usage,
@@ -122,7 +123,7 @@ class AnthropicChatClient(BaseChatClient):
         random_endpoint: bool = True,
         endpoint_id: str = "",
         http_client: httpx.Client | None = None,
-        **kwargs,
+        backend_name: str | None = None,
     ):
         super().__init__(
             model,
@@ -132,7 +133,7 @@ class AnthropicChatClient(BaseChatClient):
             random_endpoint,
             endpoint_id,
             http_client,
-            **kwargs,
+            backend_name,
         )
 
     @cached_property
@@ -176,12 +177,23 @@ class AnthropicChatClient(BaseChatClient):
                 access_token=self.creds.token,
                 http_client=self.http_client,
             )
-        else:
+        elif self.endpoint.api_schema_type == "default":
             return Anthropic(
                 api_key=self.endpoint.api_key,
                 base_url=self.endpoint.api_base,
                 http_client=self.http_client,
             )
+        elif self.endpoint.api_schema_type == "openai":
+            return OpenAICompatibleChatClient(
+                model=self.model,
+                stream=self.stream,
+                temperature=self.temperature,
+                context_length_control=self.context_length_control,
+                random_endpoint=self.random_endpoint,
+                endpoint_id=self.endpoint_id,
+                http_client=self.http_client,
+                backend_name=self.BACKEND_NAME,
+            ).raw_client
 
     @overload
     def create_completion(
```
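The net effect of the `raw_client` change above: an Anthropic-branded client can now hand back an OpenAI-style raw client when its endpoint is declared OpenAI-compatible. A minimal usage sketch, assuming an endpoint registered in the settings under a hypothetical id with `api_schema_type="openai"` (the endpoint id and model name are illustrative, not from this diff):

```python
from vectorvein.chat_clients.anthropic_client import AnthropicChatClient

# "my-claude-proxy" is a hypothetical endpoint configured with
# api_schema_type="openai"; the model name is likewise illustrative.
client = AnthropicChatClient(
    model="claude-3-5-sonnet",
    random_endpoint=False,
    endpoint_id="my-claude-proxy",
)

# raw_client now resolves through OpenAICompatibleChatClient rather than
# returning an anthropic.Anthropic instance.
raw = client.raw_client
```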
```diff
@@ -213,6 +225,21 @@ class AnthropicChatClient(BaseChatClient):
     ) -> Generator[ChatCompletionDeltaMessage, None, None]:
         pass
 
+    @overload
+    def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
+        pass
+
     def create_completion(
         self,
         messages: list,
```
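The new third overload covers call sites where `stream` is not a literal: with only the streaming/non-streaming overloads, a plain `bool` argument would not type-check, while the union-returning overload does. A hedged sketch (it assumes the first two overloads split on literal `stream` values, which their distinct return types suggest):

```python
# A caller that decides streaming at runtime; the catch-all overload added
# above is what lets this call type-check as the union return type.
def ask(client: AnthropicChatClient, messages: list, stream: bool = False):
    return client.create_completion(messages, stream=stream)
```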
```diff
@@ -236,6 +263,41 @@ class AnthropicChatClient(BaseChatClient):
         if isinstance(tool_choice, OpenAINotGiven):
             tool_choice = NOT_GIVEN
 
+        if self.random_endpoint:
+            self.random_endpoint = True
+            self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.endpoint = settings.get_endpoint(self.endpoint_id)
+
+        if self.endpoint.api_schema_type == "openai":
+            if self.stream:
+                return OpenAICompatibleChatClient(
+                    model=self.model,
+                    stream=True,
+                    temperature=self.temperature,
+                    context_length_control=self.context_length_control,
+                    random_endpoint=self.random_endpoint,
+                    endpoint_id=self.endpoint_id,
+                    http_client=self.http_client,
+                    backend_name=self.BACKEND_NAME,
+                ).create_completion(
+                    messages, model, True, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
+                )
+            else:
+                return OpenAICompatibleChatClient(
+                    model=self.model,
+                    stream=False,
+                    temperature=self.temperature,
+                    context_length_control=self.context_length_control,
+                    random_endpoint=self.random_endpoint,
+                    endpoint_id=self.endpoint_id,
+                    http_client=self.http_client,
+                    backend_name=self.BACKEND_NAME,
+                ).create_completion(
+                    messages, model, False, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
+                )
+
+        assert isinstance(self.raw_client, Anthropic | AnthropicVertex)
+
         self.model_setting = self.backend_settings.models[self.model]
 
         if messages[0].get("role") == "system":
```
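With this dispatch, a single `AnthropicChatClient` call site transparently routes to `OpenAICompatibleChatClient.create_completion` whenever the selected endpoint declares `api_schema_type == "openai"`. Reusing the hypothetical `client` from the sketch above (message payloads illustrative):

```python
messages = [{"role": "user", "content": "Hello"}]

# Non-streaming: a ChatCompletionMessage, regardless of which backend
# actually served the request.
reply = client.create_completion(messages, stream=False)

# Streaming: a generator of ChatCompletionDeltaMessage chunks.
for delta in client.create_completion(messages, stream=True):
    print(delta)
```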
```diff
@@ -377,7 +439,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         random_endpoint: bool = True,
         endpoint_id: str = "",
         http_client: httpx.AsyncClient | None = None,
-        **kwargs,
+        backend_name: str | None = None,
     ):
         super().__init__(
             model,
@@ -387,7 +449,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
             random_endpoint,
             endpoint_id,
             http_client,
-            **kwargs,
+            backend_name,
         )
 
     @cached_property
@@ -431,12 +493,23 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 access_token=self.creds.token,
                 http_client=self.http_client,
             )
-        else:
+        elif self.endpoint.api_schema_type == "default":
             return AsyncAnthropic(
                 api_key=self.endpoint.api_key,
                 base_url=self.endpoint.api_base,
                 http_client=self.http_client,
             )
+        elif self.endpoint.api_schema_type == "openai":
+            return AsyncOpenAICompatibleChatClient(
+                model=self.model,
+                stream=self.stream,
+                temperature=self.temperature,
+                context_length_control=self.context_length_control,
+                random_endpoint=self.random_endpoint,
+                endpoint_id=self.endpoint_id,
+                http_client=self.http_client,
+                backend_name=self.BACKEND_NAME,
+            ).raw_client
 
     @overload
     async def create_completion(
@@ -468,6 +541,21 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
     ) -> AsyncGenerator[ChatCompletionDeltaMessage, Any]:
         pass
 
+    @overload
+    async def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, Any]:
+        pass
+
     async def create_completion(
         self,
         messages: list,
@@ -491,6 +579,41 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         if isinstance(tool_choice, OpenAINotGiven):
             tool_choice = NOT_GIVEN
 
+        if self.random_endpoint:
+            self.random_endpoint = True
+            self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.endpoint = settings.get_endpoint(self.endpoint_id)
+
+        if self.endpoint.api_schema_type == "openai":
+            if self.stream:
+                return AsyncOpenAICompatibleChatClient(
+                    model=self.model,
+                    stream=True,
+                    temperature=self.temperature,
+                    context_length_control=self.context_length_control,
+                    random_endpoint=self.random_endpoint,
+                    endpoint_id=self.endpoint_id,
+                    http_client=self.http_client,
+                    backend_name=self.BACKEND_NAME,
+                ).create_completion(
+                    messages, model, True, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
+                )
+            else:
+                return AsyncOpenAICompatibleChatClient(
+                    model=self.model,
+                    stream=False,
+                    temperature=self.temperature,
+                    context_length_control=self.context_length_control,
+                    random_endpoint=self.random_endpoint,
+                    endpoint_id=self.endpoint_id,
+                    http_client=self.http_client,
+                    backend_name=self.BACKEND_NAME,
+                ).create_completion(
+                    messages, model, False, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
+                )
+
+        assert isinstance(self.raw_client, AsyncAnthropic | AsyncAnthropicVertex)
+
         self.model_setting = self.backend_settings.models[self.model]
 
         if messages[0].get("role") == "system":
```
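The async client mirrors the sync dispatch. A sketch under the same assumptions (hypothetical endpoint id, illustrative model name); since `create_completion` is an `async def` that returns the stream, the coroutine is awaited before iterating:

```python
import asyncio

from vectorvein.chat_clients.anthropic_client import AsyncAnthropicChatClient

async def main() -> None:
    client = AsyncAnthropicChatClient(
        model="claude-3-5-sonnet",      # illustrative
        random_endpoint=False,
        endpoint_id="my-claude-proxy",  # hypothetical OpenAI-schema endpoint
    )
    stream = await client.create_completion(
        [{"role": "user", "content": "Hello"}], stream=True
    )
    async for delta in stream:
        print(delta)

asyncio.run(main())
```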
```diff
--- a/vectorvein/chat_clients/base_client.py
+++ b/vectorvein/chat_clients/base_client.py
@@ -34,7 +34,7 @@ class BaseChatClient(ABC):
         random_endpoint: bool = True,
         endpoint_id: str = "",
         http_client: httpx.Client | None = None,
-        **kwargs,
+        backend_name: str | None = None,
     ):
         self.model = model or self.DEFAULT_MODEL
         self.stream = stream
@@ -44,6 +44,9 @@ class BaseChatClient(ABC):
         self.endpoint_id = endpoint_id
         self.http_client = http_client
 
+        if backend_name is not None:
+            self.BACKEND_NAME = BackendType(backend_name)
+
         self.backend_settings = settings.get_backend(self.BACKEND_NAME)
 
         if endpoint_id:
@@ -156,7 +159,7 @@ class BaseAsyncChatClient(ABC):
         random_endpoint: bool = True,
         endpoint_id: str = "",
         http_client: httpx.AsyncClient | None = None,
-        **kwargs,
+        backend_name: str | None = None,
     ):
         self.model = model or self.DEFAULT_MODEL
         self.stream = stream
@@ -166,6 +169,9 @@ class BaseAsyncChatClient(ABC):
         self.endpoint_id = endpoint_id
         self.http_client = http_client
 
+        if backend_name is not None:
+            self.BACKEND_NAME = BackendType(backend_name)
+
         self.backend_settings = settings.get_backend(self.BACKEND_NAME)
 
         if endpoint_id:
```
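`BaseChatClient` and its async twin now take an optional `backend_name` and coerce it through `BackendType(...)`. Since calling an `Enum` class with an existing member returns that member unchanged, the Anthropic client can pass `self.BACKEND_NAME` (already an enum member) straight through, while strings are looked up by value. A sketch, assuming "anthropic" is a valid `BackendType` value (the member values are not shown in this diff):

```python
from vectorvein.types.enums import BackendType

# Both spellings resolve to the same member: lookup by value for strings,
# identity for an existing member.
by_value = BackendType("anthropic")           # assumed enum value
by_member = BackendType(by_value)             # Enum(member) returns member
assert by_value is by_member
```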
```diff
--- a/vectorvein/chat_clients/gemini_client.py
+++ b/vectorvein/chat_clients/gemini_client.py
@@ -35,7 +35,7 @@ class GeminiChatClient(BaseChatClient):
         random_endpoint: bool = True,
         endpoint_id: str = "",
         http_client: httpx.Client | None = None,
-        **kwargs,
+        backend_name: str | None = None,
     ):
         super().__init__(
             model,
@@ -45,7 +45,7 @@ class GeminiChatClient(BaseChatClient):
             random_endpoint,
             endpoint_id,
             http_client,
-            **kwargs,
+            backend_name,
         )
 
     @cached_property
@@ -263,7 +263,7 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
         random_endpoint: bool = True,
         endpoint_id: str = "",
         http_client: httpx.AsyncClient | None = None,
-        **kwargs,
+        backend_name: str | None = None,
     ):
         super().__init__(
             model,
@@ -273,7 +273,7 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
             random_endpoint,
             endpoint_id,
             http_client,
-            **kwargs,
+            backend_name,
         )
 
     @cached_property
```
```diff
--- a/vectorvein/chat_clients/minimax_client.py
+++ b/vectorvein/chat_clients/minimax_client.py
@@ -56,7 +56,7 @@ class MiniMaxChatClient(BaseChatClient):
         random_endpoint: bool = True,
         endpoint_id: str = "",
         http_client: httpx.Client | None = None,
-        **kwargs,
+        backend_name: str | None = None,
     ):
         super().__init__(
             model,
@@ -66,7 +66,7 @@ class MiniMaxChatClient(BaseChatClient):
             random_endpoint,
             endpoint_id,
             http_client,
-            **kwargs,
+            backend_name,
         )
         if http_client:
             self.http_client = http_client
@@ -271,7 +271,7 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
         random_endpoint: bool = True,
         endpoint_id: str = "",
         http_client: httpx.AsyncClient | None = None,
-        **kwargs,
+        backend_name: str | None = None,
     ):
         super().__init__(
             model,
@@ -281,7 +281,7 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
             random_endpoint,
             endpoint_id,
             http_client,
-            **kwargs,
+            backend_name,
         )
         if http_client:
             self.http_client = http_client
```
```diff
--- a/vectorvein/chat_clients/openai_compatible_client.py
+++ b/vectorvein/chat_clients/openai_compatible_client.py
@@ -25,6 +25,7 @@ from ..types.llm_parameters import (
     NOT_GIVEN,
     ToolParam,
     ToolChoice,
+    BackendSettings,
     ChatCompletionMessage,
     ChatCompletionDeltaMessage,
 )
@@ -43,7 +44,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
         random_endpoint: bool = True,
         endpoint_id: str = "",
         http_client: httpx.Client | None = None,
-        **kwargs,
+        backend_name: str | None = None,
     ):
         super().__init__(
             model,
@@ -53,7 +54,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
             random_endpoint,
             endpoint_id,
             http_client,
-            **kwargs,
+            backend_name,
         )
 
     @cached_property
@@ -265,7 +266,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
         random_endpoint: bool = True,
         endpoint_id: str = "",
         http_client: httpx.AsyncClient | None = None,
-        **kwargs,
+        backend_name: str | None = None,
     ):
         super().__init__(
             model,
@@ -275,7 +276,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             random_endpoint,
             endpoint_id,
             http_client,
-            **kwargs,
+            backend_name,
         )
 
     @cached_property
```
```diff
--- a/vectorvein/chat_clients/utils.py
+++ b/vectorvein/chat_clients/utils.py
@@ -7,7 +7,7 @@ from typing import Iterable
 import httpx
 import tiktoken
 from anthropic import Anthropic
-from qwen_tokenizer import qwen_tokenizer
+from qwen_tokenizer import get_tokenizer
 from deepseek_tokenizer import deepseek_tokenizer
 
 from ..settings import settings
@@ -208,6 +208,7 @@ def get_token_counts(text: str | dict, model: str = "") -> int:
     elif model.startswith("deepseek"):
         return len(deepseek_tokenizer.encode(text))
     elif model.startswith("qwen"):
+        qwen_tokenizer = get_tokenizer(model)
         return len(qwen_tokenizer.encode(text))
     elif model.startswith("stepfun"):
         model_setting = settings.moonshot.models[model]
```
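The qwen branch now resolves a tokenizer per model via `get_tokenizer(model)` instead of importing a single shared `qwen_tokenizer` instance, so different qwen variants can tokenize differently. A hedged usage sketch (the model name is illustrative; any name starting with "qwen" takes this branch):

```python
from vectorvein.chat_clients.utils import get_token_counts

# Internally this now calls get_tokenizer("qwen2-72b-instruct") before encoding.
n = get_token_counts("How many tokens is this?", model="qwen2-72b-instruct")
```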
```diff
--- a/vectorvein/types/llm_parameters.py
+++ b/vectorvein/types/llm_parameters.py
@@ -23,6 +23,10 @@ class EndpointSetting(BaseModel):
     region: Optional[str] = Field(None, description="The region for the endpoint.")
     api_base: str = Field(None, description="The base URL for the API.")
     api_key: Optional[str] = Field(None, description="The API key for authentication.")
+    api_schema_type: Optional[str] = Field(
+        "default",
+        description="The type of client for the endpoint. Set to 'default' will determine the type automatically.",
+    )
     credentials: Optional[dict] = Field(None, description="Additional credentials if needed.")
     is_azure: bool = Field(False, description="Indicates if the endpoint is for Azure.")
     is_vertex: bool = Field(False, description="Indicates if the endpoint is for Vertex.")
```
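This new `api_schema_type` field is what the client dispatch above keys on: `"default"` keeps the automatically determined client, while `"openai"` forces the OpenAI-compatible code path. A minimal sketch using only the fields visible in this hunk (values are placeholders):

```python
from vectorvein.types.llm_parameters import EndpointSetting

endpoint = EndpointSetting(
    api_base="https://example.com/v1",  # placeholder URL
    api_key="sk-placeholder",
    api_schema_type="openai",           # new in this diff; defaults to "default"
)
```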
```diff
--- vectorvein-0.1.34.dist-info/METADATA
+++ vectorvein-0.1.36.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.1.34
+Version: 0.1.36
 Summary: Default template for PDM package
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
```
```diff
--- vectorvein-0.1.34.dist-info/RECORD
+++ vectorvein-0.1.36.dist-info/RECORD
@@ -1,24 +1,24 @@
-vectorvein-0.1.34.dist-info/METADATA,sha256=vU6f2zVHj_35BGEKhGfkeJ_Cj4uAE1MzTIHu25n5WOI,502
-vectorvein-0.1.34.dist-info/WHEEL,sha256=pM0IBB6ZwH3nkEPhtcp50KvKNX-07jYtnb1g1m6Z4Co,90
-vectorvein-0.1.34.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+vectorvein-0.1.36.dist-info/METADATA,sha256=gRwf2-Irb4TOVXjFue0Vb4ofR_zIM6AS0q4IRv4eIl0,502
+vectorvein-0.1.36.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+vectorvein-0.1.36.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/chat_clients/__init__.py,sha256=dW169oK1n3v8Z0uD8itghzlCP72rxiaS-XYn6fvI2xM,16788
-vectorvein/chat_clients/anthropic_client.py,sha256=h82GxBi7h22B7leBuPofwBstxH_c12tEgGjpnKg6UDc,25007
+vectorvein/chat_clients/anthropic_client.py,sha256=FsiAe4NAP7tC6EPrAqSorc5eaHkOfGKJjE6pEmuQAaw,30619
 vectorvein/chat_clients/baichuan_client.py,sha256=CVMvpgjdrZGv0BWnTOBD-f2ufZ3wq3496wqukumsAr4,526
-vectorvein/chat_clients/base_client.py,sha256=wxh7WkzFG4cD4I4t4e6RGe1KiFZc8Z5llh2iVblXEZE,8415
+vectorvein/chat_clients/base_client.py,sha256=7i456Yn-tqY0oPeIj_wHWNGGzCKPAbX5Ufxy2wWGMNY,8653
 vectorvein/chat_clients/deepseek_client.py,sha256=3qWu01NlJAP2N-Ff62d5-CZXZitlizE1fzb20LNetig,526
-vectorvein/chat_clients/gemini_client.py,sha256=VxII45fMjE9JTlOuq4n7R0lNVQRoHTUyTNZE4ICXNrM,18685
+vectorvein/chat_clients/gemini_client.py,sha256=6LKy03iVDXtj0Bn99o9M8rRURMzozX-kf0FoazxPQt0,18739
 vectorvein/chat_clients/groq_client.py,sha256=Uow4pgdmFi93ZQSoOol2-0PhhqkW-S0XuSldvppz5U4,498
 vectorvein/chat_clients/local_client.py,sha256=55nOsxzqUf79q3Y14MKROA71zxhsT7p7FsDZ89rts2M,422
-vectorvein/chat_clients/minimax_client.py,sha256=ljnT9QtVUiySSQSECEv9g2vRfv88K2pPPNZH4sCh838,17204
+vectorvein/chat_clients/minimax_client.py,sha256=0MVMb4g0K_VKnPGHYX81jHiBaQUGWFG5vV8wyClCT_8,17258
 vectorvein/chat_clients/mistral_client.py,sha256=1aKSylzBDaLYcFnaBIL4-sXSzWmXfBeON9Q0rq-ziWw,534
 vectorvein/chat_clients/moonshot_client.py,sha256=gbu-6nGxx8uM_U2WlI4Wus881rFRotzHtMSoYOcruGU,526
 vectorvein/chat_clients/openai_client.py,sha256=Nz6tV45pWcsOupxjnsRsGTicbQNJWIZyxuJoJ5DGMpg,527
-vectorvein/chat_clients/openai_compatible_client.py,sha256=gfCTXji8pgFUiultiNDKcmPIGu7lFfQ9VmA8o2_Mm6c,18823
+vectorvein/chat_clients/openai_compatible_client.py,sha256=H7W7BEA71Dh6N42cFuH-nXN9en6oU14OU6RyMmefbnM,18899
 vectorvein/chat_clients/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
 vectorvein/chat_clients/stepfun_client.py,sha256=zsD2W5ahmR4DD9cqQTXmJr3txrGuvxbRWhFlRdwNijI,519
-vectorvein/chat_clients/utils.py,sha256=1LddLLVf8r8_Hj5LEYrQRus2qfsuXkJPMOu9VsiKMys,24338
+vectorvein/chat_clients/utils.py,sha256=7aSukIJrjFr50o160tgV7t2gUdZUdeNJnJaeJSwlBtQ,24383
 vectorvein/chat_clients/yi_client.py,sha256=RNf4CRuPJfixrwLZ3-DEc3t25QDe1mvZeb9sku2f8Bc,484
 vectorvein/chat_clients/zhipuai_client.py,sha256=Ys5DSeLCuedaDXr3PfG1EW2zKXopt-awO2IylWSwY0s,519
 vectorvein/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -27,8 +27,8 @@ vectorvein/settings/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
 vectorvein/types/defaults.py,sha256=xefmRNYBGbnWA5kjLLFKN91UM5gnHZ5-kcCNlQRfznk,22095
 vectorvein/types/enums.py,sha256=x_S0IJiEWijOAEiMNdiGDGEWGtmt7TwMriJVDqrDmTo,1637
 vectorvein/types/exception.py,sha256=gnW4GnJ76jND6UGnodk9xmqkcbeS7Cz2rvncA2HpD5E,69
-vectorvein/types/llm_parameters.py,sha256=5Q_NWVjbEhEcG7lYLebiQZ9uQU9rZznFmrUxDZ17yqY,4714
+vectorvein/types/llm_parameters.py,sha256=uvVS3dqqvGMJoiz79LxdKjGj3YSmGD3U_yA0f7mLoTM,4903
 vectorvein/types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/utilities/media_processing.py,sha256=BujciRmw1GMmc3ELRvafL8STcy6r5b2rVnh27-uA7so,2256
 vectorvein/utilities/retry.py,sha256=9ePuJdeUUGx-qMWfaFxmlOvG_lQPwCQ4UB1z3Edlo34,993
-vectorvein-0.1.34.dist-info/RECORD,,
+vectorvein-0.1.36.dist-info/RECORD,,
```
```diff
--- vectorvein-0.1.34.dist-info/WHEEL
+++ vectorvein-0.1.36.dist-info/WHEEL
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: pdm-backend (2.4.2)
+Generator: pdm-backend (2.4.3)
 Root-Is-Purelib: true
 Tag: py3-none-any
```