vectorvein-0.1.11-py3-none-any.whl → vectorvein-0.1.12-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, exactly as published to a supported public registry. It is provided for informational purposes only.
--- vectorvein/chat_clients/__init__.py
+++ vectorvein/chat_clients/__init__.py
@@ -1,5 +1,7 @@
 # @Author: Bi Ying
 # @Date: 2024-07-26 14:48:55
+import httpx
+
 from .base_client import BaseChatClient, BaseAsyncChatClient

 from .yi_client import YiChatClient, AsyncYiChatClient
@@ -58,6 +60,9 @@ def create_chat_client(
     stream: bool = False,
     temperature: float = 0.7,
     context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
     **kwargs,
 ) -> BaseChatClient:
     if backend.lower() not in BackendMap["sync"]:
@@ -73,6 +78,9 @@ def create_chat_client(
         stream=stream,
         temperature=temperature,
         context_length_control=context_length_control,
+        random_endpoint=random_endpoint,
+        endpoint_id=endpoint_id,
+        http_client=http_client,
         **kwargs,
     )

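
The three new factory parameters mirror what the individual clients accept: `random_endpoint` and `endpoint_id` control endpoint selection, and `http_client` lets the caller supply a preconfigured `httpx.Client` (for proxies, custom TLS, timeouts, or connection-pool reuse) instead of each backend building its own. A minimal usage sketch; the backend name "openai" and the proxy URL are illustrative assumptions, not values from this diff:

    import httpx

    from vectorvein.chat_clients import create_chat_client

    # Caller-configured transport: proxy plus an explicit timeout.
    custom_http = httpx.Client(proxy="http://localhost:8080", timeout=30.0)

    chat_client = create_chat_client(
        backend="openai",         # any key in BackendMap["sync"]
        stream=False,
        temperature=0.7,
        http_client=custom_http,  # forwarded into the chosen client class
    )
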
@@ -83,6 +91,9 @@ def create_async_chat_client(
     stream: bool = False,
     temperature: float = 0.7,
     context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
     **kwargs,
 ) -> BaseAsyncChatClient:
     if backend.lower() not in BackendMap["async"]:
@@ -98,6 +109,9 @@ def create_async_chat_client(
         stream=stream,
         temperature=temperature,
         context_length_control=context_length_control,
+        random_endpoint=random_endpoint,
+        endpoint_id=endpoint_id,
+        http_client=http_client,
         **kwargs,
     )

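
The async factory is identical except that it validates against `BackendMap["async"]` and expects an `httpx.AsyncClient`. A sketch under the same assumptions as above:

    import asyncio

    import httpx

    from vectorvein.chat_clients import create_async_chat_client

    async def main():
        async with httpx.AsyncClient(timeout=30.0) as custom_http:
            chat_client = create_async_chat_client(
                backend="openai",  # illustrative backend name
                http_client=custom_http,
            )
            # ... await the client's completion call while the pool is open

    asyncio.run(main())
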
--- vectorvein/chat_clients/anthropic_client.py
+++ vectorvein/chat_clients/anthropic_client.py
@@ -3,6 +3,7 @@
 import json
 import random

+import httpx
 from openai._types import NotGiven as OpenAINotGiven
 from anthropic import Anthropic, AnthropicVertex, AsyncAnthropic, AsyncAnthropicVertex
 from anthropic._types import NotGiven, NOT_GIVEN
@@ -97,6 +98,7 @@ class AnthropicChatClient(BaseChatClient):
         context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
         random_endpoint: bool = True,
         endpoint_id: str = "",
+        http_client: httpx.Client | None = None,
         **kwargs,
     ):
         super().__init__(
@@ -106,6 +108,7 @@ class AnthropicChatClient(BaseChatClient):
             context_length_control,
             random_endpoint,
             endpoint_id,
+            http_client,
             **kwargs,
         )

@@ -183,11 +186,13 @@ class AnthropicChatClient(BaseChatClient):
                 base_url=base_url,
                 project_id=self.endpoint.credentials.get("quota_project_id"),
                 access_token=self.creds.token,
+                http_client=self.http_client,
             )
         else:
             self._client = Anthropic(
                 api_key=self.endpoint.api_key,
                 base_url=self.endpoint.api_base,
+                http_client=self.http_client,
             )

         tools_params = refactor_tool_use_params(tools) if tools else tools
@@ -299,6 +304,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
         random_endpoint: bool = True,
         endpoint_id: str = "",
+        http_client: httpx.AsyncClient | None = None,
         **kwargs,
     ):
         super().__init__(
@@ -308,6 +314,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
             context_length_control,
             random_endpoint,
             endpoint_id,
+            http_client,
             **kwargs,
         )

@@ -385,11 +392,13 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 base_url=base_url,
                 project_id=self.endpoint.credentials.get("quota_project_id"),
                 access_token=self.creds.token,
+                http_client=self.http_client,
             )
         else:
             self._client = AsyncAnthropic(
                 api_key=self.endpoint.api_key,
                 base_url=self.endpoint.api_base,
+                http_client=self.http_client,
             )

         tools_params = refactor_tool_use_params(tools) if tools else tools
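
In both the sync and async paths the stored client is handed to the Anthropic SDK constructors (`Anthropic`, `AnthropicVertex`, and their async variants), which accept an `http_client` keyword and build an internal default when it is `None`. Because the pool now lives on the caller's side, one client can be shared across several backends; a sketch, assuming both backends are configured in the package settings:

    import httpx

    from vectorvein.chat_clients import create_chat_client

    shared_http = httpx.Client()  # one connection pool for every backend

    anthropic_client = create_chat_client(backend="anthropic", http_client=shared_http)
    openai_client = create_chat_client(backend="openai", http_client=shared_http)
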
--- vectorvein/chat_clients/base_client.py
+++ vectorvein/chat_clients/base_client.py
@@ -3,6 +3,7 @@
 from abc import ABC, abstractmethod
 from typing import Generator, AsyncGenerator, Any

+import httpx
 from openai._types import NotGiven, NOT_GIVEN

 from ..settings import settings
@@ -23,6 +24,7 @@ class BaseChatClient(ABC):
         context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
         random_endpoint: bool = True,
         endpoint_id: str = "",
+        http_client: httpx.Client | None = None,
         **kwargs,
     ):
         self.model = model or self.DEFAULT_MODEL
@@ -31,6 +33,7 @@ class BaseChatClient(ABC):
         self.context_length_control = context_length_control
         self.random_endpoint = random_endpoint
         self.endpoint_id = endpoint_id
+        self.http_client = http_client

         self.backend_settings = settings.get_backend(self.BACKEND_NAME)

@@ -84,6 +87,7 @@ class BaseAsyncChatClient(ABC):
         context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
         random_endpoint: bool = True,
         endpoint_id: str = "",
+        http_client: httpx.AsyncClient | None = None,
         **kwargs,
     ):
         self.model = model or self.DEFAULT_MODEL
@@ -92,6 +96,7 @@ class BaseAsyncChatClient(ABC):
         self.context_length_control = context_length_control
         self.random_endpoint = random_endpoint
         self.endpoint_id = endpoint_id
+        self.http_client = http_client

         self.backend_settings = settings.get_backend(self.BACKEND_NAME)

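
Both base classes store the client as `self.http_client` without creating a default, leaving instantiation policy to each subclass. The subclasses pass their constructor arguments to `super().__init__` positionally, so the new parameter has to sit in the same slot everywhere. A sketch of the pattern a hypothetical new backend would follow; the class and backend names are invented, and the leading parameters are assumed from the factory signature above:

    import httpx

    class ExampleChatClient(BaseChatClient):
        DEFAULT_MODEL = "example-model"  # hypothetical
        BACKEND_NAME = "example"         # hypothetical

        def __init__(
            self,
            model: str = "",
            stream: bool = False,
            temperature: float = 0.7,
            context_length_control=defs.CONTEXT_LENGTH_CONTROL,
            random_endpoint: bool = True,
            endpoint_id: str = "",
            http_client: httpx.Client | None = None,
            **kwargs,
        ):
            # Positional pass-through: order must match BaseChatClient.__init__.
            super().__init__(
                model,
                stream,
                temperature,
                context_length_control,
                random_endpoint,
                endpoint_id,
                http_client,
                **kwargs,
            )
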
--- vectorvein/chat_clients/gemini_client.py
+++ vectorvein/chat_clients/gemini_client.py
@@ -25,6 +25,7 @@ class GeminiChatClient(BaseChatClient):
         context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
         random_endpoint: bool = True,
         endpoint_id: str = "",
+        http_client: httpx.Client | None = None,
         **kwargs,
     ):
         super().__init__(
@@ -34,6 +35,7 @@ class GeminiChatClient(BaseChatClient):
             context_length_control,
             random_endpoint,
             endpoint_id,
+            http_client,
             **kwargs,
         )

@@ -107,7 +109,11 @@

         def generator():
             result = {"content": ""}
-            with httpx.stream("POST", url, headers=headers, params=params, json=request_body) as response:
+            if self.http_client:
+                client = self.http_client
+            else:
+                client = httpx.Client()
+            with client.stream("POST", url, headers=headers, params=params, json=request_body) as response:
                 for chunk in response.iter_lines():
                     message = {"content": ""}
                     if not chunk.startswith("data:"):
@@ -142,7 +148,11 @@
             return generator()
         else:
             url = f"{self.endpoint.api_base}/models/{self.model_setting.id}:generateContent"
-            response = httpx.post(url, json=request_body, headers=headers, params=params, timeout=None).json()
+            if self.http_client:
+                client = self.http_client
+            else:
+                client = httpx.Client()
+            response = client.post(url, json=request_body, headers=headers, params=params, timeout=None).json()
             result = {
                 "content": "",
                 "usage": {
@@ -185,6 +195,7 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
         context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
         random_endpoint: bool = True,
         endpoint_id: str = "",
+        http_client: httpx.AsyncClient | None = None,
         **kwargs,
     ):
         super().__init__(
@@ -194,6 +205,7 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
             context_length_control,
             random_endpoint,
             endpoint_id,
+            http_client,
             **kwargs,
         )

@@ -267,7 +279,10 @@

         async def generator():
             result = {"content": ""}
-            client = httpx.AsyncClient()
+            if self.http_client:
+                client = self.http_client
+            else:
+                client = httpx.AsyncClient()
             async with client.stream("POST", url, headers=headers, params=params, json=request_body) as response:
                 async for chunk in response.aiter_lines():
                     message = {"content": ""}
@@ -303,8 +318,12 @@
             return generator()
         else:
             url = f"{self.endpoint.api_base}/models/{self.model_setting.id}:generateContent"
-            async with httpx.AsyncClient(headers=headers, params=params, timeout=None) as client:
-                response = await client.post(url, json=request_body)
+            if self.http_client:
+                client = self.http_client
+            else:
+                client = httpx.AsyncClient()
+            async with client:
+                response = await client.post(url, json=request_body, headers=headers, params=params, timeout=None)
             response = response.json()
             result = {
                 "content": "",
--- vectorvein/chat_clients/minimax_client.py
+++ vectorvein/chat_clients/minimax_client.py
@@ -48,6 +48,7 @@ class MiniMaxChatClient(BaseChatClient):
         context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
         random_endpoint: bool = True,
         endpoint_id: str = "",
+        http_client: httpx.Client | None = None,
         **kwargs,
     ):
         super().__init__(
@@ -57,9 +58,13 @@ class MiniMaxChatClient(BaseChatClient):
             context_length_control,
             random_endpoint,
             endpoint_id,
+            http_client,
             **kwargs,
         )
-        self.http_client = httpx.Client()
+        if http_client:
+            self.http_client = http_client
+        else:
+            self.http_client = httpx.Client()

     def create_completion(
         self,
@@ -208,6 +213,7 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
         context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
         random_endpoint: bool = True,
         endpoint_id: str = "",
+        http_client: httpx.AsyncClient | None = None,
         **kwargs,
     ):
         super().__init__(
@@ -217,9 +223,13 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
             context_length_control,
             random_endpoint,
             endpoint_id,
+            http_client,
             **kwargs,
         )
-        self.http_client = httpx.AsyncClient()
+        if http_client:
+            self.http_client = http_client
+        else:
+            self.http_client = httpx.AsyncClient()

     async def create_completion(
         self,
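
MiniMax differs slightly from the base class: it always ends up with a usable `self.http_client`, defaulting to a fresh `httpx.Client()` (or `httpx.AsyncClient()`) when none is passed, presumably because it issues requests through httpx directly rather than through a vendor SDK. When the caller does supply the client, the caller also owns its lifecycle; a sketch, with the backend name assumed:

    import httpx

    from vectorvein.chat_clients import create_chat_client

    # Caller-owned transport: the context manager makes the lifecycle explicit.
    with httpx.Client(timeout=60.0) as http_client:
        minimax_client = create_chat_client(backend="minimax", http_client=http_client)
        # ... issue completions while the pool is open
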
--- vectorvein/chat_clients/openai_compatible_client.py
+++ vectorvein/chat_clients/openai_compatible_client.py
@@ -3,6 +3,7 @@
 import json
 import random

+import httpx
 from openai._types import NotGiven, NOT_GIVEN
 from openai._streaming import Stream, AsyncStream
 from openai.types.chat import ChatCompletion, ChatCompletionChunk
@@ -33,6 +34,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
         context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
         random_endpoint: bool = True,
         endpoint_id: str = "",
+        http_client: httpx.Client | None = None,
         **kwargs,
     ):
         super().__init__(
@@ -42,6 +44,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
             context_length_control,
             random_endpoint,
             endpoint_id,
+            http_client,
             **kwargs,
         )

@@ -74,12 +77,14 @@ class OpenAICompatibleChatClient(BaseChatClient):
             self._client = AzureOpenAI(
                 azure_endpoint=self.endpoint.api_base,
                 api_key=self.endpoint.api_key,
-                api_version="2024-05-01-preview",
+                api_version="2024-08-01-preview",
+                http_client=self.http_client,
             )
         else:
             self._client = OpenAI(
                 api_key=self.endpoint.api_key,
                 base_url=self.endpoint.api_base,
+                http_client=self.http_client,
             )

         if self.context_length_control == ContextLengthControlType.Latest:
@@ -188,6 +193,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
         context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
         random_endpoint: bool = True,
         endpoint_id: str = "",
+        http_client: httpx.AsyncClient | None = None,
         **kwargs,
     ):
         super().__init__(
@@ -197,6 +203,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             context_length_control,
             random_endpoint,
             endpoint_id,
+            http_client,
             **kwargs,
         )

@@ -229,12 +236,14 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             self._client = AsyncAzureOpenAI(
                 azure_endpoint=self.endpoint.api_base,
                 api_key=self.endpoint.api_key,
-                api_version="2024-05-01-preview",
+                api_version="2024-08-01-preview",
+                http_client=self.http_client,
             )
         else:
             self._client = AsyncOpenAI(
                 api_key=self.endpoint.api_key,
                 base_url=self.endpoint.api_base,
+                http_client=self.http_client,
             )

         if self.context_length_control == ContextLengthControlType.Latest:
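
Besides wiring `http_client` through (the OpenAI SDK's `OpenAI`, `AsyncOpenAI`, `AzureOpenAI`, and `AsyncAzureOpenAI` constructors all take an `http_client` keyword and build a default when given `None`), these hunks bump the Azure `api_version` from 2024-05-01-preview to 2024-08-01-preview. A sketch of what the wiring resolves to when a custom client reaches the plain-OpenAI branch; the key, URL, and CA-bundle path are placeholders:

    import httpx

    from openai import OpenAI

    sdk_client = OpenAI(
        api_key="sk-...",                              # placeholder
        base_url="https://api.openai.com/v1",          # placeholder
        http_client=httpx.Client(verify="/path/to/corp-ca.pem"),
    )
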
--- vectorvein/types/llm_parameters.py
+++ vectorvein/types/llm_parameters.py
@@ -69,6 +69,7 @@ class ChatCompletionMessage(BaseModel):

     usage: Optional[Usage] = None

+
 class ChatCompletionDeltaMessage(BaseModel):
     content: Optional[str] = None

--- vectorvein-0.1.11.dist-info/METADATA
+++ vectorvein-0.1.12.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.1.11
+Version: 0.1.12
 Summary: Default template for PDM package
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
--- vectorvein-0.1.11.dist-info/RECORD
+++ vectorvein-0.1.12.dist-info/RECORD
@@ -1,18 +1,18 @@
-vectorvein-0.1.11.dist-info/METADATA,sha256=rw0fFY-7V-teJUIt4t4KlvLIZHp4hjgaQBRpQ816eP0,502
-vectorvein-0.1.11.dist-info/WHEEL,sha256=rSwsxJWe3vzyR5HCwjWXQruDgschpei4h_giTm0dJVE,90
+vectorvein-0.1.12.dist-info/METADATA,sha256=OxvgLVzQ5ilVePTHSj5ZhU9aKdyb7nXq7A7UNyDFuzo,502
+vectorvein-0.1.12.dist-info/WHEEL,sha256=rSwsxJWe3vzyR5HCwjWXQruDgschpei4h_giTm0dJVE,90
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vectorvein/chat_clients/__init__.py,sha256=5j7W--jr-l2cqDJp38uXYvkydDK0rnzm7MYGSACHKmU,3976
-vectorvein/chat_clients/anthropic_client.py,sha256=MHpLywbIIgFsAkj8EiKEoxUgxcbx3njFxLTN2GhEjys,20088
-vectorvein/chat_clients/base_client.py,sha256=wMXpQ1L1KDb2Hg6va3H3GmcVeQB6r6sh7F4IS0DBQWI,4275
+vectorvein/chat_clients/__init__.py,sha256=rur7elYPL-HIQM46U5Kb8aO4tMXfYOOKLdIHFlKIw0M,4421
+vectorvein/chat_clients/anthropic_client.py,sha256=sECXynbReJs1xixFEHSlfEQo9Q1zGf5uCJ-V8emdoAM,20438
+vectorvein/chat_clients/base_client.py,sha256=CscLWDNd8pE0EKthvRRtxyjMhGGt7pwP4FOts9KNOLU,4469
 vectorvein/chat_clients/deepseek_client.py,sha256=3qWu01NlJAP2N-Ff62d5-CZXZitlizE1fzb20LNetig,526
-vectorvein/chat_clients/gemini_client.py,sha256=jUu-wwpaChP8_lxLdWnYkgVr_aSegCHbmBgnpFztqAw,13743
+vectorvein/chat_clients/gemini_client.py,sha256=W-9Vu-GTE9wxStPznyNR0rBEgDG3LYBu2uQXd4sh1YQ,14425
 vectorvein/chat_clients/groq_client.py,sha256=Uow4pgdmFi93ZQSoOol2-0PhhqkW-S0XuSldvppz5U4,498
 vectorvein/chat_clients/local_client.py,sha256=55nOsxzqUf79q3Y14MKROA71zxhsT7p7FsDZ89rts2M,422
-vectorvein/chat_clients/minimax_client.py,sha256=ZO-7WNsvsct3cpuo0Qc6WL3A0kkijn0J8o0ahlxmUyE,13478
+vectorvein/chat_clients/minimax_client.py,sha256=iNq87zWx79g8tGo784c67xUt-YQ4TyMoFWUIjDVEMGw,13801
 vectorvein/chat_clients/mistral_client.py,sha256=1aKSylzBDaLYcFnaBIL4-sXSzWmXfBeON9Q0rq-ziWw,534
 vectorvein/chat_clients/moonshot_client.py,sha256=gbu-6nGxx8uM_U2WlI4Wus881rFRotzHtMSoYOcruGU,526
 vectorvein/chat_clients/openai_client.py,sha256=Nz6tV45pWcsOupxjnsRsGTicbQNJWIZyxuJoJ5DGMpg,527
-vectorvein/chat_clients/openai_compatible_client.py,sha256=rPxIPj4STm1sy7T6bqyZpO-FPwMyjyQmThyJqd8_jYs,13822
+vectorvein/chat_clients/openai_compatible_client.py,sha256=00Kxwmnooh-yfENkNLgl1Ped2s912njwVitqes8SZpw,14181
 vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
 vectorvein/chat_clients/utils.py,sha256=mnAew2Ie3nQHdEyDLKuJvXkQ5QdcSAJ6SpYk5JPbR1Q,20888
 vectorvein/chat_clients/yi_client.py,sha256=RNf4CRuPJfixrwLZ3-DEc3t25QDe1mvZeb9sku2f8Bc,484
@@ -20,7 +20,7 @@ vectorvein/chat_clients/zhipuai_client.py,sha256=Ys5DSeLCuedaDXr3PfG1EW2zKXopt-a
 vectorvein/settings/__init__.py,sha256=4mpccT7eZC3yI1vVnVViW4wHBnDEH9D2R5EsIP34VgU,3218
 vectorvein/types/defaults.py,sha256=lCzGOLybX8FzHX-Cv32BaQFZ8sHvPhGIIwDD-VksP20,13460
 vectorvein/types/enums.py,sha256=vzOenCnRlFXBwPh-lfFhjGfM-6yfDj7wZColHODqocI,1550
-vectorvein/types/llm_parameters.py,sha256=nBjStC2zndTY__yhD2WFXB09taxEhDLE3OHA6MICfgE,3494
+vectorvein/types/llm_parameters.py,sha256=PWN18dDGrCTP4Bz7pX0XxO-wDUA7qTngppzEELrROmc,3496
 vectorvein/utilities/media_processing.py,sha256=BujciRmw1GMmc3ELRvafL8STcy6r5b2rVnh27-uA7so,2256
 vectorvein/utilities/retry.py,sha256=9ePuJdeUUGx-qMWfaFxmlOvG_lQPwCQ4UB1z3Edlo34,993
-vectorvein-0.1.11.dist-info/RECORD,,
+vectorvein-0.1.12.dist-info/RECORD,,
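
For readers checking these hashes: each RECORD line is `path,sha256=<digest>,<size>`, where the digest is the urlsafe-base64 encoding of the file's SHA-256 hash with the trailing `=` padding stripped (the standard wheel RECORD format per PEP 376/427). A small sketch that reproduces an entry from an unpacked wheel:

    import base64
    import hashlib
    from pathlib import Path

    def record_entry(path: str) -> str:
        data = Path(path).read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
        return f"{path},sha256={digest.decode()},{len(data)}"

    # record_entry("vectorvein/chat_clients/base_client.py") should match the line above.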