vectorvein 0.1.16__tar.gz → 0.1.18__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vectorvein-0.1.16 → vectorvein-0.1.18}/PKG-INFO +1 -1
- {vectorvein-0.1.16 → vectorvein-0.1.18}/pyproject.toml +1 -1
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/anthropic_client.py +90 -86
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/base_client.py +12 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/openai_compatible_client.py +79 -36
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/utils.py +1 -1
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/types/defaults.py +4 -4
- {vectorvein-0.1.16 → vectorvein-0.1.18}/README.md +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/__init__.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/gemini_client.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/minimax_client.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/settings/__init__.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/types/enums.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/types/llm_parameters.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/utilities/media_processing.py +0 -0
- {vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/utilities/retry.py +0 -0
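
Read together, the hunks below make one structural change plus two small fixes: SDK-client construction moves out of create_completion and into a raw_client property on each chat client (a plain @property on the Anthropic clients, a functools.cached_property on the OpenAI-compatible ones), raw_client is declared as an abstract property on BaseChatClient and BaseAsyncChatClient, the tool-overhead estimate in utils.py now passes the stringified tools and the model name to get_token_counts, and defaults.py marks response_format_available as False for the bundled Claude models.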
{vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/anthropic_client.py
RENAMED
@@ -112,46 +112,8 @@ class AnthropicChatClient(BaseChatClient):
             **kwargs,
         )
 
-    def create_completion(
-        self,
-        messages: list = list,
-        model: str | None = None,
-        stream: bool | None = None,
-        temperature: float | None = None,
-        max_tokens: int | None = None,
-        tools: list | NotGiven = NOT_GIVEN,
-        tool_choice: str | NotGiven = NOT_GIVEN,
-        **kwargs,
-    ):
-        if model is not None:
-            self.model = model
-        if stream is not None:
-            self.stream = stream
-        if temperature is not None:
-            self.temperature = temperature
-        if isinstance(tools, OpenAINotGiven):
-            tools = NOT_GIVEN
-        if isinstance(tool_choice, OpenAINotGiven):
-            tool_choice = NOT_GIVEN
-
-        self.model_setting = self.backend_settings.models[self.model]
-
-        if messages[0].get("role") == "system":
-            system_prompt = messages[0]["content"]
-            messages = messages[1:]
-        else:
-            system_prompt = ""
-
-        if self.context_length_control == ContextLengthControlType.Latest:
-            messages = cutoff_messages(
-                messages,
-                max_count=self.model_setting.context_length,
-                backend=self.BACKEND_NAME,
-                model=self.model_setting.id,
-            )
-
-        messages = format_messages_alternate(messages)
-
+    @property
+    def raw_client(self):
         if self.random_endpoint:
             self.random_endpoint = True
             self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
@@ -181,7 +143,7 @@ class AnthropicChatClient(BaseChatClient):
         else:
             base_url = f"{self.endpoint.api_base}{self.endpoint.region}-aiplatform/v1"
 
-            self._client = AnthropicVertex(
+            return AnthropicVertex(
                 region=self.endpoint.region,
                 base_url=base_url,
                 project_id=self.endpoint.credentials.get("quota_project_id"),
@@ -189,12 +151,52 @@ class AnthropicChatClient(BaseChatClient):
                 http_client=self.http_client,
             )
         else:
-            self._client = Anthropic(
+            return Anthropic(
                 api_key=self.endpoint.api_key,
                 base_url=self.endpoint.api_base,
                 http_client=self.http_client,
             )
 
+    def create_completion(
+        self,
+        messages: list = list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: list | NotGiven = NOT_GIVEN,
+        tool_choice: str | NotGiven = NOT_GIVEN,
+        **kwargs,
+    ):
+        if model is not None:
+            self.model = model
+        if stream is not None:
+            self.stream = stream
+        if temperature is not None:
+            self.temperature = temperature
+        if isinstance(tools, OpenAINotGiven):
+            tools = NOT_GIVEN
+        if isinstance(tool_choice, OpenAINotGiven):
+            tool_choice = NOT_GIVEN
+
+        self.model_setting = self.backend_settings.models[self.model]
+
+        if messages[0].get("role") == "system":
+            system_prompt = messages[0]["content"]
+            messages = messages[1:]
+        else:
+            system_prompt = ""
+
+        if self.context_length_control == ContextLengthControlType.Latest:
+            messages = cutoff_messages(
+                messages,
+                max_count=self.model_setting.context_length,
+                backend=self.BACKEND_NAME,
+                model=self.model_setting.id,
+            )
+
+        messages = format_messages_alternate(messages)
+
         tools_params = refactor_tool_use_params(tools) if tools else tools
 
         if max_tokens is None:
@@ -206,7 +208,7 @@ class AnthropicChatClient(BaseChatClient):
         else:
             max_tokens = self.model_setting.context_length - token_counts
 
-        response = self._client.messages.create(
+        response = self.raw_client.messages.create(
             model=self.model_setting.id,
             messages=messages,
             system=system_prompt,
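
Note: the hunks above invert the old flow. Where 0.1.16 built self._client inline inside create_completion, 0.1.18 resolves the endpoint and constructs the SDK client inside a read-only raw_client property, and create_completion simply calls self.raw_client.messages.create(...). A minimal sketch of the pattern, with placeholder endpoint handling (Endpoint and the hard-coded fields below are illustrative, not vectorvein's actual settings API):

    import random
    from dataclasses import dataclass

    from anthropic import Anthropic, AnthropicVertex


    @dataclass
    class Endpoint:
        # Illustrative stand-in for vectorvein's endpoint settings object.
        api_key: str | None = None
        api_base: str | None = None
        region: str | None = None
        is_vertex: bool = False


    class ClientSketch:
        def __init__(self, endpoints: list[Endpoint]):
            self.endpoints = endpoints

        @property
        def raw_client(self) -> Anthropic | AnthropicVertex:
            # A plain @property re-runs endpoint selection on every access,
            # matching the random_endpoint re-roll in the diff above.
            endpoint = random.choice(self.endpoints)
            if endpoint.is_vertex:
                return AnthropicVertex(region=endpoint.region, base_url=endpoint.api_base)
            return Anthropic(api_key=endpoint.api_key, base_url=endpoint.api_base)

Besides deduplicating construction between the Vertex and direct-API paths, this exposes the underlying SDK client to callers that need features the wrapper does not surface.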
@@ -317,46 +319,8 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
             **kwargs,
         )
 
-    async def create_completion(
-        self,
-        messages: list = list,
-        model: str | None = None,
-        stream: bool | None = None,
-        temperature: float | None = None,
-        max_tokens: int | None = None,
-        tools: list | NotGiven = NOT_GIVEN,
-        tool_choice: str | NotGiven = NOT_GIVEN,
-        **kwargs,
-    ):
-        if model is not None:
-            self.model = model
-        if stream is not None:
-            self.stream = stream
-        if temperature is not None:
-            self.temperature = temperature
-        if isinstance(tools, OpenAINotGiven):
-            tools = NOT_GIVEN
-        if isinstance(tool_choice, OpenAINotGiven):
-            tool_choice = NOT_GIVEN
-
-        self.model_setting = self.backend_settings.models[self.model]
-
-        if messages[0].get("role") == "system":
-            system_prompt = messages[0]["content"]
-            messages = messages[1:]
-        else:
-            system_prompt = ""
-
-        if self.context_length_control == ContextLengthControlType.Latest:
-            messages = cutoff_messages(
-                messages,
-                max_count=self.model_setting.context_length,
-                backend=self.BACKEND_NAME,
-                model=self.model_setting.id,
-            )
-
-        messages = format_messages_alternate(messages)
-
+    @property
+    def raw_client(self):
         if self.random_endpoint:
             self.random_endpoint = True
             self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
@@ -386,7 +350,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         else:
             base_url = f"{self.endpoint.api_base}{self.endpoint.region}-aiplatform/v1"
 
-            self._client = AsyncAnthropicVertex(
+            return AsyncAnthropicVertex(
                 region=self.endpoint.region,
                 base_url=base_url,
                 project_id=self.endpoint.credentials.get("quota_project_id"),
@@ -394,12 +358,52 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 http_client=self.http_client,
             )
         else:
-            self._client = AsyncAnthropic(
+            return AsyncAnthropic(
                 api_key=self.endpoint.api_key,
                 base_url=self.endpoint.api_base,
                 http_client=self.http_client,
             )
 
+    async def create_completion(
+        self,
+        messages: list = list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: list | NotGiven = NOT_GIVEN,
+        tool_choice: str | NotGiven = NOT_GIVEN,
+        **kwargs,
+    ):
+        if model is not None:
+            self.model = model
+        if stream is not None:
+            self.stream = stream
+        if temperature is not None:
+            self.temperature = temperature
+        if isinstance(tools, OpenAINotGiven):
+            tools = NOT_GIVEN
+        if isinstance(tool_choice, OpenAINotGiven):
+            tool_choice = NOT_GIVEN
+
+        self.model_setting = self.backend_settings.models[self.model]
+
+        if messages[0].get("role") == "system":
+            system_prompt = messages[0]["content"]
+            messages = messages[1:]
+        else:
+            system_prompt = ""
+
+        if self.context_length_control == ContextLengthControlType.Latest:
+            messages = cutoff_messages(
+                messages,
+                max_count=self.model_setting.context_length,
+                backend=self.BACKEND_NAME,
+                model=self.model_setting.id,
+            )
+
+        messages = format_messages_alternate(messages)
+
         tools_params = refactor_tool_use_params(tools) if tools else tools
 
         if max_tokens is None:
@@ -411,7 +415,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         else:
             max_tokens = self.model_setting.context_length - token_counts
 
-        response = await self._client.messages.create(
+        response = await self.raw_client.messages.create(
             model=self.model_setting.id,
             messages=messages,
             system=system_prompt,
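
The async client mirrors this exactly: raw_client stays a synchronous property because constructing AsyncAnthropic or AsyncAnthropicVertex performs no I/O, and only the API call itself is awaited. A sketch (placeholder key and simplified signature, not vectorvein's wrapper):

    from anthropic import AsyncAnthropic


    class AsyncClientSketch:
        @property
        def raw_client(self) -> AsyncAnthropic:
            # No await needed: building the client object does no network I/O.
            return AsyncAnthropic(api_key="sk-placeholder")

        async def create_completion(self, messages: list[dict]) -> str:
            response = await self.raw_client.messages.create(
                model="claude-3-haiku-20240307",
                max_tokens=256,
                messages=messages,
            )
            return response.content[0].text  # first content block of the reply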
{vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/base_client.py
RENAMED
@@ -5,6 +5,8 @@ from typing import Generator, AsyncGenerator, Any
 
 import httpx
 from openai._types import NotGiven, NOT_GIVEN
+from openai import OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI
+from anthropic import Anthropic, AnthropicVertex, AsyncAnthropic, AsyncAnthropicVertex
 
 from ..settings import settings
 from ..types import defaults as defs
@@ -42,6 +44,11 @@ class BaseChatClient(ABC):
         self.random_endpoint = False
         self.endpoint = settings.get_endpoint(self.endpoint_id)
 
+    @property
+    @abstractmethod
+    def raw_client(self) -> OpenAI | AzureOpenAI | Anthropic | AnthropicVertex:
+        pass
+
     @abstractmethod
     def create_completion(
         self,
@@ -108,6 +115,11 @@ class BaseAsyncChatClient(ABC):
         self.random_endpoint = False
         self.endpoint = settings.get_endpoint(self.endpoint_id)
 
+    @property
+    @abstractmethod
+    def raw_client(self) -> AsyncOpenAI | AsyncAzureOpenAI | AsyncAnthropic | AsyncAnthropicVertex:
+        pass
+
     @abstractmethod
     async def create_completion(
         self,
{vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/openai_compatible_client.py
RENAMED
@@ -2,6 +2,7 @@
 # @Date: 2024-07-26 14:48:55
 import json
 import random
+from functools import cached_property
 
 import httpx
 from openai._types import NotGiven, NOT_GIVEN
@@ -48,6 +49,27 @@ class OpenAICompatibleChatClient(BaseChatClient):
             **kwargs,
         )
 
+    @cached_property
+    def raw_client(self):
+        if self.random_endpoint:
+            self.random_endpoint = True
+            self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.endpoint = settings.get_endpoint(self.endpoint_id)
+
+        if self.endpoint.is_azure:
+            return AzureOpenAI(
+                azure_endpoint=self.endpoint.api_base,
+                api_key=self.endpoint.api_key,
+                api_version="2024-08-01-preview",
+                http_client=self.http_client,
+            )
+        else:
+            return OpenAI(
+                api_key=self.endpoint.api_key,
+                base_url=self.endpoint.api_base,
+                http_client=self.http_client,
+            )
+
     def create_completion(
         self,
         messages: list = list,
@@ -68,24 +90,24 @@ class OpenAICompatibleChatClient(BaseChatClient):
 
         self.model_setting = self.backend_settings.models[self.model]
 
-        if self.random_endpoint:
-            self.random_endpoint = True
-            self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
-            self.endpoint = settings.get_endpoint(self.endpoint_id)
+        # if self.random_endpoint:
+        #     self.random_endpoint = True
+        #     self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
+        #     self.endpoint = settings.get_endpoint(self.endpoint_id)
 
-        if self.endpoint.is_azure:
-            self._client = AzureOpenAI(
-                azure_endpoint=self.endpoint.api_base,
-                api_key=self.endpoint.api_key,
-                api_version="2024-08-01-preview",
-                http_client=self.http_client,
-            )
-        else:
-            self._client = OpenAI(
-                api_key=self.endpoint.api_key,
-                base_url=self.endpoint.api_base,
-                http_client=self.http_client,
-            )
+        # if self.endpoint.is_azure:
+        #     self._client = AzureOpenAI(
+        #         azure_endpoint=self.endpoint.api_base,
+        #         api_key=self.endpoint.api_key,
+        #         api_version="2024-08-01-preview",
+        #         http_client=self.http_client,
+        #     )
+        # else:
+        #     self._client = OpenAI(
+        #         api_key=self.endpoint.api_key,
+        #         base_url=self.endpoint.api_base,
+        #         http_client=self.http_client,
+        #     )
 
         if self.context_length_control == ContextLengthControlType.Latest:
             messages = cutoff_messages(
@@ -118,7 +140,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
         else:
             max_tokens = self.model_setting.context_length - token_counts
 
-        response: ChatCompletion | Stream[ChatCompletionChunk] = self._client.chat.completions.create(
+        response: ChatCompletion | Stream[ChatCompletionChunk] = self.raw_client.chat.completions.create(
             model=self.model_setting.id,
             messages=messages,
             stream=self.stream,
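
Unlike the Anthropic clients, the OpenAI-compatible clients use functools.cached_property, so the client (and the randomly selected endpoint) is built once per instance and then reused. The difference is easy to demonstrate with a counter standing in for client construction:

    from functools import cached_property


    class Rebuilding:
        def __init__(self) -> None:
            self.builds = 0

        @property
        def raw_client(self) -> int:
            self.builds += 1  # a new "client" on every access
            return self.builds


    class Cached:
        def __init__(self) -> None:
            self.builds = 0

        @cached_property
        def raw_client(self) -> int:
            self.builds += 1  # built once, then memoized on the instance
            return self.builds


    r, c = Rebuilding(), Cached()
    assert (r.raw_client, r.raw_client) == (1, 2)  # plain property: rebuilt each time
    assert (c.raw_client, c.raw_client) == (1, 1)  # cached_property: reused

Caching avoids re-running endpoint selection and client setup on every request.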
@@ -206,6 +228,27 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             **kwargs,
         )
 
+    @cached_property
+    def raw_client(self):
+        if self.random_endpoint:
+            self.random_endpoint = True
+            self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.endpoint = settings.get_endpoint(self.endpoint_id)
+
+        if self.endpoint.is_azure:
+            return AsyncAzureOpenAI(
+                azure_endpoint=self.endpoint.api_base,
+                api_key=self.endpoint.api_key,
+                api_version="2024-08-01-preview",
+                http_client=self.http_client,
+            )
+        else:
+            return AsyncOpenAI(
+                api_key=self.endpoint.api_key,
+                base_url=self.endpoint.api_base,
+                http_client=self.http_client,
+            )
+
     async def create_completion(
         self,
         messages: list = list,
@@ -226,24 +269,24 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
 
         self.model_setting = self.backend_settings.models[self.model]
 
-        if self.random_endpoint:
-            self.random_endpoint = True
-            self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
-            self.endpoint = settings.get_endpoint(self.endpoint_id)
+        # if self.random_endpoint:
+        #     self.random_endpoint = True
+        #     self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
+        #     self.endpoint = settings.get_endpoint(self.endpoint_id)
 
-        if self.endpoint.is_azure:
-            self._client = AsyncAzureOpenAI(
-                azure_endpoint=self.endpoint.api_base,
-                api_key=self.endpoint.api_key,
-                api_version="2024-08-01-preview",
-                http_client=self.http_client,
-            )
-        else:
-            self._client = AsyncOpenAI(
-                api_key=self.endpoint.api_key,
-                base_url=self.endpoint.api_base,
-                http_client=self.http_client,
-            )
+        # if self.endpoint.is_azure:
+        #     self._client = AsyncAzureOpenAI(
+        #         azure_endpoint=self.endpoint.api_base,
+        #         api_key=self.endpoint.api_key,
+        #         api_version="2024-08-01-preview",
+        #         http_client=self.http_client,
+        #     )
+        # else:
+        #     self._client = AsyncOpenAI(
+        #         api_key=self.endpoint.api_key,
+        #         base_url=self.endpoint.api_base,
+        #         http_client=self.http_client,
+        #     )
 
         if self.context_length_control == ContextLengthControlType.Latest:
             messages = cutoff_messages(
@@ -276,7 +319,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
         else:
             max_tokens = self.model_setting.context_length - token_counts
 
-        response: ChatCompletion | AsyncStream[ChatCompletionChunk] = await self._client.chat.completions.create(
+        response: ChatCompletion | AsyncStream[ChatCompletionChunk] = await self.raw_client.chat.completions.create(
             model=self.model_setting.id,
             messages=messages,
             stream=self.stream,
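
One behavioral consequence of cached_property: after the first access, the randomly chosen endpoint is pinned for the lifetime of the instance. If per-call rotation were wanted, the cache can be dropped with del, which is standard functools behavior (sketch, not vectorvein API):

    import random
    from functools import cached_property


    class PinnedEndpointClient:
        endpoints = ["https://a.example", "https://b.example"]

        @cached_property
        def raw_client(self) -> str:
            return random.choice(self.endpoints)  # stand-in for building the SDK client


    client = PinnedEndpointClient()
    first = client.raw_client   # endpoint chosen and pinned here
    del client.raw_client       # clears the cache (raises AttributeError if never accessed)
    second = client.raw_client  # endpoint re-selected on next access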
{vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/chat_clients/utils.py
RENAMED
@@ -222,7 +222,7 @@ def get_message_token_counts(messages: list, tools: dict | None = None, model: s
         # TODO: Get real image size
         tokens += calculate_image_tokens(2048, 2048, model)
     if tools is not None:
-        tokens += get_token_counts(
+        tokens += get_token_counts(str(tools), model)
 
     return tokens
 
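
The utils.py hunk counts tool-definition overhead by passing the stringified tools and the model name to get_token_counts, so the estimate uses a tokenizer that matches the target model. A rough sketch of that idea using tiktoken (vectorvein's real get_token_counts may dispatch differently per backend):

    import tiktoken


    def get_token_counts_sketch(text: str, model: str = "gpt-4o") -> int:
        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            encoding = tiktoken.get_encoding("cl100k_base")  # fallback for unknown models
        return len(encoding.encode(text))


    tools = [{"type": "function", "function": {"name": "get_weather"}}]
    overhead = get_token_counts_sketch(str(tools), "gpt-4o")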
{vectorvein-0.1.16 → vectorvein-0.1.18}/src/vectorvein/types/defaults.py
RENAMED
@@ -424,7 +424,7 @@ ANTHROPIC_MODELS = {
         "context_length": 200000,
         "max_output_tokens": 4096,
         "function_call_available": True,
-        "response_format_available":
+        "response_format_available": False,
         "native_multimodal": True,
     },
     "claude-3-sonnet-20240229": {
@@ -433,14 +433,14 @@ ANTHROPIC_MODELS = {
         "max_output_tokens": 4096,
         "function_call_available": True,
         "native_multimodal": True,
-        "response_format_available":
+        "response_format_available": False,
     },
     "claude-3-haiku-20240307": {
         "id": "claude-3-haiku-20240307",
         "context_length": 200000,
         "max_output_tokens": 4096,
         "function_call_available": True,
-        "response_format_available":
+        "response_format_available": False,
         "native_multimodal": True,
     },
     "claude-3-5-sonnet-20240620": {
@@ -448,7 +448,7 @@ ANTHROPIC_MODELS = {
         "context_length": 200000,
         "max_output_tokens": 4096,
         "function_call_available": True,
-        "response_format_available":
+        "response_format_available": False,
         "native_multimodal": True,
     },
 }
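
Finally, defaults.py sets response_format_available to False on all four bundled Claude entries, signaling that callers should not forward an OpenAI-style response_format parameter to the Anthropic backend. A guard over such a capability flag might look like this (the dict entry matches the diff; the helper is hypothetical):

    ANTHROPIC_MODELS = {
        "claude-3-5-sonnet-20240620": {
            "id": "claude-3-5-sonnet-20240620",
            "context_length": 200000,
            "max_output_tokens": 4096,
            "function_call_available": True,
            "response_format_available": False,
            "native_multimodal": True,
        },
    }


    def build_request_kwargs(model: str, want_json: bool) -> dict:
        kwargs: dict = {}
        setting = ANTHROPIC_MODELS[model]
        if want_json and setting["response_format_available"]:
            kwargs["response_format"] = {"type": "json_object"}
        # With the flag False, JSON output must be requested via prompting instead.
        return kwargs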