vectorvein 0.1.26__tar.gz → 0.1.28__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vectorvein-0.1.26 → vectorvein-0.1.28}/PKG-INFO +1 -1
- {vectorvein-0.1.26 → vectorvein-0.1.28}/pyproject.toml +1 -1
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/__init__.py +9 -10
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/base_client.py +32 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/gemini_client.py +30 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/minimax_client.py +30 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/openai_compatible_client.py +34 -4
- vectorvein-0.1.28/src/vectorvein/chat_clients/stepfun_client.py +15 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/utils.py +25 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/settings/__init__.py +3 -1
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/types/defaults.py +95 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/types/enums.py +3 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/README.md +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/types/exception.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/types/llm_parameters.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/utilities/media_processing.py +0 -0
- {vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/utilities/retry.py +0 -0
{vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/__init__.py
RENAMED
@@ -13,6 +13,7 @@ from .openai_client import OpenAIChatClient, AsyncOpenAIChatClient
 from .zhipuai_client import ZhiPuAIChatClient, AsyncZhiPuAIChatClient
 from .minimax_client import MiniMaxChatClient, AsyncMiniMaxChatClient
 from .mistral_client import MistralChatClient, AsyncMistralChatClient
+from .stepfun_client import StepFunChatClient, AsyncStepFunChatClient
 from .baichuan_client import BaichuanChatClient, AsyncBaichuanChatClient
 from .moonshot_client import MoonshotChatClient, AsyncMoonshotChatClient
 from .deepseek_client import DeepSeekChatClient, AsyncDeepSeekChatClient
@@ -38,6 +39,7 @@ BackendMap = {
         BackendType.Yi: YiChatClient,
         BackendType.ZhiPuAI: ZhiPuAIChatClient,
         BackendType.Baichuan: BaichuanChatClient,
+        BackendType.StepFun: StepFunChatClient,
     },
     "async": {
         BackendType.Anthropic: AsyncAnthropicChatClient,
@@ -53,6 +55,7 @@ BackendMap = {
         BackendType.Yi: AsyncYiChatClient,
         BackendType.ZhiPuAI: AsyncZhiPuAIChatClient,
         BackendType.Baichuan: AsyncBaichuanChatClient,
+        BackendType.StepFun: AsyncStepFunChatClient,
     },
 }
 
@@ -68,15 +71,13 @@ def create_chat_client(
     http_client: httpx.Client | None = None,
     **kwargs,
 ) -> BaseChatClient:
-    if backend
+    if backend not in BackendMap["sync"]:
         raise ValueError(f"Unsupported backend: {backend}")
-    else:
-        backend_key = backend.lower()
 
-    ClientClass = BackendMap["sync"][
+    ClientClass = BackendMap["sync"][backend]
     if model is None:
         model = ClientClass.DEFAULT_MODEL
-    return BackendMap["sync"][
+    return BackendMap["sync"][backend](
         model=model,
         stream=stream,
         temperature=temperature,
@@ -99,15 +100,13 @@ def create_async_chat_client(
     http_client: httpx.AsyncClient | None = None,
     **kwargs,
 ) -> BaseAsyncChatClient:
-    if backend
+    if backend not in BackendMap["async"]:
         raise ValueError(f"Unsupported backend: {backend}")
-    else:
-        backend_key = backend.lower()
 
-    ClientClass = BackendMap["async"][
+    ClientClass = BackendMap["async"][backend]
     if model is None:
         model = ClientClass.DEFAULT_MODEL
-    return BackendMap["async"][
+    return BackendMap["async"][backend](
         model=model,
         stream=stream,
         temperature=temperature,
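
Taken together, the __init__.py changes register the new StepFun clients in BackendMap and switch create_chat_client / create_async_chat_client to index the map with the BackendType member itself instead of a lowercased backend_key. A minimal usage sketch, assuming vectorvein 0.1.28 is installed and StepFun endpoints/API keys have already been loaded into its settings:

# Illustrative sketch only; endpoint credentials are assumed to be configured elsewhere.
from vectorvein.chat_clients import create_chat_client
from vectorvein.types.enums import BackendType

# A backend not registered in BackendMap["sync"] now raises ValueError("Unsupported backend: ...").
client = create_chat_client(backend=BackendType.StepFun, stream=False, temperature=0.7)
print(client.DEFAULT_MODEL)  # "step-1-8k", the StepFun default added in types/defaults.py
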
{vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/base_client.py
RENAMED
@@ -88,6 +88,22 @@ class BaseChatClient(ABC):
     ) -> Generator[ChatCompletionDeltaMessage, Any, None]:
         pass
 
+    @overload
+    @abstractmethod
+    def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool = False,
+        temperature: float = 0.7,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
+        pass
+
     @abstractmethod
     def create_completion(
         self,
@@ -196,6 +212,22 @@ class BaseAsyncChatClient(ABC):
     ) -> AsyncGenerator[ChatCompletionDeltaMessage, None]:
         pass
 
+    @overload
+    @abstractmethod
+    async def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool = False,
+        temperature: float = 0.7,
+        max_tokens: int | None = None,
+        tools: list | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, None]:
+        pass
+
     @abstractmethod
     async def create_completion(
         self,
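
The overloads added to BaseChatClient and BaseAsyncChatClient (and mirrored in the concrete clients below) exist for static type checkers: create_completion can return either a complete ChatCompletionMessage or a generator of ChatCompletionDeltaMessage chunks, and stacked @overload signatures express that without changing runtime behaviour. A self-contained sketch of the pattern with made-up names, not vectorvein's actual signatures:

from typing import Generator, Literal, overload


@overload
def complete(prompt: str, stream: Literal[False] = False) -> str: ...
@overload
def complete(prompt: str, stream: Literal[True]) -> Generator[str, None, None]: ...


def complete(prompt: str, stream: bool = False) -> str | Generator[str, None, None]:
    # Only this last, undecorated definition runs; the @overload stubs above are
    # erased at runtime and consulted only by static type checkers.
    if stream:
        return (word for word in prompt.split())
    return prompt


whole: str = complete("hello world")                # checker infers str
parts = list(complete("hello world", stream=True))  # checker infers Generator[str, None, None]
print(whole, parts)
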
{vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/gemini_client.py
RENAMED
@@ -82,6 +82,21 @@ class GeminiChatClient(BaseChatClient):
     ) -> Generator[ChatCompletionDeltaMessage, None, None]:
         pass
 
+    @overload
+    def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
+        pass
+
     def create_completion(
         self,
         messages: list,
@@ -295,6 +310,21 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
     ) -> AsyncGenerator[ChatCompletionDeltaMessage, Any]:
         pass
 
+    @overload
+    async def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, Any]:
+        pass
+
     async def create_completion(
         self,
         messages: list,
{vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/minimax_client.py
RENAMED
@@ -107,6 +107,21 @@ class MiniMaxChatClient(BaseChatClient):
     ) -> Generator[ChatCompletionDeltaMessage, None, None]:
         pass
 
+    @overload
+    def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
+        pass
+
     def create_completion(
         self,
         messages: list,
@@ -307,6 +322,21 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
     ) -> AsyncGenerator[ChatCompletionDeltaMessage, Any]:
         pass
 
+    @overload
+    async def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, Any]:
+        pass
+
     async def create_completion(
         self,
         messages: list,
{vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/openai_compatible_client.py
RENAMED
@@ -107,6 +107,21 @@ class OpenAICompatibleChatClient(BaseChatClient):
     ) -> Generator[ChatCompletionDeltaMessage, None, None]:
         pass
 
+    @overload
+    def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
+        pass
+
     def create_completion(
         self,
         messages: list,
@@ -154,10 +169,10 @@ class OpenAICompatibleChatClient(BaseChatClient):
         max_output_tokens = self.model_setting.max_output_tokens
         token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model_setting.id)
         if max_output_tokens is not None:
-            max_tokens = self.model_setting.context_length - token_counts
+            max_tokens = self.model_setting.context_length - token_counts - 64
             max_tokens = min(max(max_tokens, 1), max_output_tokens)
         else:
-            max_tokens = self.model_setting.context_length - token_counts
+            max_tokens = self.model_setting.context_length - token_counts - 64
 
         if response_format and self.model_setting.response_format_available:
             self.response_format = {"response_format": response_format}
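
Both branches of the max_tokens calculation now reserve an extra 64 tokens of headroom before clamping, which guards against the token counter slightly underestimating the prompt. A worked example with illustrative numbers, not values from the package:

context_length = 8192      # hypothetical model context window
max_output_tokens = 4096   # hypothetical per-model output cap
token_counts = 5000        # measured prompt size

# 0.1.28 behaviour: subtract 64 tokens of headroom, then clamp into [1, max_output_tokens].
max_tokens = context_length - token_counts - 64
max_tokens = min(max(max_tokens, 1), max_output_tokens)
print(max_tokens)  # 3128 (previously 3192 without the 64-token reserve)
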
@@ -314,6 +329,21 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
     ) -> AsyncGenerator[ChatCompletionDeltaMessage, Any]:
         pass
 
+    @overload
+    async def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, Any]:
+        pass
+
     async def create_completion(
         self,
         messages: list,
@@ -366,10 +396,10 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
         max_output_tokens = self.model_setting.max_output_tokens
         token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model_setting.id)
         if max_output_tokens is not None:
-            max_tokens = self.model_setting.context_length - token_counts
+            max_tokens = self.model_setting.context_length - token_counts - 64
            max_tokens = min(max(max_tokens, 1), max_output_tokens)
         else:
-            max_tokens = self.model_setting.context_length - token_counts
+            max_tokens = self.model_setting.context_length - token_counts - 64
 
         if self.stream:
             stream_response: AsyncStream[ChatCompletionChunk] = await self.raw_client.chat.completions.create(
vectorvein-0.1.28/src/vectorvein/chat_clients/stepfun_client.py
@@ -0,0 +1,15 @@
+# @Author: Bi Ying
+# @Date: 2024-07-26 14:48:55
+from ..types.enums import BackendType
+from ..types.defaults import STEPFUN_DEFAULT_MODEL
+from .openai_compatible_client import OpenAICompatibleChatClient, AsyncOpenAICompatibleChatClient
+
+
+class StepFunChatClient(OpenAICompatibleChatClient):
+    DEFAULT_MODEL = STEPFUN_DEFAULT_MODEL
+    BACKEND_NAME = BackendType.StepFun
+
+
+class AsyncStepFunChatClient(AsyncOpenAICompatibleChatClient):
+    DEFAULT_MODEL = STEPFUN_DEFAULT_MODEL
+    BACKEND_NAME = BackendType.StepFun
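
The new StepFun clients are thin subclasses of the OpenAI-compatible client: they only pin DEFAULT_MODEL and BACKEND_NAME. A hedged sketch of the same recipe for a hypothetical provider ("Acme" is invented for illustration; a real addition would also need a BackendType member, a model table in types/defaults.py, a Settings field, and BackendMap entries, as the StepFun hunks above show):

from vectorvein.chat_clients.openai_compatible_client import (
    AsyncOpenAICompatibleChatClient,
    OpenAICompatibleChatClient,
)

ACME_DEFAULT_MODEL = "acme-chat-mini"  # hypothetical model id


class AcmeChatClient(OpenAICompatibleChatClient):
    DEFAULT_MODEL = ACME_DEFAULT_MODEL
    # BACKEND_NAME would point at a new BackendType member once one exists.


class AsyncAcmeChatClient(AsyncOpenAICompatibleChatClient):
    DEFAULT_MODEL = ACME_DEFAULT_MODEL
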
{vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/chat_clients/utils.py
RENAMED
@@ -195,6 +195,31 @@ def get_token_counts(text: str | dict, model: str = "") -> int:
         return len(deepseek_tokenizer.encode(text))
     elif model.startswith("qwen"):
         return len(qwen_tokenizer.encode(text))
+    elif model.startswith("stepfun"):
+        model_setting = settings.moonshot.models[model]
+        if len(model_setting.endpoints) == 0:
+            return len(chatgpt_encoding.encode(text))
+        endpoint_id = model_setting.endpoints[0]
+        endpoint = settings.get_endpoint(endpoint_id)
+        tokenize_url = "https://api.stepfun.com/v1/token/count"
+        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {endpoint.api_key}"}
+        request_body = {
+            "model": model,
+            "messages": [
+                {"role": "user", "content": text},
+            ],
+        }
+        _, response = (
+            Retry(httpx.post)
+            .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
+            .retry_times(5)
+            .sleep_time(10)
+            .run()
+        )
+        if response is None:
+            return 1000
+        result = response.json()
+        return result["data"]["total_tokens"]
     else:
         return len(chatgpt_encoding.encode(text))
 
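
For StepFun models, get_token_counts now defers to StepFun's remote token-count API, wrapped in the package's Retry helper, falling back to 1000 tokens if every attempt fails and to the local ChatGPT encoding when no endpoint is configured. A standalone sketch of the same HTTP call using plain httpx; the API key and message below are placeholders:

import httpx

api_key = "YOUR_STEPFUN_API_KEY"  # placeholder
resp = httpx.post(
    "https://api.stepfun.com/v1/token/count",
    headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"},
    json={"model": "step-1-8k", "messages": [{"role": "user", "content": "hello"}]},
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["data"]["total_tokens"])  # token total as reported by StepFun
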
{vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/settings/__init__.py
RENAMED
@@ -27,6 +27,7 @@ class Settings(BaseModel):
     yi: BackendSettings = Field(default_factory=BackendSettings, description="Yi models settings.")
     zhipuai: BackendSettings = Field(default_factory=BackendSettings, description="Zhipuai models settings.")
     baichuan: BackendSettings = Field(default_factory=BackendSettings, description="Baichuan models settings.")
+    stepfun: BackendSettings = Field(default_factory=BackendSettings, description="StepFun models settings.")
 
     def __init__(self, **data):
         model_types = {
@@ -43,6 +44,7 @@ class Settings(BaseModel):
             "yi": defs.YI_MODELS,
             "zhipuai": defs.ZHIPUAI_MODELS,
             "baichuan": defs.BAICHUAN_MODELS,
+            "stepfun": defs.STEPFUN_MODELS,
         }
 
         for model_type, default_models in model_types.items():
@@ -62,7 +64,7 @@ class Settings(BaseModel):
         for endpoint in self.endpoints:
             if endpoint.id == endpoint_id:
                 return endpoint
-
+        raise ValueError(f"Endpoint {endpoint_id} not found.")
 
     def get_backend(self, backend: BackendType) -> BackendSettings:
         return getattr(self, backend.value.lower())
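
Besides the new stepfun field and its default model table, Settings.get_endpoint now raises instead of silently falling through when an endpoint id is unknown. A hedged sketch of adapting a call site, assuming the module-level settings object that other modules appear to import:

from vectorvein.settings import settings  # module-level instance assumed here

try:
    endpoint = settings.get_endpoint("stepfun-default")  # made-up endpoint id
except ValueError:
    endpoint = None  # pre-0.1.28 behaviour, now explicit at the call site
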
{vectorvein-0.1.26 → vectorvein-0.1.28}/src/vectorvein/types/defaults.py
RENAMED
@@ -588,3 +588,98 @@ GEMINI_MODELS = {
         "native_multimodal": True,
     },
 }
+
+# Baidu ERNIE (文心一言) models
+ERNIE_DEFAULT_MODEL = "ernie-lite"
+ERNIE_MODELS = {
+    "ernie-lite": {
+        "id": "ernie-lite",
+        "context_length": 6144,
+        "max_output_tokens": 2048,
+        "function_call_available": False,
+        "response_format_available": False,
+    },
+    "ernie-speed": {
+        "id": "ernie-speed",
+        "context_length": 126976,
+        "max_output_tokens": 4096,
+        "function_call_available": False,
+        "response_format_available": False,
+    },
+    "ernie-speed-pro-128k": {
+        "id": "ernie-speed-pro-128k",
+        "context_length": 126976,
+        "max_output_tokens": 4096,
+        "function_call_available": False,
+        "response_format_available": False,
+    },
+    "ernie-4.0-8k-latest": {
+        "id": "ernie-4.0-8k-latest",
+        "context_length": 5120,
+        "max_output_tokens": 2048,
+        "function_call_available": False,
+        "response_format_available": True,
+    },
+    "ernie-4.0-turbo-8k": {
+        "id": "ernie-4.0-turbo-8k",
+        "context_length": 5120,
+        "max_output_tokens": 2048,
+        "function_call_available": False,
+        "response_format_available": True,
+    },
+}
+
+
+STEPFUN_DEFAULT_MODEL = "step-1-8k"
+STEPFUN_MODELS = {
+    "step-1-8k": {
+        "id": "step-1-8k",
+        "context_length": 8192,
+        "function_call_available": True,
+        "response_format_available": True,
+    },
+    "step-1-32k": {
+        "id": "step-1-32k",
+        "context_length": 32000,
+        "function_call_available": True,
+        "response_format_available": True,
+    },
+    "step-1-128k": {
+        "id": "step-1-128k",
+        "context_length": 128000,
+        "function_call_available": True,
+        "response_format_available": True,
+    },
+    "step-1-256k": {
+        "id": "step-1-256k",
+        "context_length": 256000,
+        "function_call_available": True,
+        "response_format_available": True,
+    },
+    "step-2-16k": {
+        "id": "step-2-16k",
+        "context_length": 16384,
+        "function_call_available": True,
+        "response_format_available": True,
+    },
+    "step-1-flash": {
+        "id": "step-1-flash",
+        "context_length": 8192,
+        "function_call_available": True,
+        "response_format_available": True,
+    },
+    "step-1v-8k": {
+        "id": "step-1v-8k",
+        "context_length": 8192,
+        "function_call_available": False,
+        "response_format_available": False,
+        "native_multimodal": True,
+    },
+    "step-1v-32k": {
+        "id": "step-1v-32k",
+        "context_length": 32768,
+        "function_call_available": False,
+        "response_format_available": False,
+        "native_multimodal": True,
+    },
+}
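
types/defaults.py gains the ERNIE and StepFun model tables; note the StepFun entries define context_length but no max_output_tokens, so the OpenAI-compatible client takes its else branch when sizing max_tokens. A quick lookup sketch against the new tables:

from vectorvein.types import defaults as defs

print(defs.STEPFUN_DEFAULT_MODEL)                            # "step-1-8k"
print(defs.STEPFUN_MODELS["step-1-256k"]["context_length"])  # 256000
print(defs.ERNIE_MODELS["ernie-speed"]["max_output_tokens"]) # 4096
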