vectorvein 0.1.36__tar.gz → 0.1.38__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. {vectorvein-0.1.36 → vectorvein-0.1.38}/PKG-INFO +1 -1
  2. {vectorvein-0.1.36 → vectorvein-0.1.38}/pyproject.toml +1 -1
  3. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/anthropic_client.py +88 -42
  4. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/openai_compatible_client.py +34 -15
  5. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/types/llm_parameters.py +9 -2
  6. {vectorvein-0.1.36 → vectorvein-0.1.38}/README.md +0 -0
  7. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/__init__.py +0 -0
  8. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/__init__.py +0 -0
  9. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  10. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/base_client.py +0 -0
  11. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  12. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  13. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/groq_client.py +0 -0
  14. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/local_client.py +0 -0
  15. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  16. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  17. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  18. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/openai_client.py +0 -0
  19. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/py.typed +0 -0
  20. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  21. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  22. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/utils.py +0 -0
  23. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/yi_client.py +0 -0
  24. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  25. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/py.typed +0 -0
  26. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/settings/__init__.py +0 -0
  27. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/settings/py.typed +0 -0
  28. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/types/defaults.py +0 -0
  29. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/types/enums.py +0 -0
  30. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/types/exception.py +0 -0
  31. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/types/py.typed +0 -0
  32. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/utilities/media_processing.py +0 -0
  33. {vectorvein-0.1.36 → vectorvein-0.1.38}/src/vectorvein/utilities/retry.py +0 -0
--- vectorvein-0.1.36/PKG-INFO
+++ vectorvein-0.1.38/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.1.36
+Version: 0.1.38
 Summary: Default template for PDM package
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
--- vectorvein-0.1.36/pyproject.toml
+++ vectorvein-0.1.38/pyproject.toml
@@ -16,7 +16,7 @@ description = "Default template for PDM package"
 name = "vectorvein"
 readme = "README.md"
 requires-python = ">=3.10"
-version = "0.1.36"
+version = "0.1.38"
 
 [project.license]
 text = "MIT"
--- vectorvein-0.1.36/src/vectorvein/chat_clients/anthropic_client.py
+++ vectorvein-0.1.38/src/vectorvein/chat_clients/anthropic_client.py
@@ -135,12 +135,19 @@ class AnthropicChatClient(BaseChatClient):
             http_client,
             backend_name,
         )
+        self.model_id = None
 
     @cached_property
     def raw_client(self):
         if self.random_endpoint:
             self.random_endpoint = True
-            self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
+            endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.model_id = None
+            if isinstance(endpoint, dict):
+                self.endpoint_id = endpoint["endpoint_id"]
+                self.model_id = endpoint["model_id"]
+            else:
+                self.endpoint_id = endpoint
             self.endpoint = settings.get_endpoint(self.endpoint_id)
 
         if self.endpoint.is_vertex:
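Note on the hunk above: as of 0.1.38 an entry in a model's endpoints list may be either a bare endpoint ID string or a dict that additionally pins an endpoint-specific model ID, which is why raw_client now dispatches on isinstance(endpoint, dict). A minimal sketch of the two accepted entry shapes (the IDs are hypothetical):

    endpoints = [
        "anthropic-official",  # bare endpoint ID; the model setting's own id is used
        {"endpoint_id": "vertex-us-east5", "model_id": "claude-3-5-sonnet-v2@20241022"},
    ]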
@@ -265,23 +272,35 @@ class AnthropicChatClient(BaseChatClient):
 
         if self.random_endpoint:
             self.random_endpoint = True
-            self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
+            endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.model_id = None
+            if isinstance(endpoint, dict):
+                self.endpoint_id = endpoint["endpoint_id"]
+                self.model_id = endpoint["model_id"]
+            else:
+                self.endpoint_id = endpoint
             self.endpoint = settings.get_endpoint(self.endpoint_id)
 
         if self.endpoint.api_schema_type == "openai":
             if self.stream:
-                return OpenAICompatibleChatClient(
-                    model=self.model,
-                    stream=True,
-                    temperature=self.temperature,
-                    context_length_control=self.context_length_control,
-                    random_endpoint=self.random_endpoint,
-                    endpoint_id=self.endpoint_id,
-                    http_client=self.http_client,
-                    backend_name=self.BACKEND_NAME,
-                ).create_completion(
-                    messages, model, True, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
-                )
+
+                def _generator():
+                    response = OpenAICompatibleChatClient(
+                        model=self.model,
+                        stream=True,
+                        temperature=self.temperature,
+                        context_length_control=self.context_length_control,
+                        random_endpoint=self.random_endpoint,
+                        endpoint_id=self.endpoint_id,
+                        http_client=self.http_client,
+                        backend_name=self.BACKEND_NAME,
+                    ).create_completion(
+                        messages, model, True, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
+                    )
+                    for chunk in response:
+                        yield chunk
+
+                return _generator()
             else:
                 return OpenAICompatibleChatClient(
                     model=self.model,
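Wrapping the delegated streaming call in a local _generator() keeps create_completion lazy: the OpenAICompatibleChatClient is only constructed and the request only sent once the caller starts iterating, not at return time. A self-contained sketch of that deferral (all names hypothetical):

    def make_stream():
        def _generator():
            print("request sent")  # runs on first next(), not when make_stream() returns
            yield from ("chunk-1", "chunk-2")
        return _generator()

    stream = make_stream()  # nothing has run yet
    first = next(stream)    # now "request sent" prints and "chunk-1" is yielded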
@@ -298,7 +317,10 @@ class AnthropicChatClient(BaseChatClient):
 
         assert isinstance(self.raw_client, Anthropic | AnthropicVertex)
 
+        raw_client = self.raw_client  # accessing self.raw_client assigns self.model_id as a side effect
         self.model_setting = self.backend_settings.models[self.model]
+        if self.model_id is None:
+            self.model_id = self.model_setting.id
 
         if messages[0].get("role") == "system":
             system_prompt: str = messages[0]["content"]
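Because raw_client is a cached_property, its body, including the random endpoint choice and the model_id side effect, runs once per instance on first access; reading it into the raw_client local above guarantees that side effect has happened before the model_id fallback check. A minimal illustration of cached_property semantics:

    import random
    from functools import cached_property

    class Demo:
        @cached_property
        def value(self):
            return random.random()  # evaluated only on first access

    d = Demo()
    assert d.value == d.value  # second read returns the cached result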
@@ -311,7 +333,7 @@ class AnthropicChatClient(BaseChatClient):
                 messages,
                 max_count=self.model_setting.context_length,
                 backend=self.BACKEND_NAME,
-                model=self.model_setting.id,
+                model=self.model,
             )
 
         messages = format_messages_alternate(messages)
@@ -323,7 +345,7 @@ class AnthropicChatClient(BaseChatClient):
 
         if max_tokens is None:
             max_output_tokens = self.model_setting.max_output_tokens
-            token_counts = get_message_token_counts(messages=messages, tools=tools_params, model=self.model_setting.id)
+            token_counts = get_message_token_counts(messages=messages, tools=tools_params, model=self.model)
             if max_output_tokens is not None:
                 max_tokens = self.model_setting.context_length - token_counts
                 max_tokens = min(max(max_tokens, 1), max_output_tokens)
@@ -331,8 +353,8 @@ class AnthropicChatClient(BaseChatClient):
                 max_tokens = self.model_setting.context_length - token_counts
 
         if self.stream:
-            stream_response = self.raw_client.messages.create(
-                model=self.model_setting.id,
+            stream_response = raw_client.messages.create(
+                model=self.model_id,
                 messages=messages,
                 system=system_prompt,
                 stream=True,
@@ -393,8 +415,8 @@ class AnthropicChatClient(BaseChatClient):
 
             return generator()
         else:
-            response = self.raw_client.messages.create(
-                model=self.model_setting.id,
+            response = raw_client.messages.create(
+                model=self.model_id,
                 messages=messages,
                 system=system_prompt,
                 stream=False,
@@ -451,12 +473,19 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
             http_client,
             backend_name,
         )
+        self.model_id = None
 
     @cached_property
     def raw_client(self):
         if self.random_endpoint:
             self.random_endpoint = True
-            self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
+            endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.model_id = None
+            if isinstance(endpoint, dict):
+                self.endpoint_id = endpoint["endpoint_id"]
+                self.model_id = endpoint["model_id"]
+            else:
+                self.endpoint_id = endpoint
             self.endpoint = settings.get_endpoint(self.endpoint_id)
 
         if self.endpoint.is_vertex:
@@ -581,25 +610,38 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
 
         if self.random_endpoint:
             self.random_endpoint = True
-            self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
+            endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.model_id = None
+            if isinstance(endpoint, dict):
+                self.endpoint_id = endpoint["endpoint_id"]
+                self.model_id = endpoint["model_id"]
+            else:
+                self.endpoint_id = endpoint
             self.endpoint = settings.get_endpoint(self.endpoint_id)
 
         if self.endpoint.api_schema_type == "openai":
             if self.stream:
-                return AsyncOpenAICompatibleChatClient(
-                    model=self.model,
-                    stream=True,
-                    temperature=self.temperature,
-                    context_length_control=self.context_length_control,
-                    random_endpoint=self.random_endpoint,
-                    endpoint_id=self.endpoint_id,
-                    http_client=self.http_client,
-                    backend_name=self.BACKEND_NAME,
-                ).create_completion(
-                    messages, model, True, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
-                )
+
+                async def _generator():
+                    client = AsyncOpenAICompatibleChatClient(
+                        model=self.model,
+                        stream=True,
+                        temperature=self.temperature,
+                        context_length_control=self.context_length_control,
+                        random_endpoint=self.random_endpoint,
+                        endpoint_id=self.endpoint_id,
+                        http_client=self.http_client,
+                        backend_name=self.BACKEND_NAME,
+                    )
+                    response = await client.create_completion(
+                        messages, model, True, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
+                    )
+                    async for chunk in response:
+                        yield chunk
+
+                return _generator()
             else:
-                return AsyncOpenAICompatibleChatClient(
+                client = AsyncOpenAICompatibleChatClient(
                     model=self.model,
                     stream=False,
                     temperature=self.temperature,
@@ -608,13 +650,17 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 endpoint_id=self.endpoint_id,
                 http_client=self.http_client,
                 backend_name=self.BACKEND_NAME,
-            ).create_completion(
+            )
+            return await client.create_completion(
                 messages, model, False, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
             )
 
         assert isinstance(self.raw_client, AsyncAnthropic | AsyncAnthropicVertex)
 
+        raw_client = self.raw_client  # accessing self.raw_client assigns self.model_id as a side effect
         self.model_setting = self.backend_settings.models[self.model]
+        if self.model_id is None:
+            self.model_id = self.model_setting.id
 
         if messages[0].get("role") == "system":
             system_prompt = messages[0]["content"]
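For callers, the async streaming path now hands back an async generator: you await create_completion once, then iterate with async for. A hedged usage sketch (the constructor and keyword arguments are assumptions, not taken from this diff):

    import asyncio

    async def main():
        client = AsyncAnthropicChatClient()  # hypothetical construction
        stream = await client.create_completion(
            [{"role": "user", "content": "Hello"}], stream=True
        )
        async for chunk in stream:
            print(chunk)

    asyncio.run(main())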
@@ -627,7 +673,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 messages,
                 max_count=self.model_setting.context_length,
                 backend=self.BACKEND_NAME,
-                model=self.model_setting.id,
+                model=self.model,
             )
 
         messages = format_messages_alternate(messages)
@@ -639,7 +685,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
 
         if max_tokens is None:
             max_output_tokens = self.model_setting.max_output_tokens
-            token_counts = get_message_token_counts(messages=messages, tools=tools_params, model=self.model_setting.id)
+            token_counts = get_message_token_counts(messages=messages, tools=tools_params, model=self.model)
             if max_output_tokens is not None:
                 max_tokens = self.model_setting.context_length - token_counts
                 max_tokens = min(max(max_tokens, 1), max_output_tokens)
@@ -647,8 +693,8 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 max_tokens = self.model_setting.context_length - token_counts
 
         if self.stream:
-            stream_response = await self.raw_client.messages.create(
-                model=self.model_setting.id,
+            stream_response = await raw_client.messages.create(
+                model=self.model_id,
                 messages=messages,
                 system=system_prompt,
                 stream=True,
@@ -709,8 +755,8 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
 
             return generator()
         else:
-            response = await self.raw_client.messages.create(
-                model=self.model_setting.id,
+            response = await raw_client.messages.create(
+                model=self.model_id,
                 messages=messages,
                 system=system_prompt,
                 stream=False,
--- vectorvein-0.1.36/src/vectorvein/chat_clients/openai_compatible_client.py
+++ vectorvein-0.1.38/src/vectorvein/chat_clients/openai_compatible_client.py
@@ -25,7 +25,6 @@ from ..types.llm_parameters import (
     NOT_GIVEN,
     ToolParam,
     ToolChoice,
-    BackendSettings,
     ChatCompletionMessage,
     ChatCompletionDeltaMessage,
 )
@@ -56,12 +55,19 @@ class OpenAICompatibleChatClient(BaseChatClient):
             http_client,
             backend_name,
         )
+        self.model_id = None
 
     @cached_property
     def raw_client(self) -> OpenAI | AzureOpenAI:
         if self.random_endpoint:
             self.random_endpoint = True
-            self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
+            endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.model_id = None
+            if isinstance(endpoint, dict):
+                self.endpoint_id = endpoint["endpoint_id"]
+                self.model_id = endpoint["model_id"]
+            else:
+                self.endpoint_id = endpoint
             self.endpoint = settings.get_endpoint(self.endpoint_id)
 
         if self.endpoint.is_azure:
@@ -142,14 +148,17 @@ class OpenAICompatibleChatClient(BaseChatClient):
         if temperature is not None:
             self.temperature = temperature
 
+        raw_client = self.raw_client  # accessing self.raw_client assigns self.model_id as a side effect
         self.model_setting = self.backend_settings.models[self.model]
+        if self.model_id is None:
+            self.model_id = self.model_setting.id
 
         if self.context_length_control == ContextLengthControlType.Latest:
             messages = cutoff_messages(
                 messages,
                 max_count=self.model_setting.context_length,
                 backend=self.BACKEND_NAME,
-                model=self.model_setting.id,
+                model=self.model,
             )
 
         if tools:
@@ -168,7 +177,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
 
         if max_tokens is None:
             max_output_tokens = self.model_setting.max_output_tokens
-            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model_setting.id)
+            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model)
             if max_output_tokens is not None:
                 max_tokens = self.model_setting.context_length - token_counts - 64
                 max_tokens = min(max(max_tokens, 1), max_output_tokens)
@@ -181,8 +190,8 @@ class OpenAICompatibleChatClient(BaseChatClient):
             self.response_format = {}
 
         if self.stream:
-            stream_response: Stream[ChatCompletionChunk] = self.raw_client.chat.completions.create(
-                model=self.model_setting.id,
+            stream_response: Stream[ChatCompletionChunk] = raw_client.chat.completions.create(
+                model=self.model_id,
                 messages=messages,
                 stream=True,
                 temperature=self.temperature,
@@ -222,8 +231,8 @@ class OpenAICompatibleChatClient(BaseChatClient):
 
             return generator()
         else:
-            response: ChatCompletion = self.raw_client.chat.completions.create(
-                model=self.model_setting.id,
+            response: ChatCompletion = raw_client.chat.completions.create(
+                model=self.model_id,
                 messages=messages,
                 stream=False,
                 temperature=self.temperature,
@@ -278,12 +287,19 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             http_client,
             backend_name,
         )
+        self.model_id = None
 
     @cached_property
     def raw_client(self):
         if self.random_endpoint:
             self.random_endpoint = True
-            self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
+            endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.model_id = None
+            if isinstance(endpoint, dict):
+                self.endpoint_id = endpoint["endpoint_id"]
+                self.model_id = endpoint["model_id"]
+            else:
+                self.endpoint_id = endpoint
             self.endpoint = settings.get_endpoint(self.endpoint_id)
 
         if self.endpoint.is_azure:
@@ -364,14 +380,17 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
         if temperature is not None:
             self.temperature = temperature
 
+        raw_client = self.raw_client  # accessing self.raw_client assigns self.model_id as a side effect
         self.model_setting = self.backend_settings.models[self.model]
+        if self.model_id is None:
+            self.model_id = self.model_setting.id
 
         if self.context_length_control == ContextLengthControlType.Latest:
             messages = cutoff_messages(
                 messages,
                 max_count=self.model_setting.context_length,
                 backend=self.BACKEND_NAME,
-                model=self.model_setting.id,
+                model=self.model,
             )
 
         if tools:
@@ -395,7 +414,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
 
         if max_tokens is None:
             max_output_tokens = self.model_setting.max_output_tokens
-            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model_setting.id)
+            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model)
             if max_output_tokens is not None:
                 max_tokens = self.model_setting.context_length - token_counts - 64
                 max_tokens = min(max(max_tokens, 1), max_output_tokens)
@@ -403,8 +422,8 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
                 max_tokens = self.model_setting.context_length - token_counts - 64
 
         if self.stream:
-            stream_response: AsyncStream[ChatCompletionChunk] = await self.raw_client.chat.completions.create(
-                model=self.model_setting.id,
+            stream_response: AsyncStream[ChatCompletionChunk] = await raw_client.chat.completions.create(
+                model=self.model_id,
                 messages=messages,
                 stream=self.stream,
                 temperature=self.temperature,
@@ -444,8 +463,8 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
 
             return generator()
         else:
-            response: ChatCompletion = await self.raw_client.chat.completions.create(
-                model=self.model_setting.id,
+            response: ChatCompletion = await raw_client.chat.completions.create(
+                model=self.model_id,
                 messages=messages,
                 stream=self.stream,
                 temperature=self.temperature,
--- vectorvein-0.1.36/src/vectorvein/types/llm_parameters.py
+++ vectorvein-0.1.38/src/vectorvein/types/llm_parameters.py
@@ -1,6 +1,6 @@
 # @Author: Bi Ying
 # @Date: 2024-07-26 23:48:04
-from typing import List, Dict, Optional, Union, Iterable
+from typing import List, Dict, Optional, Union, Iterable, TypedDict
 
 from pydantic import BaseModel, Field
 
@@ -18,6 +18,11 @@ from openai.types.chat.chat_completion_tool_choice_option_param import ChatCompl
 from . import defaults as defs
 
 
+class EndpointOptionDict(TypedDict):
+    endpoint_id: str
+    model_id: str
+
+
 class EndpointSetting(BaseModel):
     id: str = Field(..., description="The id of the endpoint.")
     region: Optional[str] = Field(None, description="The region for the endpoint.")
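EndpointOptionDict is a plain dict at runtime; TypedDict only adds static type checking. A conforming literal needs exactly these two keys (the values here are hypothetical):

    option: EndpointOptionDict = {
        "endpoint_id": "vertex-us-east5",
        "model_id": "claude-3-5-sonnet-v2@20241022",
    }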
@@ -40,7 +45,9 @@ class EndpointSetting(BaseModel):
 
 class ModelSetting(BaseModel):
     id: str = Field(..., description="The id of the model.")
-    endpoints: List[str] = Field(default_factory=list, description="Available endpoints for the model.")
+    endpoints: List[Union[str, EndpointOptionDict]] = Field(
+        default_factory=list, description="Available endpoints for the model."
+    )
     function_call_available: bool = Field(False, description="Indicates if function call is available.")
     response_format_available: bool = Field(False, description="Indicates if response format is available.")
     native_multimodal: bool = Field(False, description="Indicates if the model is a native multimodal model.")
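With the widened endpoints field, one ModelSetting can mix both entry forms and pydantic validates either shape. A minimal sketch (the IDs are hypothetical, other fields left at their defaults):

    setting = ModelSetting(
        id="claude-3-5-sonnet",
        endpoints=[
            "anthropic-official",  # plain endpoint ID; clients fall back to setting.id
            {"endpoint_id": "vertex-us-east5", "model_id": "claude-3-5-sonnet-v2@20241022"},
        ],
    )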