hammad-python 0.0.30__py3-none-any.whl → 0.0.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ham/__init__.py +10 -0
- {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/METADATA +6 -32
- hammad_python-0.0.31.dist-info/RECORD +6 -0
- hammad/__init__.py +0 -84
- hammad/_internal.py +0 -256
- hammad/_main.py +0 -226
- hammad/cache/__init__.py +0 -40
- hammad/cache/base_cache.py +0 -181
- hammad/cache/cache.py +0 -169
- hammad/cache/decorators.py +0 -261
- hammad/cache/file_cache.py +0 -80
- hammad/cache/ttl_cache.py +0 -74
- hammad/cli/__init__.py +0 -33
- hammad/cli/animations.py +0 -573
- hammad/cli/plugins.py +0 -867
- hammad/cli/styles/__init__.py +0 -55
- hammad/cli/styles/settings.py +0 -139
- hammad/cli/styles/types.py +0 -358
- hammad/cli/styles/utils.py +0 -634
- hammad/data/__init__.py +0 -90
- hammad/data/collections/__init__.py +0 -49
- hammad/data/collections/collection.py +0 -326
- hammad/data/collections/indexes/__init__.py +0 -37
- hammad/data/collections/indexes/qdrant/__init__.py +0 -1
- hammad/data/collections/indexes/qdrant/index.py +0 -723
- hammad/data/collections/indexes/qdrant/settings.py +0 -94
- hammad/data/collections/indexes/qdrant/utils.py +0 -210
- hammad/data/collections/indexes/tantivy/__init__.py +0 -1
- hammad/data/collections/indexes/tantivy/index.py +0 -426
- hammad/data/collections/indexes/tantivy/settings.py +0 -40
- hammad/data/collections/indexes/tantivy/utils.py +0 -176
- hammad/data/configurations/__init__.py +0 -35
- hammad/data/configurations/configuration.py +0 -564
- hammad/data/models/__init__.py +0 -50
- hammad/data/models/extensions/__init__.py +0 -4
- hammad/data/models/extensions/pydantic/__init__.py +0 -42
- hammad/data/models/extensions/pydantic/converters.py +0 -759
- hammad/data/models/fields.py +0 -546
- hammad/data/models/model.py +0 -1078
- hammad/data/models/utils.py +0 -280
- hammad/data/sql/__init__.py +0 -24
- hammad/data/sql/database.py +0 -576
- hammad/data/sql/types.py +0 -127
- hammad/data/types/__init__.py +0 -75
- hammad/data/types/file.py +0 -431
- hammad/data/types/multimodal/__init__.py +0 -36
- hammad/data/types/multimodal/audio.py +0 -200
- hammad/data/types/multimodal/image.py +0 -182
- hammad/data/types/text.py +0 -1308
- hammad/formatting/__init__.py +0 -33
- hammad/formatting/json/__init__.py +0 -27
- hammad/formatting/json/converters.py +0 -158
- hammad/formatting/text/__init__.py +0 -63
- hammad/formatting/text/converters.py +0 -723
- hammad/formatting/text/markdown.py +0 -131
- hammad/formatting/yaml/__init__.py +0 -26
- hammad/formatting/yaml/converters.py +0 -5
- hammad/genai/__init__.py +0 -217
- hammad/genai/a2a/__init__.py +0 -32
- hammad/genai/a2a/workers.py +0 -552
- hammad/genai/agents/__init__.py +0 -59
- hammad/genai/agents/agent.py +0 -1973
- hammad/genai/agents/run.py +0 -1024
- hammad/genai/agents/types/__init__.py +0 -42
- hammad/genai/agents/types/agent_context.py +0 -13
- hammad/genai/agents/types/agent_event.py +0 -128
- hammad/genai/agents/types/agent_hooks.py +0 -220
- hammad/genai/agents/types/agent_messages.py +0 -31
- hammad/genai/agents/types/agent_response.py +0 -125
- hammad/genai/agents/types/agent_stream.py +0 -327
- hammad/genai/graphs/__init__.py +0 -125
- hammad/genai/graphs/_utils.py +0 -190
- hammad/genai/graphs/base.py +0 -1828
- hammad/genai/graphs/plugins.py +0 -316
- hammad/genai/graphs/types.py +0 -638
- hammad/genai/models/__init__.py +0 -1
- hammad/genai/models/embeddings/__init__.py +0 -43
- hammad/genai/models/embeddings/model.py +0 -226
- hammad/genai/models/embeddings/run.py +0 -163
- hammad/genai/models/embeddings/types/__init__.py +0 -37
- hammad/genai/models/embeddings/types/embedding_model_name.py +0 -75
- hammad/genai/models/embeddings/types/embedding_model_response.py +0 -76
- hammad/genai/models/embeddings/types/embedding_model_run_params.py +0 -66
- hammad/genai/models/embeddings/types/embedding_model_settings.py +0 -47
- hammad/genai/models/language/__init__.py +0 -57
- hammad/genai/models/language/model.py +0 -1098
- hammad/genai/models/language/run.py +0 -878
- hammad/genai/models/language/types/__init__.py +0 -40
- hammad/genai/models/language/types/language_model_instructor_mode.py +0 -47
- hammad/genai/models/language/types/language_model_messages.py +0 -28
- hammad/genai/models/language/types/language_model_name.py +0 -239
- hammad/genai/models/language/types/language_model_request.py +0 -127
- hammad/genai/models/language/types/language_model_response.py +0 -217
- hammad/genai/models/language/types/language_model_response_chunk.py +0 -56
- hammad/genai/models/language/types/language_model_settings.py +0 -89
- hammad/genai/models/language/types/language_model_stream.py +0 -600
- hammad/genai/models/language/utils/__init__.py +0 -28
- hammad/genai/models/language/utils/requests.py +0 -421
- hammad/genai/models/language/utils/structured_outputs.py +0 -135
- hammad/genai/models/model_provider.py +0 -4
- hammad/genai/models/multimodal.py +0 -47
- hammad/genai/models/reranking.py +0 -26
- hammad/genai/types/__init__.py +0 -1
- hammad/genai/types/base.py +0 -215
- hammad/genai/types/history.py +0 -290
- hammad/genai/types/tools.py +0 -507
- hammad/logging/__init__.py +0 -35
- hammad/logging/decorators.py +0 -834
- hammad/logging/logger.py +0 -1018
- hammad/mcp/__init__.py +0 -53
- hammad/mcp/client/__init__.py +0 -35
- hammad/mcp/client/client.py +0 -624
- hammad/mcp/client/client_service.py +0 -400
- hammad/mcp/client/settings.py +0 -178
- hammad/mcp/servers/__init__.py +0 -26
- hammad/mcp/servers/launcher.py +0 -1161
- hammad/runtime/__init__.py +0 -32
- hammad/runtime/decorators.py +0 -142
- hammad/runtime/run.py +0 -299
- hammad/service/__init__.py +0 -49
- hammad/service/create.py +0 -527
- hammad/service/decorators.py +0 -283
- hammad/types.py +0 -288
- hammad/typing/__init__.py +0 -435
- hammad/web/__init__.py +0 -43
- hammad/web/http/__init__.py +0 -1
- hammad/web/http/client.py +0 -944
- hammad/web/models.py +0 -275
- hammad/web/openapi/__init__.py +0 -1
- hammad/web/openapi/client.py +0 -740
- hammad/web/search/__init__.py +0 -1
- hammad/web/search/client.py +0 -1023
- hammad/web/utils.py +0 -472
- hammad_python-0.0.30.dist-info/RECORD +0 -135
- {hammad → ham}/py.typed +0 -0
- {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/licenses/LICENSE +0 -0
--- hammad/genai/models/language/types/language_model_response.py
+++ /dev/null
@@ -1,217 +0,0 @@
-"""hammad.genai.language_models.language_model_response"""
-
-from __future__ import annotations
-
-from typing import (
-    List,
-    TypeVar,
-    Optional,
-    Any,
-    Dict,
-    Union,
-    Literal,
-)
-
-from .....cache import cached
-from .....typing import get_type_description
-
-from ...model_provider import litellm
-from ....types.base import BaseGenAIModelResponse
-from openai.types.chat import (
-    ChatCompletionContentPartParam,
-    ChatCompletionMessageParam,
-    ChatCompletionMessageToolCall,
-)
-
-__all__ = [
-    "LanguageModelResponse",
-]
-
-
-T = TypeVar("T")
-
-
-class LanguageModelResponse(BaseGenAIModelResponse[T]):
-    """A response generated by a language model. This response is unified
-    to represent both standard completions as well as structured outputs."""
-
-    type: Literal["language_model"] = "language_model"
-
-    model: str
-    """The model that generated this response."""
-
-    output: T
-    """The 'final' or primary response content from the language model, this is
-    in the type requested by the user.
-
-    NOTE:
-    In many cases with tool calling, message content is not present, in these cases
-    this field will **NOT** represent tool calls, and will be returned as `None`."""
-
-    completion: Any
-    """The raw Chat Completion (`litellm.ModelResponse`) object returned by the
-    language model."""
-
-    content: Optional[str] = None
-    """The actual response content of the completion. This is the string that
-    was generated by the model."""
-
-    tool_calls: Optional[List["litellm.ChatCompletionMessageToolCall"]] = None
-    """The tool calls that were made by the model. This is a list of tool calls
-    that were made by the model."""
-
-    refusal: Optional[str] = None
-    """The refusal message generated by the model. This is the string that
-    was generated by the model when it refused to generate the completion."""
-
-    def get_content(
-        self, choice: int = 0
-    ) -> Union[str, List["ChatCompletionContentPartParam"], None]:
-        """The 'raw' message content generated by the language model, this
-        can be either a string, a list of content parts, or `None`."""
-
-        if not self.completion or not self.completion.choices:
-            return None
-
-        if choice >= len(self.completion.choices):
-            return None
-
-        return self.completion.choices[choice].message.content
-
-    def get_tool_calls(
-        self, *, name: Optional[str] = None, id: Optional[str] = None, choice: int = 0
-    ) -> Optional[List["ChatCompletionMessageToolCall"]]:
-        """The tool calls generated by the language model, this is a list of
-        `ChatCompletionMessageToolCall` objects. Optionally can be filtered
-        by name or ID to return specific tool calls.
-
-        NOTE: Only one of `name` or `id` can be provided."""
-        if not self.completion or not self.completion.choices:
-            return None
-
-        if choice >= len(self.completion.choices):
-            return None
-
-        tool_calls = self.completion.choices[choice].message.tool_calls
-        if not tool_calls:
-            return None
-
-        # Filter by name or id if provided
-        if name is not None and id is not None:
-            raise ValueError("Only one of 'name' or 'id' can be provided, not both")
-
-        if name is not None:
-            return [call for call in tool_calls if call.function.name == name]
-
-        if id is not None:
-            return [call for call in tool_calls if call.id == id]
-
-        return tool_calls
-
-    def has_tool_calls(self, name: Optional[str] = None, choice: int = 0) -> bool:
-        """Checks if the response has tool calls, optionally filtered by name.
-        If `name` is provided, it will check if the tool call with that name
-        exists in the response."""
-        if not self.completion or not self.completion.choices:
-            return False
-
-        tool_calls = self.get_tool_calls(name=name, choice=choice)
-        return tool_calls is not None and len(tool_calls) > 0
-
-    def get_tool_call_parameters(
-        self, tool: Optional[str] = None, choice: int = 0
-    ) -> Optional[Dict[str, Any]]:
-        """Returns the generated parameters for a tool call within a response.
-        If the response has multiple tool calls, and no tool is specified,
-        an error will be raised.
-
-        Args:
-            tool: The name of the tool to get the parameters for.
-            choice: The choice index to get tool calls from.
-
-        Returns:
-            Dict[str, Any]: The generated parameters for the tool call.
-        """
-        tool_calls = self.get_tool_calls(choice=choice)
-        if not tool_calls:
-            return None
-
-        if tool is None:
-            if len(tool_calls) > 1:
-                raise ValueError(
-                    "Multiple tool calls found in response, and no tool specified."
-                )
-            tool = tool_calls[0].function.name
-
-        for tool_call in tool_calls:
-            if tool_call.function.name == tool:
-                import json
-
-                return json.loads(tool_call.function.arguments)
-        return None
-
-    def to_message(
-        self, format_tool_calls: bool = False, choice: int = 0
-    ) -> "ChatCompletionMessageParam":
-        """Converts the LanguageModelResponse to a Chat Completions
-        message object.
-
-        If the `format_tool_calls` parameter is True, the tool calls
-        will be cleanly formatted and added to the message content
-        with something similar to:
-
-        'I called the function `get_weather` with the following arguments:
-        {arguments}'
-        """
-        if not self.completion or not self.completion.choices:
-            return {"role": "assistant", "content": ""}
-
-        if choice >= len(self.completion.choices):
-            return {"role": "assistant", "content": ""}
-
-        choice_message = self.completion.choices[choice].message
-
-        # Base message structure
-        message: "ChatCompletionMessageParam" = {
-            "role": "assistant",
-            "content": choice_message.content or "",
-        }
-
-        # Add tool calls if they exist and format_tool_calls is False
-        if choice_message.tool_calls and not format_tool_calls:
-            message["tool_calls"] = choice_message.tool_calls
-
-        # Format tool calls into content if format_tool_calls is True
-        elif choice_message.tool_calls and format_tool_calls:
-            content_parts = []
-            if choice_message.content:
-                content_parts.append(choice_message.content)
-
-            for tool_call in choice_message.tool_calls:
-                formatted_call = f"I called the function `{tool_call.function.name}` with the following arguments:\n{tool_call.function.arguments}"
-                content_parts.append(formatted_call)
-
-            message["content"] = "\n\n".join(content_parts)
-
-        return message
-
-    def to_content_part(self) -> "ChatCompletionContentPartParam":
-        """Converts the LanguageModelResponse to a Chat Completions
-        content part object."""
-        return {"type": "text", "text": self.content}
-
-    @cached
-    def __str__(self) -> str:
-        """Pretty prints the response object."""
-        output = ">>> LanguageModelResponse:"
-
-        if self.output or self.content:
-            output += f"\n{self.output if self.output else self.content}"
-        else:
-            output += f"\n{self.completion}"
-
-        output += f"\n\n>>> Model: {self.model}"
-        output += f"\n>>> Type: {get_type_description(type(self.output))}"
-        output += f"\n>>> Tool Calls: {len(self.tool_calls) if self.tool_calls else 0}"
-
-        return output
--- hammad/genai/models/language/types/language_model_response_chunk.py
+++ /dev/null
@@ -1,56 +0,0 @@
-"""hammad.genai.language_models.language_model_response_chunk"""
-
-from typing import TypeVar, Optional, Any, Literal
-
-from ....types.base import BaseGenAIModelEvent
-
-__all__ = [
-    "LanguageModelResponseChunk",
-]
-
-
-T = TypeVar("T")
-
-
-class LanguageModelResponseChunk(BaseGenAIModelEvent[T]):
-    """Represents a chunk of data from a language model response stream.
-
-    This class unifies chunks from both LiteLLM and Instructor streaming,
-    providing a consistent interface for processing streaming responses.
-    """
-
-    type: Literal["language_model"] = "language_model"
-    """The type of the event, always `language_model`."""
-
-    content: Optional[str] = None
-    """The content delta for this chunk."""
-
-    output: Optional[T] = None
-    """The structured output for this chunk (from instructor)."""
-
-    model: Optional[str] = None
-    """The model that generated this chunk."""
-
-    finish_reason: Optional[str] = None
-    """The reason the stream finished (if applicable)."""
-
-    chunk: Optional[Any] = None
-    """The original chunk object from the provider."""
-
-    is_final: bool = False
-    """Whether this is the final chunk in the stream."""
-
-    def __bool__(self) -> bool:
-        """Check if this chunk has meaningful content."""
-        return bool(self.content or self.output or self.finish_reason)
-
-    def __str__(self) -> str:
-        """String representation of the chunk."""
-        if self.output:
-            return f"LanguageModelResponseChunk(output={self.output})"
-        elif self.content:
-            return f"LanguageModelResponseChunk(content={repr(self.content)})"
-        elif self.finish_reason:
-            return f"LanguageModelResponseChunk(finish_reason={self.finish_reason})"
-        else:
-            return "LanguageModelResponseChunk(empty)"
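The chunk type's `__bool__` made empty keep-alive chunks falsy, so stream consumers could skip them with a plain truthiness check. A minimal sketch of that contract, using a plain pydantic `BaseModel` in place of the package's `BaseGenAIModelEvent` base class (illustrative, not the real class):

```python
from typing import Any, Optional

from pydantic import BaseModel


class Chunk(BaseModel):
    # Illustrative stand-in for the removed class; the real one inherited
    # from the package's BaseGenAIModelEvent, not from BaseModel directly.
    content: Optional[str] = None
    output: Optional[Any] = None
    finish_reason: Optional[str] = None
    is_final: bool = False

    def __bool__(self) -> bool:
        # Same rule as the removed class: a chunk is "meaningful" if it
        # carries a content delta, a structured output, or a finish reason.
        return bool(self.content or self.output or self.finish_reason)


stream = [Chunk(), Chunk(content="Hel"), Chunk(content="lo"), Chunk(finish_reason="stop")]
text = "".join(c.content for c in stream if c and c.content)
print(text)  # Hello -- the empty keep-alive chunk is skipped by truthiness
```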
--- hammad/genai/models/language/types/language_model_settings.py
+++ /dev/null
@@ -1,89 +0,0 @@
-"""hammad.genai.language_models.language_model_settings"""
-
-from typing import (
-    Any,
-    Dict,
-    List,
-    Union,
-    Type,
-    TypeVar,
-    TYPE_CHECKING,
-    Callable,
-    Optional,
-)
-import sys
-from pydantic import BaseModel, Field
-
-if sys.version_info >= (3, 12):
-    from typing import TypedDict, Required, NotRequired
-else:
-    from typing_extensions import TypedDict, Required, NotRequired
-
-if TYPE_CHECKING:
-    pass
-
-from .language_model_name import LanguageModelName
-from .language_model_instructor_mode import LanguageModelInstructorMode
-from ....types.base import BaseGenAIModelSettings
-
-__all__ = [
-    "LanguageModelSettings",
-    "LanguageModelProviderSettings",
-]
-
-
-T = TypeVar("T")
-
-
-class LanguageModelSettings(BaseGenAIModelSettings):
-    """Complete settings for language model requests."""
-
-    # Structured output settings
-    type: Optional[Type[T]] = None
-    instructor_mode: Optional[LanguageModelInstructorMode] = None
-    response_field_name: Optional[str] = None
-    response_field_instruction: Optional[str] = None
-    max_retries: Optional[int] = None
-    strict: Optional[bool] = None
-    validation_context: Optional[Dict[str, Any]] = None
-    context: Optional[Dict[str, Any]] = None
-
-    # Tool settings
-    tools: Optional[List[Any]] = None
-    tool_choice: Optional[Union[str, Dict[str, Any]]] = None
-    parallel_tool_calls: Optional[bool] = None
-    functions: Optional[List[Any]] = None
-    function_call: Optional[str] = None
-
-    # Streaming settings
-    stream: Optional[bool] = None
-    stream_options: Optional[Dict[str, Any]] = None
-
-    # Hook settings
-    completion_kwargs_hooks: Optional[List[Callable[..., None]]] = None
-    completion_response_hooks: Optional[List[Callable[..., None]]] = None
-    completion_error_hooks: Optional[List[Callable[..., None]]] = None
-    completion_last_attempt_hooks: Optional[List[Callable[..., None]]] = None
-    parse_error_hooks: Optional[List[Callable[..., None]]] = None
-
-    # Extended settings
-    timeout: Optional[Union[float, str]] = None
-    temperature: Optional[float] = None
-    top_p: Optional[float] = None
-    n: Optional[int] = None
-    stop: Optional[str] = None
-    max_completion_tokens: Optional[int] = None
-    max_tokens: Optional[int] = None
-    modalities: Optional[List[Any]] = None
-    prediction: Optional[Any] = None
-    audio: Optional[Any] = None
-    presence_penalty: Optional[float] = None
-    frequency_penalty: Optional[float] = None
-    logit_bias: Optional[Dict[str, float]] = None
-    user: Optional[str] = None
-    reasoning_effort: Optional[str] = None
-    seed: Optional[int] = None
-    logprobs: Optional[bool] = None
-    top_logprobs: Optional[int] = None
-    thinking: Optional[Dict[str, Any]] = None
-    web_search_options: Optional[Dict[str, Any]] = None