pydantic-ai-slim 1.7.0__py3-none-any.whl → 1.9.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in the supported public registries, and is provided for informational purposes only.
- pydantic_ai/__init__.py +2 -0
- pydantic_ai/_agent_graph.py +3 -0
- pydantic_ai/_cli.py +2 -2
- pydantic_ai/ag_ui.py +50 -696
- pydantic_ai/agent/abstract.py +17 -6
- pydantic_ai/direct.py +16 -4
- pydantic_ai/durable_exec/dbos/_agent.py +3 -0
- pydantic_ai/durable_exec/prefect/_agent.py +3 -0
- pydantic_ai/durable_exec/temporal/_agent.py +3 -0
- pydantic_ai/messages.py +39 -7
- pydantic_ai/models/__init__.py +42 -1
- pydantic_ai/models/groq.py +9 -1
- pydantic_ai/models/openai.py +2 -3
- pydantic_ai/result.py +19 -7
- pydantic_ai/ui/__init__.py +16 -0
- pydantic_ai/ui/_adapter.py +386 -0
- pydantic_ai/ui/_event_stream.py +591 -0
- pydantic_ai/ui/_messages_builder.py +28 -0
- pydantic_ai/ui/ag_ui/__init__.py +9 -0
- pydantic_ai/ui/ag_ui/_adapter.py +187 -0
- pydantic_ai/ui/ag_ui/_event_stream.py +227 -0
- pydantic_ai/ui/ag_ui/app.py +141 -0
- pydantic_ai/ui/vercel_ai/__init__.py +16 -0
- pydantic_ai/ui/vercel_ai/_adapter.py +199 -0
- pydantic_ai/ui/vercel_ai/_event_stream.py +187 -0
- pydantic_ai/ui/vercel_ai/_utils.py +16 -0
- pydantic_ai/ui/vercel_ai/request_types.py +275 -0
- pydantic_ai/ui/vercel_ai/response_types.py +230 -0
- {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/METADATA +5 -3
- {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/RECORD +33 -19
- {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/licenses/LICENSE +0 -0
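
The largest addition is pydantic_ai/ui/_adapter.py, reproduced in the hunk below: it introduces a protocol-agnostic UIAdapter base class that the new ag_ui and vercel_ai subpackages build on, while pydantic_ai/ag_ui.py loses most of its code (presumably delegating to the new package). A minimal sketch of the presumed import surface; the actual re-exports in pydantic_ai/ui/__init__.py and the concrete adapter classes in the ag_ui and vercel_ai subpackages are not shown in this hunk, so the exact names below are an assumption:

# Presumed re-exports from pydantic_ai/ui/__init__.py (not part of this hunk).
from pydantic_ai.ui import StateDeps, StateHandler, UIAdapter
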
--- /dev/null
+++ pydantic_ai/ui/_adapter.py
@@ -0,0 +1,386 @@
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from collections.abc import AsyncIterator, Sequence
+from dataclasses import KW_ONLY, Field, dataclass, replace
+from functools import cached_property
+from http import HTTPStatus
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    ClassVar,
+    Generic,
+    Protocol,
+    TypeVar,
+    runtime_checkable,
+)
+
+from pydantic import BaseModel, ValidationError
+
+from pydantic_ai import DeferredToolRequests, DeferredToolResults
+from pydantic_ai.agent import AbstractAgent
+from pydantic_ai.builtin_tools import AbstractBuiltinTool
+from pydantic_ai.exceptions import UserError
+from pydantic_ai.messages import ModelMessage
+from pydantic_ai.models import KnownModelName, Model
+from pydantic_ai.output import OutputDataT, OutputSpec
+from pydantic_ai.settings import ModelSettings
+from pydantic_ai.tools import AgentDepsT
+from pydantic_ai.toolsets import AbstractToolset
+from pydantic_ai.usage import RunUsage, UsageLimits
+
+from ._event_stream import NativeEvent, OnCompleteFunc, UIEventStream
+
+if TYPE_CHECKING:
+    from starlette.requests import Request
+    from starlette.responses import Response, StreamingResponse
+
+
+__all__ = [
+    'UIAdapter',
+    'StateHandler',
+    'StateDeps',
+]
+
+
+RunInputT = TypeVar('RunInputT')
+"""Type variable for protocol-specific run input types."""
+
+MessageT = TypeVar('MessageT')
+"""Type variable for protocol-specific message types."""
+
+EventT = TypeVar('EventT')
+"""Type variable for protocol-specific event types."""
+
+
+StateT = TypeVar('StateT', bound=BaseModel)
+"""Type variable for the state type, which must be a subclass of `BaseModel`."""
+
+
+@runtime_checkable
+class StateHandler(Protocol):
+    """Protocol for state handlers in agent runs. Requires the class to be a dataclass with a `state` field."""
+
+    # Has to be a dataclass so we can use `replace` to update the state.
+    # From https://github.com/python/typeshed/blob/9ab7fde0a0cd24ed7a72837fcb21093b811b80d8/stdlib/_typeshed/__init__.pyi#L352
+    __dataclass_fields__: ClassVar[dict[str, Field[Any]]]
+
+    @property
+    def state(self) -> Any:
+        """Get the current state of the agent run."""
+        ...
+
+    @state.setter
+    def state(self, state: Any) -> None:
+        """Set the state of the agent run.
+
+        This method is called to update the state of the agent run with the
+        provided state.
+
+        Args:
+            state: The run state.
+        """
+        ...
+
+
+@dataclass
+class StateDeps(Generic[StateT]):
+    """Dependency type that holds state.
+
+    This class is used to manage the state of an agent run. It allows setting
+    the state of the agent run with a specific type of state model, which must
+    be a subclass of `BaseModel`.
+
+    The state is set using the `state` setter by the `Adapter` when the run starts.
+
+    Implements the `StateHandler` protocol.
+    """
+
+    state: StateT
+
+
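
A minimal sketch of how StateDeps is intended to be used as an agent's deps_type when the frontend shares state with the run. The state model and model name are illustrative, and the pydantic_ai.ui import path assumes the package re-exports these names:

from pydantic import BaseModel

from pydantic_ai import Agent
from pydantic_ai.ui import StateDeps, StateHandler  # assuming pydantic_ai.ui re-exports these


class DocumentState(BaseModel):
    """Illustrative state model shared with the frontend; any BaseModel subclass works."""

    document: str = ''


# StateDeps is a dataclass with a `state` field, so it satisfies the StateHandler
# protocol; the adapter validates the raw frontend state into DocumentState and
# swaps it into `deps` before the run starts (see `run_stream_native` further down).
agent = Agent('openai:gpt-4o', deps_type=StateDeps[DocumentState])

assert isinstance(StateDeps(DocumentState()), StateHandler)
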
+@dataclass
+class UIAdapter(ABC, Generic[RunInputT, MessageT, EventT, AgentDepsT, OutputDataT]):
+    """Base class for UI adapters.
+
+    This class is responsible for transforming agent run input received from the frontend into arguments for [`Agent.run_stream_events()`][pydantic_ai.Agent.run_stream_events], running the agent, and then transforming Pydantic AI events into protocol-specific events.
+
+    The event stream transformation is handled by a protocol-specific [`UIEventStream`][pydantic_ai.ui.UIEventStream] subclass.
+    """
+
+    agent: AbstractAgent[AgentDepsT, OutputDataT]
+    """The Pydantic AI agent to run."""
+
+    run_input: RunInputT
+    """The protocol-specific run input object."""
+
+    _: KW_ONLY
+
+    accept: str | None = None
+    """The `Accept` header value of the request, used to determine how to encode the protocol-specific events for the streaming response."""
+
+    @classmethod
+    async def from_request(
+        cls, request: Request, *, agent: AbstractAgent[AgentDepsT, OutputDataT]
+    ) -> UIAdapter[RunInputT, MessageT, EventT, AgentDepsT, OutputDataT]:
+        """Create an adapter from a request."""
+        return cls(
+            agent=agent,
+            run_input=cls.build_run_input(await request.body()),
+            accept=request.headers.get('accept'),
+        )
+
+    @classmethod
+    @abstractmethod
+    def build_run_input(cls, body: bytes) -> RunInputT:
+        """Build a protocol-specific run input object from the request body."""
+        raise NotImplementedError
+
+    @classmethod
+    @abstractmethod
+    def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]:
+        """Transform protocol-specific messages into Pydantic AI messages."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def build_event_stream(self) -> UIEventStream[RunInputT, EventT, AgentDepsT, OutputDataT]:
+        """Build a protocol-specific event stream transformer."""
+        raise NotImplementedError
+
+    @cached_property
+    @abstractmethod
+    def messages(self) -> list[ModelMessage]:
+        """Pydantic AI messages from the protocol-specific run input."""
+        raise NotImplementedError
+
+    @cached_property
+    def toolset(self) -> AbstractToolset[AgentDepsT] | None:
+        """Toolset representing frontend tools from the protocol-specific run input."""
+        return None
+
+    @cached_property
+    def state(self) -> dict[str, Any] | None:
+        """Frontend state from the protocol-specific run input."""
+        return None
+
+    def transform_stream(
+        self,
+        stream: AsyncIterator[NativeEvent],
+        on_complete: OnCompleteFunc[EventT] | None = None,
+    ) -> AsyncIterator[EventT]:
+        """Transform a stream of Pydantic AI events into protocol-specific events.
+
+        Args:
+            stream: The stream of Pydantic AI events to transform.
+            on_complete: Optional callback function called when the agent run completes successfully.
+                The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can optionally yield additional protocol-specific events.
+        """
+        return self.build_event_stream().transform_stream(stream, on_complete=on_complete)
+
+    def encode_stream(self, stream: AsyncIterator[EventT]) -> AsyncIterator[str]:
+        """Encode a stream of protocol-specific events as strings according to the `Accept` header value.
+
+        Args:
+            stream: The stream of protocol-specific events to encode.
+        """
+        return self.build_event_stream().encode_stream(stream)
+
+    def streaming_response(self, stream: AsyncIterator[EventT]) -> StreamingResponse:
+        """Generate a streaming response from a stream of protocol-specific events.
+
+        Args:
+            stream: The stream of protocol-specific events to encode.
+        """
+        return self.build_event_stream().streaming_response(stream)
+
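
For orientation, a concrete protocol adapter (such as the AG-UI and Vercel AI adapters added elsewhere in this release) has to fill in the abstract members above. A rough, hypothetical sketch follows; ChatInput, ChatMessage, and ChatEventStream are illustrative stand-ins rather than names from this package, and the UIEventStream constructor used at the end is assumed:

from collections.abc import Sequence
from functools import cached_property

from pydantic import BaseModel

from pydantic_ai.messages import ModelMessage, ModelRequest, UserPromptPart
from pydantic_ai.ui import UIAdapter, UIEventStream  # assuming pydantic_ai.ui re-exports these


class ChatMessage(BaseModel):
    role: str
    content: str


class ChatInput(BaseModel):
    messages: list[ChatMessage]


class ChatAdapter(UIAdapter[ChatInput, ChatMessage, str, None, str]):
    @classmethod
    def build_run_input(cls, body: bytes) -> ChatInput:
        # Parse the raw request body into the protocol's run input model.
        return ChatInput.model_validate_json(body)

    @classmethod
    def load_messages(cls, messages: Sequence[ChatMessage]) -> list[ModelMessage]:
        # Map protocol messages onto Pydantic AI messages (user turns only, for brevity).
        return [ModelRequest(parts=[UserPromptPart(content=m.content)]) for m in messages]

    @cached_property
    def messages(self) -> list[ModelMessage]:
        return self.load_messages(self.run_input.messages)

    def build_event_stream(self) -> UIEventStream[ChatInput, str, None, str]:
        # ChatEventStream would subclass UIEventStream (defined in _event_stream.py,
        # not shown in this hunk); its constructor signature is assumed here.
        return ChatEventStream(self.run_input, accept=self.accept)
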
+    def run_stream_native(
+        self,
+        *,
+        output_type: OutputSpec[Any] | None = None,
+        message_history: Sequence[ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: Model | KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: UsageLimits | None = None,
+        usage: RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+        builtin_tools: Sequence[AbstractBuiltinTool] | None = None,
+    ) -> AsyncIterator[NativeEvent]:
+        """Run the agent with the protocol-specific run input and stream Pydantic AI events.
+
+        Args:
+            output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no
+                output validators since output validators would expect an argument that matches the agent's output type.
+            message_history: History of the conversation so far.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
+            model: Optional model to use for this run, required if `model` was not set when creating the agent.
+            deps: Optional dependencies to use for this run.
+            model_settings: Optional settings to use for this model's request.
+            usage_limits: Optional limits on model request count or token usage.
+            usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
+            infer_name: Whether to try to infer the agent name from the call frame if it's not set.
+            toolsets: Optional additional toolsets for this run.
+            builtin_tools: Optional additional builtin tools to use for this run.
+        """
+        message_history = [*(message_history or []), *self.messages]
+
+        toolset = self.toolset
+        if toolset:
+            output_type = [output_type or self.agent.output_type, DeferredToolRequests]
+            toolsets = [*(toolsets or []), toolset]
+
+        if isinstance(deps, StateHandler):
+            raw_state = self.state or {}
+            if isinstance(deps.state, BaseModel):
+                state = type(deps.state).model_validate(raw_state)
+            else:
+                state = raw_state
+
+            deps = replace(deps, state=state)
+        elif self.state:
+            raise UserError(
+                f'State is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.'
+            )
+
+        return self.agent.run_stream_events(
+            output_type=output_type,
+            message_history=message_history,
+            deferred_tool_results=deferred_tool_results,
+            model=model,
+            deps=deps,
+            model_settings=model_settings,
+            usage_limits=usage_limits,
+            usage=usage,
+            infer_name=infer_name,
+            toolsets=toolsets,
+            builtin_tools=builtin_tools,
+        )
+
+    def run_stream(
+        self,
+        *,
+        output_type: OutputSpec[Any] | None = None,
+        message_history: Sequence[ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: Model | KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: UsageLimits | None = None,
+        usage: RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+        builtin_tools: Sequence[AbstractBuiltinTool] | None = None,
+        on_complete: OnCompleteFunc[EventT] | None = None,
+    ) -> AsyncIterator[EventT]:
+        """Run the agent with the protocol-specific run input and stream protocol-specific events.
+
+        Args:
+            output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no
+                output validators since output validators would expect an argument that matches the agent's output type.
+            message_history: History of the conversation so far.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
+            model: Optional model to use for this run, required if `model` was not set when creating the agent.
+            deps: Optional dependencies to use for this run.
+            model_settings: Optional settings to use for this model's request.
+            usage_limits: Optional limits on model request count or token usage.
+            usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
+            infer_name: Whether to try to infer the agent name from the call frame if it's not set.
+            toolsets: Optional additional toolsets for this run.
+            builtin_tools: Optional additional builtin tools to use for this run.
+            on_complete: Optional callback function called when the agent run completes successfully.
+                The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can optionally yield additional protocol-specific events.
+        """
+        return self.transform_stream(
+            self.run_stream_native(
+                output_type=output_type,
+                message_history=message_history,
+                deferred_tool_results=deferred_tool_results,
+                model=model,
+                deps=deps,
+                model_settings=model_settings,
+                usage_limits=usage_limits,
+                usage=usage,
+                infer_name=infer_name,
+                toolsets=toolsets,
+                builtin_tools=builtin_tools,
+            ),
+            on_complete=on_complete,
+        )
+
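
Roughly: run_stream_native merges the frontend messages, tools, and state into the run and yields Pydantic AI's own events, while run_stream additionally pushes them through the protocol's UIEventStream. A sketch of driving both by hand, reusing the hypothetical ChatAdapter from above; the model name is illustrative:

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')


async def stream_protocol_events(body: bytes) -> None:
    adapter = ChatAdapter(agent=agent, run_input=ChatAdapter.build_run_input(body))
    # Protocol-level events (plain strings in the ChatAdapter sketch), ready to be
    # encoded with `encode_stream()` or wrapped in `streaming_response()`.
    async for event in adapter.run_stream():
        print(event)


async def stream_native_events(body: bytes) -> None:
    adapter = ChatAdapter(agent=agent, run_input=ChatAdapter.build_run_input(body))
    # Pydantic AI's own events, before any protocol translation, e.g. for logging.
    async for event in adapter.run_stream_native():
        print(type(event).__name__)
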
+    @classmethod
+    async def dispatch_request(
+        cls,
+        request: Request,
+        *,
+        agent: AbstractAgent[AgentDepsT, OutputDataT],
+        message_history: Sequence[ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: Model | KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        output_type: OutputSpec[Any] | None = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: UsageLimits | None = None,
+        usage: RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+        builtin_tools: Sequence[AbstractBuiltinTool] | None = None,
+        on_complete: OnCompleteFunc[EventT] | None = None,
+    ) -> Response:
+        """Handle a protocol-specific HTTP request by running the agent and returning a streaming response of protocol-specific events.
+
+        Args:
+            request: The incoming Starlette/FastAPI request.
+            agent: The agent to run.
+            output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no
+                output validators since output validators would expect an argument that matches the agent's output type.
+            message_history: History of the conversation so far.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
+            model: Optional model to use for this run, required if `model` was not set when creating the agent.
+            deps: Optional dependencies to use for this run.
+            model_settings: Optional settings to use for this model's request.
+            usage_limits: Optional limits on model request count or token usage.
+            usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
+            infer_name: Whether to try to infer the agent name from the call frame if it's not set.
+            toolsets: Optional additional toolsets for this run.
+            builtin_tools: Optional additional builtin tools to use for this run.
+            on_complete: Optional callback function called when the agent run completes successfully.
+                The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can optionally yield additional protocol-specific events.
+
+        Returns:
+            A streaming Starlette response with protocol-specific events encoded per the request's `Accept` header value.
+        """
+        try:
+            from starlette.responses import Response
+        except ImportError as e:  # pragma: no cover
+            raise ImportError(
+                'Please install the `starlette` package to use `dispatch_request()` method, '
+                'you can use the `ui` optional group — `pip install "pydantic-ai-slim[ui]"`'
+            ) from e
+
+        try:
+            adapter = await cls.from_request(request, agent=agent)
+        except ValidationError as e:  # pragma: no cover
+            return Response(
+                content=e.json(),
+                media_type='application/json',
+                status_code=HTTPStatus.UNPROCESSABLE_ENTITY,
+            )
+
+        return adapter.streaming_response(
+            adapter.run_stream(
+                message_history=message_history,
+                deferred_tool_results=deferred_tool_results,
+                deps=deps,
+                output_type=output_type,
+                model=model,
+                model_settings=model_settings,
+                usage_limits=usage_limits,
+                usage=usage,
+                infer_name=infer_name,
+                toolsets=toolsets,
+                builtin_tools=builtin_tools,
+                on_complete=on_complete,
+            ),
+        )
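
dispatch_request is the all-in-one entry point: it builds the adapter from the incoming request, runs the agent, and returns the encoded streaming response (or a 422 if the body fails validation). A sketch of wiring it into a FastAPI app, again using the hypothetical ChatAdapter from above; a real application would use one of the concrete adapters under pydantic_ai/ui/ag_ui/ or pydantic_ai/ui/vercel_ai/:

from fastapi import FastAPI
from starlette.requests import Request
from starlette.responses import Response

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o', instructions='Be concise.')
app = FastAPI()


@app.post('/chat')
async def chat(request: Request) -> Response:
    # Parses the body via ChatAdapter.build_run_input(), runs the agent, and streams
    # protocol events back, encoded according to the request's Accept header.
    return await ChatAdapter.dispatch_request(request, agent=agent)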