pydantic-ai-slim 0.2.1__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-slim might be problematic. Click here for more details.

pydantic_ai/_cli.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations as _annotations
2
2
 
3
3
  import argparse
4
4
  import asyncio
5
+ import importlib
5
6
  import sys
6
7
  from asyncio import CancelledError
7
8
  from collections.abc import Sequence
@@ -12,6 +13,9 @@ from typing import Any, cast
12
13
 
13
14
  from typing_inspection.introspection import get_literal_values
14
15
 
16
+ from pydantic_ai.result import OutputDataT
17
+ from pydantic_ai.tools import AgentDepsT
18
+
15
19
  from . import __version__
16
20
  from .agent import Agent
17
21
  from .exceptions import UserError
@@ -123,6 +127,11 @@ Special prompts:
123
127
  # e.g. we want to show `openai:gpt-4o` but not `gpt-4o`
124
128
  qualified_model_names = [n for n in get_literal_values(KnownModelName.__value__) if ':' in n]
125
129
  arg.completer = argcomplete.ChoicesCompleter(qualified_model_names) # type: ignore[reportPrivateUsage]
130
+ parser.add_argument(
131
+ '-a',
132
+ '--agent',
133
+ help='Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"',
134
+ )
126
135
  parser.add_argument(
127
136
  '-l',
128
137
  '--list-models',
@@ -155,8 +164,22 @@ Special prompts:
155
164
  console.print(f' {model}', highlight=False)
156
165
  return 0
157
166
 
167
+ agent: Agent[None, str] = cli_agent
168
+ if args.agent:
169
+ try:
170
+ module_path, variable_name = args.agent.split(':')
171
+ module = importlib.import_module(module_path)
172
+ agent = getattr(module, variable_name)
173
+ if not isinstance(agent, Agent):
174
+ console.print(f'[red]Error: {args.agent} is not an Agent instance[/red]')
175
+ return 1
176
+ console.print(f'[green]Using custom agent:[/green] [magenta]{args.agent}[/magenta]', highlight=False)
177
+ except ValueError:
178
+ console.print('[red]Error: Agent must be specified in "module:variable" format[/red]')
179
+ return 1
180
+
158
181
  try:
159
- cli_agent.model = infer_model(args.model)
182
+ agent.model = infer_model(args.model)
160
183
  except UserError as e:
161
184
  console.print(f'Error initializing [magenta]{args.model}[/magenta]:\n[red]{e}[/red]')
162
185
  return 1
@@ -171,7 +194,7 @@ Special prompts:
171
194
 
172
195
  if prompt := cast(str, args.prompt):
173
196
  try:
174
- asyncio.run(ask_agent(cli_agent, prompt, stream, console, code_theme))
197
+ asyncio.run(ask_agent(agent, prompt, stream, console, code_theme))
175
198
  except KeyboardInterrupt:
176
199
  pass
177
200
  return 0
@@ -179,13 +202,19 @@ Special prompts:
179
202
  # doing this instead of `PromptSession[Any](history=` allows mocking of PromptSession in tests
180
203
  session: PromptSession[Any] = PromptSession(history=FileHistory(str(PROMPT_HISTORY_PATH)))
181
204
  try:
182
- return asyncio.run(run_chat(session, stream, cli_agent, console, code_theme, prog_name))
205
+ return asyncio.run(run_chat(session, stream, agent, console, code_theme, prog_name))
183
206
  except KeyboardInterrupt: # pragma: no cover
184
207
  return 0
185
208
 
186
209
 
187
210
  async def run_chat(
188
- session: PromptSession[Any], stream: bool, agent: Agent, console: Console, code_theme: str, prog_name: str
211
+ session: PromptSession[Any],
212
+ stream: bool,
213
+ agent: Agent[AgentDepsT, OutputDataT],
214
+ console: Console,
215
+ code_theme: str,
216
+ prog_name: str,
217
+ deps: AgentDepsT = None,
189
218
  ) -> int:
190
219
  multiline = False
191
220
  messages: list[ModelMessage] = []
@@ -207,30 +236,31 @@ async def run_chat(
207
236
  return exit_value
208
237
  else:
209
238
  try:
210
- messages = await ask_agent(agent, text, stream, console, code_theme, messages)
239
+ messages = await ask_agent(agent, text, stream, console, code_theme, deps, messages)
211
240
  except CancelledError: # pragma: no cover
212
241
  console.print('[dim]Interrupted[/dim]')
213
242
 
214
243
 
215
244
  async def ask_agent(
216
- agent: Agent,
245
+ agent: Agent[AgentDepsT, OutputDataT],
217
246
  prompt: str,
218
247
  stream: bool,
219
248
  console: Console,
220
249
  code_theme: str,
250
+ deps: AgentDepsT = None,
221
251
  messages: list[ModelMessage] | None = None,
222
252
  ) -> list[ModelMessage]:
223
253
  status = Status('[dim]Working on it…[/dim]', console=console)
224
254
 
225
255
  if not stream:
226
256
  with status:
227
- result = await agent.run(prompt, message_history=messages)
228
- content = result.output
257
+ result = await agent.run(prompt, message_history=messages, deps=deps)
258
+ content = str(result.output)
229
259
  console.print(Markdown(content, code_theme=code_theme))
230
260
  return result.all_messages()
231
261
 
232
262
  with status, ExitStack() as stack:
233
- async with agent.iter(prompt, message_history=messages) as agent_run:
263
+ async with agent.iter(prompt, message_history=messages, deps=deps) as agent_run:
234
264
  live = Live('', refresh_per_second=15, console=console, vertical_overflow='ellipsis')
235
265
  async for node in agent_run:
236
266
  if Agent.is_model_request_node(node):
pydantic_ai/agent.py CHANGED
@@ -12,7 +12,7 @@ from typing import TYPE_CHECKING, Any, Callable, ClassVar, Generic, cast, final,
12
12
 
13
13
  from opentelemetry.trace import NoOpTracer, use_span
14
14
  from pydantic.json_schema import GenerateJsonSchema
15
- from typing_extensions import Literal, Never, TypeIs, TypeVar, deprecated
15
+ from typing_extensions import Literal, Never, Self, TypeIs, TypeVar, deprecated
16
16
 
17
17
  from pydantic_graph import End, Graph, GraphRun, GraphRunContext
18
18
  from pydantic_graph._utils import get_event_loop
@@ -1688,6 +1688,51 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
1688
1688
  finally:
1689
1689
  await exit_stack.aclose()
1690
1690
 
1691
+ async def to_cli(self: Self, deps: AgentDepsT = None) -> None:
1692
+ """Run the agent in a CLI chat interface.
1693
+
1694
+ Example:
1695
+ ```python {title="agent_to_cli.py" test="skip"}
1696
+ from pydantic_ai import Agent
1697
+
1698
+ agent = Agent('openai:gpt-4o', instructions='You always respond in Italian.')
1699
+
1700
+ async def main():
1701
+ await agent.to_cli()
1702
+ ```
1703
+ """
1704
+ from prompt_toolkit import PromptSession
1705
+ from prompt_toolkit.history import FileHistory
1706
+ from rich.console import Console
1707
+
1708
+ from pydantic_ai._cli import PROMPT_HISTORY_PATH, run_chat
1709
+
1710
+ # TODO(Marcelo): We need to refactor the CLI code to be able to just pass `agent`, `deps` and
1711
+ # `prog_name` from here.
1712
+
1713
+ session: PromptSession[Any] = PromptSession(history=FileHistory(str(PROMPT_HISTORY_PATH)))
1714
+ await run_chat(
1715
+ session=session,
1716
+ stream=True,
1717
+ agent=self,
1718
+ deps=deps,
1719
+ console=Console(),
1720
+ code_theme='monokai',
1721
+ prog_name='pydantic-ai',
1722
+ )
1723
+
1724
+ def to_cli_sync(self: Self, deps: AgentDepsT = None) -> None:
1725
+ """Run the agent in a CLI chat interface with the non-async interface.
1726
+
1727
+ ```python {title="agent_to_cli_sync.py" test="skip"}
1728
+ from pydantic_ai import Agent
1729
+
1730
+ agent = Agent('openai:gpt-4o', instructions='You always respond in Italian.')
1731
+ agent.to_cli_sync()
1732
+ ```
1733
+ """
1734
+ return get_event_loop().run_until_complete(self.to_cli(deps=deps))
1735
+
1691
1736
 
1692
1737
  @dataclasses.dataclass(repr=False)
1693
1738
  class AgentRun(Generic[AgentDepsT, OutputDataT]):
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pydantic-ai-slim
3
- Version: 0.2.1
3
+ Version: 0.2.2
4
4
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
5
5
  Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
6
6
  License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
29
29
  Requires-Dist: griffe>=1.3.2
30
30
  Requires-Dist: httpx>=0.27
31
31
  Requires-Dist: opentelemetry-api>=1.28.0
32
- Requires-Dist: pydantic-graph==0.2.1
32
+ Requires-Dist: pydantic-graph==0.2.2
33
33
  Requires-Dist: pydantic>=2.10
34
34
  Requires-Dist: typing-inspection>=0.4.0
35
35
  Provides-Extra: anthropic
@@ -45,7 +45,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
45
45
  Provides-Extra: duckduckgo
46
46
  Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
47
47
  Provides-Extra: evals
48
- Requires-Dist: pydantic-evals==0.2.1; extra == 'evals'
48
+ Requires-Dist: pydantic-evals==0.2.2; extra == 'evals'
49
49
  Provides-Extra: groq
50
50
  Requires-Dist: groq>=0.15.0; extra == 'groq'
51
51
  Provides-Extra: logfire
@@ -1,14 +1,14 @@
1
1
  pydantic_ai/__init__.py,sha256=5flxyMQJVrHRMQ3MYaZf1el2ctNs0JmPClKbw2Q-Lsk,1160
2
2
  pydantic_ai/__main__.py,sha256=Q_zJU15DUA01YtlJ2mnaLCoId2YmgmreVEERGuQT-Y0,132
3
3
  pydantic_ai/_agent_graph.py,sha256=28JXHfSU78tBWZJr3ZES6gWB5wZoevyk8rMlbHDHfFc,35145
4
- pydantic_ai/_cli.py,sha256=KKF_f8ccFYaGSQooVHfGPqdNaQVM8UVZCKCOm2MThzI,11183
4
+ pydantic_ai/_cli.py,sha256=tCUKc3wDOdH4uFb2XIoeKFIPZD7_MM1Mb-GNiEQKtoM,12266
5
5
  pydantic_ai/_griffe.py,sha256=Sf_DisE9k2TA0VFeVIK2nf1oOct5MygW86PBCACJkFA,5244
6
6
  pydantic_ai/_output.py,sha256=w_kBc5Lx5AmI0APbohxxYgpFd5VAwh6K0IjP7QIOu9U,11209
7
7
  pydantic_ai/_parts_manager.py,sha256=kG4xynxXHAr9uGFwCVqqhsGCac5a_UjFdRBucoTCXEE,12189
8
8
  pydantic_ai/_pydantic.py,sha256=1EO1tv-ULj3l_L1qMcC7gIOKTL2e2a-xTbUD_kqKiOg,8921
9
9
  pydantic_ai/_system_prompt.py,sha256=602c2jyle2R_SesOrITBDETZqsLk4BZ8Cbo8yEhmx04,1120
10
10
  pydantic_ai/_utils.py,sha256=Vlww1AMQMTvFfGRlFKAyvl4VrE24Lk1MH28EwVTWy8c,10122
11
- pydantic_ai/agent.py,sha256=_pHvpCj78HgQd-xpy5PON4HfuJgnr0lc7xPJVVZy5Sc,90370
11
+ pydantic_ai/agent.py,sha256=fzoQAvgUa6RmeQo4YFidkdiSt04e5ckp6Hxa_m5a0Rg,91934
12
12
  pydantic_ai/exceptions.py,sha256=1ujJeB3jDDQ-pH5ydBYrgStvR35-GlEW0bYGTGEr4ME,3127
13
13
  pydantic_ai/format_as_xml.py,sha256=IINfh1evWDphGahqHNLBArB5dQ4NIqS3S-kru35ztGg,372
14
14
  pydantic_ai/format_prompt.py,sha256=qdKep95Sjlr7u1-qag4JwPbjoURbG0GbeU_l5ODTNw4,4466
@@ -47,7 +47,7 @@ pydantic_ai/providers/google_vertex.py,sha256=WAwPxKTARVzs8DFs2veEUOJSur0krDOo9-
47
47
  pydantic_ai/providers/groq.py,sha256=DoY6qkfhuemuKB5JXhUkqG-3t1HQkxwSXoE_kHQIAK0,2788
48
48
  pydantic_ai/providers/mistral.py,sha256=FAS7yKn26yWy7LTmEiBSvqe0HpTXi8_nIf824vE6RFQ,2892
49
49
  pydantic_ai/providers/openai.py,sha256=ePF-QWwLkGkSE5w245gTTDVR3VoTIUqFoIhQ0TAoUiA,2866
50
- pydantic_ai_slim-0.2.1.dist-info/METADATA,sha256=N_37NxRHRsZS8icisuIzdGbc5a6jd-E0w9ya4C8kM88,3680
51
- pydantic_ai_slim-0.2.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
52
- pydantic_ai_slim-0.2.1.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
53
- pydantic_ai_slim-0.2.1.dist-info/RECORD,,
50
+ pydantic_ai_slim-0.2.2.dist-info/METADATA,sha256=xG01EN4b9lpWtfNjclQQCv_sZzOLSVj9NgtKS3hIFQU,3680
51
+ pydantic_ai_slim-0.2.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
52
+ pydantic_ai_slim-0.2.2.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
53
+ pydantic_ai_slim-0.2.2.dist-info/RECORD,,