pydantic-ai-examples 0.3.4__tar.gz → 0.3.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-examples might be problematic. See the package registry's advisory page for more details.

Files changed (41)
  1. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/PKG-INFO +4 -3
  2. pydantic_ai_examples-0.3.6/pydantic_ai_examples/slack_lead_qualifier/__init__.py +0 -0
  3. pydantic_ai_examples-0.3.6/pydantic_ai_examples/slack_lead_qualifier/agent.py +47 -0
  4. pydantic_ai_examples-0.3.6/pydantic_ai_examples/slack_lead_qualifier/app.py +36 -0
  5. pydantic_ai_examples-0.3.6/pydantic_ai_examples/slack_lead_qualifier/functions.py +85 -0
  6. pydantic_ai_examples-0.3.6/pydantic_ai_examples/slack_lead_qualifier/modal.py +66 -0
  7. pydantic_ai_examples-0.3.6/pydantic_ai_examples/slack_lead_qualifier/models.py +46 -0
  8. pydantic_ai_examples-0.3.6/pydantic_ai_examples/slack_lead_qualifier/slack.py +30 -0
  9. pydantic_ai_examples-0.3.6/pydantic_ai_examples/slack_lead_qualifier/store.py +31 -0
  10. pydantic_ai_examples-0.3.6/pydantic_ai_examples/weather_agent.py +105 -0
  11. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/weather_agent_gradio.py +1 -5
  12. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pyproject.toml +2 -1
  13. pydantic_ai_examples-0.3.4/pydantic_ai_examples/weather_agent.py +0 -158
  14. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/.gitignore +0 -0
  15. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/LICENSE +0 -0
  16. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/README.md +0 -0
  17. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/__main__.py +0 -0
  18. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/bank_support.py +0 -0
  19. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/chat_app.html +0 -0
  20. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/chat_app.py +0 -0
  21. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/chat_app.ts +0 -0
  22. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/evals/__init__.py +0 -0
  23. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/evals/agent.py +0 -0
  24. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/evals/custom_evaluators.py +0 -0
  25. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/evals/datasets/time_range_v1.yaml +0 -0
  26. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/evals/datasets/time_range_v1_schema.json +0 -0
  27. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/evals/datasets/time_range_v2.yaml +0 -0
  28. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/evals/datasets/time_range_v2_schema.json +0 -0
  29. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/evals/example_01_generate_dataset.py +0 -0
  30. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/evals/example_02_add_custom_evaluators.py +0 -0
  31. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/evals/example_03_unit_testing.py +0 -0
  32. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/evals/example_04_compare_models.py +0 -0
  33. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/evals/models.py +0 -0
  34. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/flight_booking.py +0 -0
  35. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/pydantic_model.py +0 -0
  36. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/question_graph.py +0 -0
  37. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/rag.py +0 -0
  38. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/roulette_wheel.py +0 -0
  39. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/sql_gen.py +0 -0
  40. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/stream_markdown.py +0 -0
  41. {pydantic_ai_examples-0.3.4 → pydantic_ai_examples-0.3.6}/pydantic_ai_examples/stream_whales.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pydantic-ai-examples
3
- Version: 0.3.4
3
+ Version: 0.3.6
4
4
  Summary: Examples of how to use PydanticAI and what it can do.
5
5
  Author-email: Samuel Colvin <samuel@pydantic.dev>
6
6
  License-Expression: MIT
@@ -31,8 +31,9 @@ Requires-Dist: fastapi>=0.115.4
31
31
  Requires-Dist: gradio>=5.9.0; python_version > '3.9'
32
32
  Requires-Dist: logfire[asyncpg,fastapi,httpx,sqlite3]>=2.6
33
33
  Requires-Dist: mcp[cli]>=1.4.1; python_version >= '3.10'
34
- Requires-Dist: pydantic-ai-slim[anthropic,groq,openai,vertexai]==0.3.4
35
- Requires-Dist: pydantic-evals==0.3.4
34
+ Requires-Dist: modal>=1.0.4
35
+ Requires-Dist: pydantic-ai-slim[anthropic,groq,openai,vertexai]==0.3.6
36
+ Requires-Dist: pydantic-evals==0.3.6
36
37
  Requires-Dist: python-multipart>=0.0.17
37
38
  Requires-Dist: rich>=13.9.2
38
39
  Requires-Dist: uvicorn>=0.32.0
@@ -0,0 +1,47 @@
1
+ from textwrap import dedent
2
+ from types import NoneType
3
+
4
+ import logfire
5
+
6
+ ### [imports]
7
+ from pydantic_ai import Agent, NativeOutput
8
+ from pydantic_ai.common_tools.duckduckgo import duckduckgo_search_tool ### [/imports]
9
+
10
+ from .models import Analysis, Profile
11
+
12
+ ### [agent]
13
+ agent = Agent(
14
+ 'openai:gpt-4o',
15
+ instructions=dedent(
16
+ """
17
+ When a new person joins our public Slack, please put together a brief snapshot so we can be most useful to them.
18
+
19
+ **What to include**
20
+
21
+ 1. **Who they are:** Any details about their professional role or projects (e.g. LinkedIn, GitHub, company bio).
22
+ 2. **Where they work:** Name of the organisation and its domain.
23
+ 3. **How we can help:** On a scale of 1–5, estimate how likely they are to benefit from **Pydantic Logfire**
24
+ (our paid observability tool) based on factors such as company size, product maturity, or AI usage.
25
+ *1 = probably not relevant, 5 = very strong fit.*
26
+
27
+ **Our products (for context only)**
28
+ • **Pydantic Validation** – Python data-validation (open source)
29
+ • **Pydantic AI** – Python agent framework (open source)
30
+ • **Pydantic Logfire** – Observability for traces, logs & metrics with first-class AI support (commercial)
31
+
32
+ **How to research**
33
+
34
+ • Use the provided DuckDuckGo search tool to research the person and the organization they work for, based on the email domain or what you find on e.g. LinkedIn and GitHub.
35
+ • If you can't find enough to form a reasonable view, return **None**.
36
+ """
37
+ ),
38
+ tools=[duckduckgo_search_tool()],
39
+ output_type=NativeOutput([Analysis, NoneType]),
40
+ ) ### [/agent]
41
+
42
+
43
+ ### [analyze_profile]
44
+ @logfire.instrument('Analyze profile')
45
+ async def analyze_profile(profile: Profile) -> Analysis | None:
46
+ result = await agent.run(profile.as_prompt())
47
+ return result.output ### [/analyze_profile]
@@ -0,0 +1,36 @@
1
+ from typing import Any
2
+
3
+ import logfire
4
+ from fastapi import FastAPI, HTTPException, status
5
+ from logfire.propagate import get_context
6
+
7
+ from .models import Profile
8
+
9
+
10
+ ### [process_slack_member]
11
+ def process_slack_member(profile: Profile):
12
+ from .modal import process_slack_member as _process_slack_member
13
+
14
+ _process_slack_member.spawn(
15
+ profile.model_dump(), logfire_ctx=get_context()
16
+ ) ### [/process_slack_member]
17
+
18
+
19
+ ### [app]
20
+ app = FastAPI()
21
+ logfire.instrument_fastapi(app, capture_headers=True)
22
+
23
+
24
+ @app.post('/')
25
+ async def process_webhook(payload: dict[str, Any]) -> dict[str, Any]:
26
+ if payload['type'] == 'url_verification':
27
+ return {'challenge': payload['challenge']}
28
+ elif (
29
+ payload['type'] == 'event_callback' and payload['event']['type'] == 'team_join'
30
+ ):
31
+ profile = Profile.model_validate(payload['event']['user']['profile'])
32
+
33
+ process_slack_member(profile)
34
+ return {'status': 'OK'}
35
+
36
+ raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY) ### [/app]
@@ -0,0 +1,85 @@
1
+ import logfire
2
+
3
+ ### [imports]
4
+ from .agent import analyze_profile
5
+ from .models import Profile
6
+
7
+ ### [imports-daily_summary]
8
+ from .slack import send_slack_message
9
+ from .store import AnalysisStore ### [/imports,/imports-daily_summary]
10
+
11
+ ### [constant-new_lead_channel]
12
+ NEW_LEAD_CHANNEL = '#new-slack-leads'
13
+ ### [/constant-new_lead_channel]
14
+ ### [constant-daily_summary_channel]
15
+ DAILY_SUMMARY_CHANNEL = '#daily-slack-leads-summary'
16
+ ### [/constant-daily_summary_channel]
17
+
18
+
19
+ ### [process_slack_member]
20
+ @logfire.instrument('Process Slack member')
21
+ async def process_slack_member(profile: Profile):
22
+ analysis = await analyze_profile(profile)
23
+ logfire.info('Analysis', analysis=analysis)
24
+
25
+ if analysis is None:
26
+ return
27
+
28
+ await AnalysisStore().add(analysis)
29
+
30
+ await send_slack_message(
31
+ NEW_LEAD_CHANNEL,
32
+ [
33
+ {
34
+ 'type': 'header',
35
+ 'text': {
36
+ 'type': 'plain_text',
37
+ 'text': f'New Slack member with score {analysis.relevance}/5',
38
+ },
39
+ },
40
+ {
41
+ 'type': 'divider',
42
+ },
43
+ *analysis.as_slack_blocks(),
44
+ ],
45
+ ) ### [/process_slack_member]
46
+
47
+
48
+ ### [send_daily_summary]
49
+ @logfire.instrument('Send daily summary')
50
+ async def send_daily_summary():
51
+ analyses = await AnalysisStore().list()
52
+ logfire.info('Analyses', analyses=analyses)
53
+
54
+ if len(analyses) == 0:
55
+ return
56
+
57
+ sorted_analyses = sorted(analyses, key=lambda x: x.relevance, reverse=True)
58
+ top_analyses = sorted_analyses[:5]
59
+
60
+ blocks = [
61
+ {
62
+ 'type': 'header',
63
+ 'text': {
64
+ 'type': 'plain_text',
65
+ 'text': f'Top {len(top_analyses)} new Slack members from the last 24 hours',
66
+ },
67
+ },
68
+ ]
69
+
70
+ for analysis in top_analyses:
71
+ blocks.extend(
72
+ [
73
+ {
74
+ 'type': 'divider',
75
+ },
76
+ *analysis.as_slack_blocks(include_relevance=True),
77
+ ]
78
+ )
79
+
80
+ await send_slack_message(
81
+ DAILY_SUMMARY_CHANNEL,
82
+ blocks,
83
+ )
84
+
85
+ await AnalysisStore().clear() ### [/send_daily_summary]
@@ -0,0 +1,66 @@
1
+ from typing import Any
2
+
3
+ ### [setup_modal]
4
+ import modal
5
+
6
+ image = modal.Image.debian_slim(python_version='3.13').pip_install(
7
+ 'pydantic',
8
+ 'pydantic_ai_slim[openai,duckduckgo]',
9
+ 'logfire[httpx,fastapi]',
10
+ 'fastapi[standard]',
11
+ 'httpx',
12
+ )
13
+ app = modal.App(
14
+ name='slack-lead-qualifier',
15
+ image=image,
16
+ secrets=[
17
+ modal.Secret.from_name('logfire'),
18
+ modal.Secret.from_name('openai'),
19
+ modal.Secret.from_name('slack'),
20
+ ],
21
+ ) ### [/setup_modal]
22
+
23
+
24
+ ### [setup_logfire]
25
+ def setup_logfire():
26
+ import logfire
27
+
28
+ logfire.configure(service_name=app.name)
29
+ logfire.instrument_pydantic_ai()
30
+ logfire.instrument_httpx(capture_all=True) ### [/setup_logfire]
31
+
32
+
33
+ ### [web_app]
34
+ @app.function(min_containers=1)
35
+ @modal.asgi_app() # type: ignore
36
+ def web_app():
37
+ setup_logfire()
38
+
39
+ from .app import app as _app
40
+
41
+ return _app ### [/web_app]
42
+
43
+
44
+ ### [process_slack_member]
45
+ @app.function()
46
+ async def process_slack_member(profile_raw: dict[str, Any], logfire_ctx: Any):
47
+ setup_logfire()
48
+
49
+ from logfire.propagate import attach_context
50
+
51
+ from .functions import process_slack_member as _process_slack_member
52
+ from .models import Profile
53
+
54
+ with attach_context(logfire_ctx):
55
+ profile = Profile.model_validate(profile_raw)
56
+ await _process_slack_member(profile) ### [/process_slack_member]
57
+
58
+
59
+ ### [send_daily_summary]
60
+ @app.function(schedule=modal.Cron('0 8 * * *')) # Every day at 8am UTC
61
+ async def send_daily_summary():
62
+ setup_logfire()
63
+
64
+ from .functions import send_daily_summary as _send_daily_summary
65
+
66
+ await _send_daily_summary() ### [/send_daily_summary]
@@ -0,0 +1,46 @@
1
+ from typing import Annotated, Any
2
+
3
+ from annotated_types import Ge, Le
4
+ from pydantic import BaseModel
5
+
6
+ ### [import-format_as_xml]
7
+ from pydantic_ai import format_as_xml ### [/import-format_as_xml]
8
+
9
+
10
+ ### [profile,profile-intro]
11
+ class Profile(BaseModel): ### [/profile-intro]
12
+ first_name: str | None = None
13
+ last_name: str | None = None
14
+ display_name: str | None = None
15
+ email: str ### [/profile]
16
+
17
+ ### [profile-as_prompt]
18
+ def as_prompt(self) -> str:
19
+ return format_as_xml(self, root_tag='profile') ### [/profile-as_prompt]
20
+
21
+
22
+ ### [analysis,analysis-intro]
23
+ class Analysis(BaseModel): ### [/analysis-intro]
24
+ profile: Profile
25
+ organization_name: str
26
+ organization_domain: str
27
+ job_title: str
28
+ relevance: Annotated[int, Ge(1), Le(5)]
29
+ """Estimated fit for Pydantic Logfire: 1 = low, 5 = high"""
30
+ summary: str
31
+ """One-sentence welcome note summarising who they are and how we might help""" ### [/analysis]
32
+
33
+ ### [analysis-as_slack_blocks]
34
+ def as_slack_blocks(self, include_relevance: bool = False) -> list[dict[str, Any]]:
35
+ profile = self.profile
36
+ relevance = f'({self.relevance}/5)' if include_relevance else ''
37
+ return [
38
+ {
39
+ 'type': 'markdown',
40
+ 'text': f'[{profile.display_name}](mailto:{profile.email}), {self.job_title} at [**{self.organization_name}**](https://{self.organization_domain}) {relevance}',
41
+ },
42
+ {
43
+ 'type': 'markdown',
44
+ 'text': self.summary,
45
+ },
46
+ ] ### [/analysis-as_slack_blocks]
@@ -0,0 +1,30 @@
1
+ import os
2
+ from typing import Any
3
+
4
+ import httpx
5
+ import logfire
6
+
7
+ ### [send_slack_message]
8
+ API_KEY = os.getenv('SLACK_API_KEY')
9
+ assert API_KEY, 'SLACK_API_KEY is not set'
10
+
11
+
12
+ @logfire.instrument('Send Slack message')
13
+ async def send_slack_message(channel: str, blocks: list[dict[str, Any]]):
14
+ client = httpx.AsyncClient()
15
+ response = await client.post(
16
+ 'https://slack.com/api/chat.postMessage',
17
+ json={
18
+ 'channel': channel,
19
+ 'blocks': blocks,
20
+ },
21
+ headers={
22
+ 'Authorization': f'Bearer {API_KEY}',
23
+ },
24
+ timeout=5,
25
+ )
26
+ response.raise_for_status()
27
+ result = response.json()
28
+ if not result.get('ok', False):
29
+ error = result.get('error', 'Unknown error')
30
+ raise Exception(f'Failed to send to Slack: {error}') ### [/send_slack_message]
@@ -0,0 +1,31 @@
1
+ import logfire
2
+
3
+ ### [import-modal]
4
+ import modal ### [/import-modal]
5
+
6
+ from .models import Analysis
7
+
8
+
9
+ ### [analysis_store]
10
+ class AnalysisStore:
11
+ @classmethod
12
+ @logfire.instrument('Add analysis to store')
13
+ async def add(cls, analysis: Analysis):
14
+ await cls._get_store().put.aio(analysis.profile.email, analysis.model_dump())
15
+
16
+ @classmethod
17
+ @logfire.instrument('List analyses from store')
18
+ async def list(cls) -> list[Analysis]:
19
+ return [
20
+ Analysis.model_validate(analysis)
21
+ async for analysis in cls._get_store().values.aio()
22
+ ]
23
+
24
+ @classmethod
25
+ @logfire.instrument('Clear analyses from store')
26
+ async def clear(cls):
27
+ await cls._get_store().clear.aio()
28
+
29
+ @classmethod
30
+ def _get_store(cls) -> modal.Dict:
31
+ return modal.Dict.from_name('analyses', create_if_missing=True) # type: ignore ### [/analysis_store]
@@ -0,0 +1,105 @@
1
+ """Example of PydanticAI with multiple tools which the LLM needs to call in turn to answer a question.
2
+
3
+ In this case the idea is a "weather" agent — the user can ask for the weather in multiple cities,
4
+ the agent will use the `get_lat_lng` tool to get the latitude and longitude of the locations, then use
5
+ the `get_weather` tool to get the weather.
6
+
7
+ Run with:
8
+
9
+ uv run -m pydantic_ai_examples.weather_agent
10
+ """
11
+
12
+ from __future__ import annotations as _annotations
13
+
14
+ import asyncio
15
+ from dataclasses import dataclass
16
+ from typing import Any
17
+
18
+ import logfire
19
+ from httpx import AsyncClient
20
+ from pydantic import BaseModel
21
+
22
+ from pydantic_ai import Agent, RunContext
23
+
24
+ # 'if-token-present' means nothing will be sent (and the example will work) if you don't have logfire configured
25
+ logfire.configure(send_to_logfire='if-token-present')
26
+ logfire.instrument_pydantic_ai()
27
+
28
+
29
+ @dataclass
30
+ class Deps:
31
+ client: AsyncClient
32
+
33
+
34
+ weather_agent = Agent(
35
+ 'openai:gpt-4.1-mini',
36
+ # 'Be concise, reply with one sentence.' is enough for some models (like openai) to use
37
+ # the below tools appropriately, but others like anthropic and gemini require a bit more direction.
38
+ instructions='Be concise, reply with one sentence.',
39
+ deps_type=Deps,
40
+ retries=2,
41
+ )
42
+
43
+
44
+ class LatLng(BaseModel):
45
+ lat: float
46
+ lng: float
47
+
48
+
49
+ @weather_agent.tool
50
+ async def get_lat_lng(ctx: RunContext[Deps], location_description: str) -> LatLng:
51
+ """Get the latitude and longitude of a location.
52
+
53
+ Args:
54
+ ctx: The context.
55
+ location_description: A description of a location.
56
+ """
57
+ # NOTE: the response here will be random, and is not related to the location description.
58
+ r = await ctx.deps.client.get(
59
+ 'https://demo-endpoints.pydantic.workers.dev/latlng',
60
+ params={'location': location_description},
61
+ )
62
+ r.raise_for_status()
63
+ return LatLng.model_validate_json(r.content)
64
+
65
+
66
+ @weather_agent.tool
67
+ async def get_weather(ctx: RunContext[Deps], lat: float, lng: float) -> dict[str, Any]:
68
+ """Get the weather at a location.
69
+
70
+ Args:
71
+ ctx: The context.
72
+ lat: Latitude of the location.
73
+ lng: Longitude of the location.
74
+ """
75
+ # NOTE: the responses here will be random, and are not related to the lat and lng.
76
+ temp_response, descr_response = await asyncio.gather(
77
+ ctx.deps.client.get(
78
+ 'https://demo-endpoints.pydantic.workers.dev/number',
79
+ params={'min': 10, 'max': 30},
80
+ ),
81
+ ctx.deps.client.get(
82
+ 'https://demo-endpoints.pydantic.workers.dev/weather',
83
+ params={'lat': lat, 'lng': lng},
84
+ ),
85
+ )
86
+ temp_response.raise_for_status()
87
+ descr_response.raise_for_status()
88
+ return {
89
+ 'temperature': f'{temp_response.text} °C',
90
+ 'description': descr_response.text,
91
+ }
92
+
93
+
94
+ async def main():
95
+ async with AsyncClient() as client:
96
+ logfire.instrument_httpx(client, capture_all=True)
97
+ deps = Deps(client=client)
98
+ result = await weather_agent.run(
99
+ 'What is the weather like in London and in Wiltshire?', deps=deps
100
+ )
101
+ print('Response:', result.output)
102
+
103
+
104
+ if __name__ == '__main__':
105
+ asyncio.run(main())
@@ -1,7 +1,6 @@
1
1
  from __future__ import annotations as _annotations
2
2
 
3
3
  import json
4
- import os
5
4
 
6
5
  from httpx import AsyncClient
7
6
 
@@ -18,10 +17,7 @@ except ImportError as e:
18
17
  TOOL_TO_DISPLAY_NAME = {'get_lat_lng': 'Geocoding API', 'get_weather': 'Weather API'}
19
18
 
20
19
  client = AsyncClient()
21
- weather_api_key = os.getenv('WEATHER_API_KEY')
22
- # create a free API key at https://geocode.maps.co/
23
- geo_api_key = os.getenv('GEO_API_KEY')
24
- deps = Deps(client=client, weather_api_key=weather_api_key, geo_api_key=geo_api_key)
20
+ deps = Deps(client=client)
25
21
 
26
22
 
27
23
  async def stream_from_agent(prompt: str, chatbot: list[dict], past_messages: list):
@@ -52,7 +52,8 @@ dependencies = [
52
52
  "uvicorn>=0.32.0",
53
53
  "devtools>=0.12.2",
54
54
  "gradio>=5.9.0; python_version>'3.9'",
55
- "mcp[cli]>=1.4.1; python_version >= '3.10'"
55
+ "mcp[cli]>=1.4.1; python_version >= '3.10'",
56
+ "modal>=1.0.4",
56
57
  ]
57
58
 
58
59
  [tool.hatch.build.targets.wheel]
@@ -1,158 +0,0 @@
1
- """Example of PydanticAI with multiple tools which the LLM needs to call in turn to answer a question.
2
-
3
- In this case the idea is a "weather" agent — the user can ask for the weather in multiple cities,
4
- the agent will use the `get_lat_lng` tool to get the latitude and longitude of the locations, then use
5
- the `get_weather` tool to get the weather.
6
-
7
- Run with:
8
-
9
- uv run -m pydantic_ai_examples.weather_agent
10
- """
11
-
12
- from __future__ import annotations as _annotations
13
-
14
- import asyncio
15
- import os
16
- import urllib.parse
17
- from dataclasses import dataclass
18
- from typing import Any
19
-
20
- import logfire
21
- from devtools import debug
22
- from httpx import AsyncClient
23
-
24
- from pydantic_ai import Agent, ModelRetry, RunContext
25
-
26
- # 'if-token-present' means nothing will be sent (and the example will work) if you don't have logfire configured
27
- logfire.configure(send_to_logfire='if-token-present')
28
- logfire.instrument_pydantic_ai()
29
-
30
-
31
- @dataclass
32
- class Deps:
33
- client: AsyncClient
34
- weather_api_key: str | None
35
- geo_api_key: str | None
36
-
37
-
38
- weather_agent = Agent(
39
- 'openai:gpt-4o',
40
- # 'Be concise, reply with one sentence.' is enough for some models (like openai) to use
41
- # the below tools appropriately, but others like anthropic and gemini require a bit more direction.
42
- instructions=(
43
- 'Be concise, reply with one sentence.'
44
- 'Use the `get_lat_lng` tool to get the latitude and longitude of the locations, '
45
- 'then use the `get_weather` tool to get the weather.'
46
- ),
47
- deps_type=Deps,
48
- retries=2,
49
- )
50
-
51
-
52
- @weather_agent.tool
53
- async def get_lat_lng(
54
- ctx: RunContext[Deps], location_description: str
55
- ) -> dict[str, float]:
56
- """Get the latitude and longitude of a location.
57
-
58
- Args:
59
- ctx: The context.
60
- location_description: A description of a location.
61
- """
62
- if ctx.deps.geo_api_key is None:
63
- # if no API key is provided, return a dummy response (London)
64
- return {'lat': 51.1, 'lng': -0.1}
65
-
66
- params = {'access_token': ctx.deps.geo_api_key}
67
- loc = urllib.parse.quote(location_description)
68
- r = await ctx.deps.client.get(
69
- f'https://api.mapbox.com/geocoding/v5/mapbox.places/{loc}.json', params=params
70
- )
71
- r.raise_for_status()
72
- data = r.json()
73
-
74
- if features := data['features']:
75
- lat, lng = features[0]['center']
76
- return {'lat': lat, 'lng': lng}
77
- else:
78
- raise ModelRetry('Could not find the location')
79
-
80
-
81
- @weather_agent.tool
82
- async def get_weather(ctx: RunContext[Deps], lat: float, lng: float) -> dict[str, Any]:
83
- """Get the weather at a location.
84
-
85
- Args:
86
- ctx: The context.
87
- lat: Latitude of the location.
88
- lng: Longitude of the location.
89
- """
90
- if ctx.deps.weather_api_key is None:
91
- # if no API key is provided, return a dummy response
92
- return {'temperature': '21 °C', 'description': 'Sunny'}
93
-
94
- params = {
95
- 'apikey': ctx.deps.weather_api_key,
96
- 'location': f'{lat},{lng}',
97
- 'units': 'metric',
98
- }
99
- with logfire.span('calling weather API', params=params) as span:
100
- r = await ctx.deps.client.get(
101
- 'https://api.tomorrow.io/v4/weather/realtime', params=params
102
- )
103
- r.raise_for_status()
104
- data = r.json()
105
- span.set_attribute('response', data)
106
-
107
- values = data['data']['values']
108
- # https://docs.tomorrow.io/reference/data-layers-weather-codes
109
- code_lookup = {
110
- 1000: 'Clear, Sunny',
111
- 1100: 'Mostly Clear',
112
- 1101: 'Partly Cloudy',
113
- 1102: 'Mostly Cloudy',
114
- 1001: 'Cloudy',
115
- 2000: 'Fog',
116
- 2100: 'Light Fog',
117
- 4000: 'Drizzle',
118
- 4001: 'Rain',
119
- 4200: 'Light Rain',
120
- 4201: 'Heavy Rain',
121
- 5000: 'Snow',
122
- 5001: 'Flurries',
123
- 5100: 'Light Snow',
124
- 5101: 'Heavy Snow',
125
- 6000: 'Freezing Drizzle',
126
- 6001: 'Freezing Rain',
127
- 6200: 'Light Freezing Rain',
128
- 6201: 'Heavy Freezing Rain',
129
- 7000: 'Ice Pellets',
130
- 7101: 'Heavy Ice Pellets',
131
- 7102: 'Light Ice Pellets',
132
- 8000: 'Thunderstorm',
133
- }
134
- return {
135
- 'temperature': f'{values["temperatureApparent"]:0.0f}°C',
136
- 'description': code_lookup.get(values['weatherCode'], 'Unknown'),
137
- }
138
-
139
-
140
- async def main():
141
- async with AsyncClient() as client:
142
- logfire.instrument_httpx(client, capture_all=True)
143
- # create a free API key at https://www.tomorrow.io/weather-api/
144
- weather_api_key = os.getenv('WEATHER_API_KEY')
145
- # create a free API key at https://www.mapbox.com/
146
- geo_api_key = os.getenv('GEO_API_KEY')
147
- deps = Deps(
148
- client=client, weather_api_key=weather_api_key, geo_api_key=geo_api_key
149
- )
150
- result = await weather_agent.run(
151
- 'What is the weather like in London and in Wiltshire?', deps=deps
152
- )
153
- debug(result)
154
- print('Response:', result.output)
155
-
156
-
157
- if __name__ == '__main__':
158
- asyncio.run(main())