pydantic-ai-examples 0.4.3__py3-none-any.whl → 0.4.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,41 @@
+ """Example usage of the AG-UI adapter for Pydantic AI.
+
+ This provides a FastAPI application that demonstrates how to use the
+ Pydantic AI agent with the AG-UI protocol. It includes examples for
+ each of the AG-UI dojo features:
+ - Agentic Chat
+ - Human in the Loop
+ - Agentic Generative UI
+ - Tool Based Generative UI
+ - Shared State
+ - Predictive State Updates
+ """
+
+ from __future__ import annotations
+
+ from fastapi import FastAPI
+
+ from .api import (
+     agentic_chat_app,
+     agentic_generative_ui_app,
+     human_in_the_loop_app,
+     predictive_state_updates_app,
+     shared_state_app,
+     tool_based_generative_ui_app,
+ )
+
+ app = FastAPI(title='Pydantic AI AG-UI server')
+ app.mount('/agentic_chat', agentic_chat_app, 'Agentic Chat')
+ app.mount('/agentic_generative_ui', agentic_generative_ui_app, 'Agentic Generative UI')
+ app.mount('/human_in_the_loop', human_in_the_loop_app, 'Human in the Loop')
+ app.mount(
+     '/predictive_state_updates',
+     predictive_state_updates_app,
+     'Predictive State Updates',
+ )
+ app.mount('/shared_state', shared_state_app, 'Shared State')
+ app.mount(
+     '/tool_based_generative_ui',
+     tool_based_generative_ui_app,
+     'Tool Based Generative UI',
+ )
@@ -0,0 +1,9 @@
+ """Very simple CLI to run the AG-UI example.
+
+ See https://ai.pydantic.dev/examples/ag-ui/ for more information.
+ """
+
+ if __name__ == '__main__':
+     import uvicorn
+
+     uvicorn.run('pydantic_ai_examples.ag_ui:app', port=9000)
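The two modules above wire the per-feature apps into one FastAPI server and run it with uvicorn. A minimal sketch of using them from your own code (illustrative only, not part of the package; assumes pydantic-ai-examples and uvicorn are installed):

    import uvicorn

    from pydantic_ai_examples.ag_ui import app

    # List the mounted feature paths defined in ag_ui/__init__.py,
    # e.g. /agentic_chat, /shared_state, /tool_based_generative_ui.
    for route in app.routes:
        print(route.path)

    # Serve the combined app, equivalent to what __main__.py does.
    uvicorn.run(app, port=9000)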
@@ -0,0 +1,19 @@
+ """Example API for an AG-UI compatible Pydantic AI Agent UI."""
+
+ from __future__ import annotations
+
+ from .agentic_chat import app as agentic_chat_app
+ from .agentic_generative_ui import app as agentic_generative_ui_app
+ from .human_in_the_loop import app as human_in_the_loop_app
+ from .predictive_state_updates import app as predictive_state_updates_app
+ from .shared_state import app as shared_state_app
+ from .tool_based_generative_ui import app as tool_based_generative_ui_app
+
+ __all__ = [
+     'agentic_chat_app',
+     'agentic_generative_ui_app',
+     'human_in_the_loop_app',
+     'predictive_state_updates_app',
+     'shared_state_app',
+     'tool_based_generative_ui_app',
+ ]
@@ -0,0 +1,25 @@
+ """Agentic Chat feature."""
+
+ from __future__ import annotations
+
+ from datetime import datetime
+ from zoneinfo import ZoneInfo
+
+ from pydantic_ai import Agent
+
+ agent = Agent('openai:gpt-4o-mini')
+ app = agent.to_ag_ui()
+
+
+ @agent.tool_plain
+ async def current_time(timezone: str = 'UTC') -> str:
+     """Get the current time in ISO format.
+
+     Args:
+         timezone: The timezone to use.
+
+     Returns:
+         The current time in ISO format string.
+     """
+     tz: ZoneInfo = ZoneInfo(timezone)
+     return datetime.now(tz=tz).isoformat()
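Assuming `tool_plain` returns the decorated function unchanged (so it stays directly callable), the tool can be sanity-checked outside an agent run; a rough sketch, not part of the package:

    import asyncio

    from pydantic_ai_examples.ag_ui.api.agentic_chat import current_time

    # Call the tool function directly; no model or AG-UI client involved.
    print(asyncio.run(current_time('Europe/Amsterdam')))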
@@ -0,0 +1,119 @@
+ """Agentic Generative UI feature."""
+
+ from __future__ import annotations
+
+ from textwrap import dedent
+ from typing import Any, Literal
+
+ from pydantic import BaseModel, Field
+
+ from ag_ui.core import EventType, StateDeltaEvent, StateSnapshotEvent
+ from pydantic_ai import Agent
+
+ StepStatus = Literal['pending', 'completed']
+
+
+ class Step(BaseModel):
+     """Represents a step in a plan."""
+
+     description: str = Field(description='The description of the step')
+     status: StepStatus = Field(
+         default='pending',
+         description='The status of the step (e.g., pending, completed)',
+     )
+
+
+ class Plan(BaseModel):
+     """Represents a plan with multiple steps."""
+
+     steps: list[Step] = Field(default_factory=list, description='The steps in the plan')
+
+
+ class JSONPatchOp(BaseModel):
+     """A class representing a JSON Patch operation (RFC 6902)."""
+
+     op: Literal['add', 'remove', 'replace', 'move', 'copy', 'test'] = Field(
+         description='The operation to perform: add, remove, replace, move, copy, or test',
+     )
+     path: str = Field(description='JSON Pointer (RFC 6901) to the target location')
+     value: Any = Field(
+         default=None,
+         description='The value to apply (for add, replace operations)',
+     )
+     from_: str | None = Field(
+         default=None,
+         alias='from',
+         description='Source path (for move, copy operations)',
+     )
+
+
+ agent = Agent(
+     'openai:gpt-4o-mini',
+     instructions=dedent(
+         """
+         When planning use tools only, without any other messages.
+         IMPORTANT:
+         - Use the `create_plan` tool to set the initial state of the steps
+         - Use the `update_plan_step` tool to update the status of each step
+         - Do NOT repeat the plan or summarise it in a message
+         - Do NOT confirm the creation or updates in a message
+         - Do NOT ask the user for additional information or next steps
+
+         Only one plan can be active at a time, so do not call the `create_plan` tool
+         again until all the steps in current plan are completed.
+         """
+     ),
+ )
+
+
+ @agent.tool_plain
+ def create_plan(steps: list[str]) -> StateSnapshotEvent:
+     """Create a plan with multiple steps.
+
+     Args:
+         steps: List of step descriptions to create the plan.
+
+     Returns:
+         StateSnapshotEvent containing the initial state of the steps.
+     """
+     plan: Plan = Plan(
+         steps=[Step(description=step) for step in steps],
+     )
+     return StateSnapshotEvent(
+         type=EventType.STATE_SNAPSHOT,
+         snapshot=plan.model_dump(),
+     )
+
+
+ @agent.tool_plain
+ def update_plan_step(
+     index: int, description: str | None = None, status: StepStatus | None = None
+ ) -> StateDeltaEvent:
+     """Update the plan with new steps or changes.
+
+     Args:
+         index: The index of the step to update.
+         description: The new description for the step.
+         status: The new status for the step.
+
+     Returns:
+         StateDeltaEvent containing the changes made to the plan.
+     """
+     changes: list[JSONPatchOp] = []
+     if description is not None:
+         changes.append(
+             JSONPatchOp(
+                 op='replace', path=f'/steps/{index}/description', value=description
+             )
+         )
+     if status is not None:
+         changes.append(
+             JSONPatchOp(op='replace', path=f'/steps/{index}/status', value=status)
+         )
+     return StateDeltaEvent(
+         type=EventType.STATE_DELTA,
+         delta=changes,
+     )
+
+
+ app = agent.to_ag_ui()
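`update_plan_step` streams RFC 6902 operations to the client, which is expected to apply them to its copy of the plan snapshot. A rough client-side sketch using the third-party jsonpatch package (not a dependency of these examples; the field names are taken from the models above):

    import jsonpatch

    # Snapshot shape produced by create_plan(['Boil the water']).
    plan = {'steps': [{'description': 'Boil the water', 'status': 'pending'}]}

    # Delta shape produced by update_plan_step(0, status='completed').
    delta = [{'op': 'replace', 'path': '/steps/0/status', 'value': 'completed'}]

    plan = jsonpatch.apply_patch(plan, delta)
    print(plan)  # {'steps': [{'description': 'Boil the water', 'status': 'completed'}]}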
@@ -0,0 +1,26 @@
+ """Human in the Loop Feature.
+
+ No special handling is required for this feature.
+ """
+
+ from __future__ import annotations
+
+ from textwrap import dedent
+
+ from pydantic_ai import Agent
+
+ agent = Agent(
+     'openai:gpt-4o-mini',
+     instructions=dedent(
+         """
+         When planning tasks use tools only, without any other messages.
+         IMPORTANT:
+         - Use the `generate_task_steps` tool to display the suggested steps to the user
+         - Never repeat the plan, or send a message detailing steps
+         - If accepted, confirm the creation of the plan and the number of selected (enabled) steps only
+         - If not accepted, ask the user for more information, DO NOT use the `generate_task_steps` tool again
+         """
+     ),
+ )
+
+ app = agent.to_ag_ui()
@@ -0,0 +1,77 @@
+ """Predictive State feature."""
+
+ from __future__ import annotations
+
+ from textwrap import dedent
+
+ from pydantic import BaseModel
+
+ from ag_ui.core import CustomEvent, EventType
+ from pydantic_ai import Agent, RunContext
+ from pydantic_ai.ag_ui import StateDeps
+
+
+ class DocumentState(BaseModel):
+     """State for the document being written."""
+
+     document: str = ''
+
+
+ agent = Agent('openai:gpt-4o-mini', deps_type=StateDeps[DocumentState])
+
+
+ # Tools which return AG-UI events will be sent to the client as part of the
+ # event stream, single events and iterables of events are supported.
+ @agent.tool_plain
+ def document_predict_state() -> list[CustomEvent]:
+     """Enable document state prediction.
+
+     Returns:
+         CustomEvent containing the event to enable state prediction.
+     """
+     return [
+         CustomEvent(
+             type=EventType.CUSTOM,
+             name='PredictState',
+             value=[
+                 {
+                     'state_key': 'document',
+                     'tool': 'write_document',
+                     'tool_argument': 'document',
+                 },
+             ],
+         ),
+     ]
+
+
+ @agent.instructions()
+ def story_instructions(ctx: RunContext[StateDeps[DocumentState]]) -> str:
+     """Provide instructions for writing document if present.
+
+     Args:
+         ctx: The run context containing document state information.
+
+     Returns:
+         Instructions string for the document writing agent.
+     """
+     return dedent(
+         f"""You are a helpful assistant for writing documents.
+
+         Before you start writing, you MUST call the `document_predict_state`
+         tool to enable state prediction.
+
+         To present the document to the user for review, you MUST use the
+         `write_document` tool.
+
+         When you have written the document, DO NOT repeat it as a message.
+         If accepted briefly summarize the changes you made, 2 sentences
+         max, otherwise ask the user to clarify what they want to change.
+
+         This is the current document:
+
+         {ctx.deps.state.document}
+         """
+     )
+
+
+ app = agent.to_ag_ui(deps=StateDeps(DocumentState()))
@@ -0,0 +1,137 @@
+ """Shared State feature."""
+
+ from __future__ import annotations
+
+ from enum import StrEnum
+ from textwrap import dedent
+
+ from pydantic import BaseModel, Field
+
+ from ag_ui.core import EventType, StateSnapshotEvent
+ from pydantic_ai import Agent, RunContext
+ from pydantic_ai.ag_ui import StateDeps
+
+
+ class SkillLevel(StrEnum):
+     """The level of skill required for the recipe."""
+
+     BEGINNER = 'Beginner'
+     INTERMEDIATE = 'Intermediate'
+     ADVANCED = 'Advanced'
+
+
+ class SpecialPreferences(StrEnum):
+     """Special preferences for the recipe."""
+
+     HIGH_PROTEIN = 'High Protein'
+     LOW_CARB = 'Low Carb'
+     SPICY = 'Spicy'
+     BUDGET_FRIENDLY = 'Budget-Friendly'
+     ONE_POT_MEAL = 'One-Pot Meal'
+     VEGETARIAN = 'Vegetarian'
+     VEGAN = 'Vegan'
+
+
+ class CookingTime(StrEnum):
+     """The cooking time of the recipe."""
+
+     FIVE_MIN = '5 min'
+     FIFTEEN_MIN = '15 min'
+     THIRTY_MIN = '30 min'
+     FORTY_FIVE_MIN = '45 min'
+     SIXTY_PLUS_MIN = '60+ min'
+
+
+ class Ingredient(BaseModel):
+     """A class representing an ingredient in a recipe."""
+
+     icon: str = Field(
+         default='ingredient',
+         description="The icon emoji (not emoji code like '\x1f35e', but the actual emoji like 🥕) of the ingredient",
+     )
+     name: str
+     amount: str
+
+
+ class Recipe(BaseModel):
+     """A class representing a recipe."""
+
+     skill_level: SkillLevel = Field(
+         default=SkillLevel.BEGINNER,
+         description='The skill level required for the recipe',
+     )
+     special_preferences: list[SpecialPreferences] = Field(
+         default_factory=list,
+         description='Any special preferences for the recipe',
+     )
+     cooking_time: CookingTime = Field(
+         default=CookingTime.FIVE_MIN, description='The cooking time of the recipe'
+     )
+     ingredients: list[Ingredient] = Field(
+         default_factory=list,
+         description='Ingredients for the recipe',
+     )
+     instructions: list[str] = Field(
+         default_factory=list, description='Instructions for the recipe'
+     )
+
+
+ class RecipeSnapshot(BaseModel):
+     """A class representing the state of the recipe."""
+
+     recipe: Recipe = Field(
+         default_factory=Recipe, description='The current state of the recipe'
+     )
+
+
+ agent = Agent('openai:gpt-4o-mini', deps_type=StateDeps[RecipeSnapshot])
+
+
+ @agent.tool_plain
+ def display_recipe(recipe: Recipe) -> StateSnapshotEvent:
+     """Display the recipe to the user.
+
+     Args:
+         recipe: The recipe to display.
+
+     Returns:
+         StateSnapshotEvent containing the recipe snapshot.
+     """
+     return StateSnapshotEvent(
+         type=EventType.STATE_SNAPSHOT,
+         snapshot={'recipe': recipe},
+     )
+
+
+ @agent.instructions
+ def recipe_instructions(ctx: RunContext[StateDeps[RecipeSnapshot]]) -> str:
+     """Instructions for the recipe generation agent.
+
+     Args:
+         ctx: The run context containing recipe state information.
+
+     Returns:
+         Instructions string for the recipe generation agent.
+     """
+     return dedent(
+         f"""
+         You are a helpful assistant for creating recipes.
+
+         IMPORTANT:
+         - Create a complete recipe using the existing ingredients
+         - Append new ingredients to the existing ones
+         - Use the `display_recipe` tool to present the recipe to the user
+         - Do NOT repeat the recipe in the message, use the tool instead
+
+         Once you have created the updated recipe and displayed it to the user,
+         summarise the changes in one sentence, don't describe the recipe in
+         detail or send it as a message to the user.
+
+         The current state of the recipe is:
+
+         {ctx.deps.state.recipe.model_dump_json(indent=2)}
+         """,
+     )
+
+
+ app = agent.to_ag_ui(deps=StateDeps(RecipeSnapshot()))
@@ -0,0 +1,11 @@
+ """Tool Based Generative UI feature.
+
+ No special handling is required for this feature.
+ """
+
+ from __future__ import annotations
+
+ from pydantic_ai import Agent
+
+ agent = Agent('openai:gpt-4o-mini')
+ app = agent.to_ag_ui()
@@ -1,4 +1,4 @@
- """Small but complete example of using PydanticAI to build a support agent for a bank.
+ """Small but complete example of using Pydantic AI to build a support agent for a bank.

  Run with:

@@ -1,4 +1,4 @@
- """Simple example of using PydanticAI to construct a Pydantic model from a text input.
+ """Simple example of using Pydantic AI to construct a Pydantic model from a text input.

  Run with:

@@ -1,4 +1,4 @@
- """Example demonstrating how to use PydanticAI to create a simple roulette game.
+ """Example demonstrating how to use Pydantic AI to create a simple roulette game.

  Run with:
  uv run -m pydantic_ai_examples.roulette_wheel
@@ -1,4 +1,4 @@
- """Example demonstrating how to use PydanticAI to generate SQL queries based on user input.
+ """Example demonstrating how to use Pydantic AI to generate SQL queries based on user input.

  Run postgres with:

@@ -26,7 +26,7 @@ agent = Agent()

  # models to try, and the appropriate env var
  models: list[tuple[KnownModelName, str]] = [
-     ('google-gla:gemini-1.5-flash', 'GEMINI_API_KEY'),
+     ('google-gla:gemini-2.0-flash', 'GEMINI_API_KEY'),
      ('openai:gpt-4o-mini', 'OPENAI_API_KEY'),
      ('groq:llama-3.3-70b-versatile', 'GROQ_API_KEY'),
  ]
@@ -1,4 +1,4 @@
- """Example of PydanticAI with multiple tools which the LLM needs to call in turn to answer a question.
+ """Example of Pydantic AI with multiple tools which the LLM needs to call in turn to answer a question.

  In this case the idea is a "weather" agent — the user can ask for the weather in multiple cities,
  the agent will use the `get_lat_lng` tool to get the latitude and longitude of the locations, then use
@@ -1,8 +1,8 @@
  Metadata-Version: 2.4
  Name: pydantic-ai-examples
- Version: 0.4.3
- Summary: Examples of how to use PydanticAI and what it can do.
- Author-email: Samuel Colvin <samuel@pydantic.dev>
+ Version: 0.4.4
+ Summary: Examples of how to use Pydantic AI and what it can do.
+ Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>, Douwe Maan <douwe@pydantic.dev>
  License-Expression: MIT
  License-File: LICENSE
  Classifier: Development Status :: 4 - Beta
@@ -32,14 +32,14 @@ Requires-Dist: gradio>=5.9.0; python_version > '3.9'
  Requires-Dist: logfire[asyncpg,fastapi,httpx,sqlite3]>=2.6
  Requires-Dist: mcp[cli]>=1.4.1; python_version >= '3.10'
  Requires-Dist: modal>=1.0.4
- Requires-Dist: pydantic-ai-slim[anthropic,groq,openai,vertexai]==0.4.3
- Requires-Dist: pydantic-evals==0.4.3
+ Requires-Dist: pydantic-ai-slim[ag-ui,anthropic,groq,openai,vertexai]==0.4.4
+ Requires-Dist: pydantic-evals==0.4.4
  Requires-Dist: python-multipart>=0.0.17
  Requires-Dist: rich>=13.9.2
  Requires-Dist: uvicorn>=0.32.0
  Description-Content-Type: text/markdown

- # PydanticAI Examples
+ # Pydantic AI Examples

  [![CI](https://github.com/pydantic/pydantic-ai/actions/workflows/ci.yml/badge.svg?event=push)](https://github.com/pydantic/pydantic-ai/actions/workflows/ci.yml?query=branch%3Amain)
  [![Coverage](https://coverage-badge.samuelcolvin.workers.dev/pydantic/pydantic-ai.svg)](https://coverage-badge.samuelcolvin.workers.dev/redirect/pydantic/pydantic-ai)
@@ -47,6 +47,6 @@ Description-Content-Type: text/markdown
  [![versions](https://img.shields.io/pypi/pyversions/pydantic-ai.svg)](https://github.com/pydantic/pydantic-ai)
  [![license](https://img.shields.io/github/license/pydantic/pydantic-ai.svg?v)](https://github.com/pydantic/pydantic-ai/blob/main/LICENSE)

- Examples of how to use PydanticAI and what it can do.
+ Examples of how to use Pydantic AI and what it can do.

  For full documentation of these examples and how to run them, see [ai.pydantic.dev/examples/](https://ai.pydantic.dev/examples/).
@@ -1,18 +1,28 @@
  pydantic_ai_examples/__main__.py,sha256=i0LEo2JBOZ-gnHED0ou5Bya43gi7KmOyQ_jKN7M5Ces,1647
- pydantic_ai_examples/bank_support.py,sha256=PZmc5oI395-030OaGtPiEmaF6hGtcVj21-yD8OAnrdw,2725
+ pydantic_ai_examples/bank_support.py,sha256=vJL2zLEq19OztP1fUGG7_6cYHllvxvzkafFMqukimMo,2726
  pydantic_ai_examples/chat_app.html,sha256=90XhxrpDAT09mPVTn9edEn8PqAD-tHxWkeeMz9r_okQ,2580
  pydantic_ai_examples/chat_app.py,sha256=zrurOhwbjPDTYi9FkYZF3wMxc2AT9PFckM-xngQ5188,7105
  pydantic_ai_examples/chat_app.ts,sha256=2KfZ2rJU2o0iCPjelyqEi5sH6vfemzWaa5Evx_VcAE4,3307
  pydantic_ai_examples/flight_booking.py,sha256=hwz1ct0laIZQCb0cuhFYyrR-PBDUL2zc4CS39ytOwn0,7454
- pydantic_ai_examples/pydantic_model.py,sha256=veskNKY-PaPGYkWkTfjLGKfF_fvGh09fJ2xBTuJQcRQ,774
+ pydantic_ai_examples/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pydantic_ai_examples/pydantic_model.py,sha256=EQsHBig2bvb0PG_2XDgP9Le5xJ4n8eZJzQDGQYhDykg,775
  pydantic_ai_examples/question_graph.py,sha256=POIBYGNDRyqAg08ReglBPcAZAcqHJMdcpCA2VOwiWNw,5107
  pydantic_ai_examples/rag.py,sha256=mFEnJqofVbx_1jru2FZWgskfyDP965IFfanwSlbQzMs,8005
- pydantic_ai_examples/roulette_wheel.py,sha256=WUPklPKsnmJy-NoPY20mC-AI0820T-YMAYvAljKyiOc,1653
- pydantic_ai_examples/sql_gen.py,sha256=x-vRRDe93DRW83RWgirXkQdthMFwSGcHm0VIoBgvdaE,5200
- pydantic_ai_examples/stream_markdown.py,sha256=4YcUIXI29gwV5rgUb-u7uaf6el7FxfsY9Rcoo1t2_EA,2447
+ pydantic_ai_examples/roulette_wheel.py,sha256=2YHKbGzYOkLsd98hO3ntjM6pChR1UpmsRrLD36Qh5f0,1654
+ pydantic_ai_examples/sql_gen.py,sha256=HLNR6-Ah_7T0IodE62Tk88-MP3_w_LOybTocyaIV3dU,5201
+ pydantic_ai_examples/stream_markdown.py,sha256=-ucIev91GExnTuEokT54jq-feXXp2CUP7B_3lvt2l9I,2447
  pydantic_ai_examples/stream_whales.py,sha256=KC1oth1rMpFtS1-Tbc9EHMtt-BynahH_SGAEJD5sfIE,2714
- pydantic_ai_examples/weather_agent.py,sha256=MA1SDkHuUyh2hC-lZdWUSsBVVLALSajgKduIb5Wer1Q,3186
+ pydantic_ai_examples/weather_agent.py,sha256=E42RbuVDJzxlBw9lF2ARNSNAhL1HWVEmTt5MN70DyDU,3187
  pydantic_ai_examples/weather_agent_gradio.py,sha256=WVoRqD74jEvGyUs5VHmsRIGduLMu2sP7GHvc3E79T6A,4521
+ pydantic_ai_examples/ag_ui/__init__.py,sha256=ZZs2V-5e9RaLl_7hJAq9-0Juk_f0mk2Vr7a4QT2QB-k,1174
+ pydantic_ai_examples/ag_ui/__main__.py,sha256=PMycatJt8Abb-Q8HXRGZoEY6vnOcvRebH7iI9MxLknA,225
+ pydantic_ai_examples/ag_ui/api/__init__.py,sha256=Pe307_ET_ERKBP-8Vs4L1yZRkK3ILPpajwxDpeW8YiI,673
+ pydantic_ai_examples/ag_ui/api/agentic_chat.py,sha256=Vrz7TVLHRs28gEdVXQ44R8jJtRU7EPrDjgqSUH90yh4,533
+ pydantic_ai_examples/ag_ui/api/agentic_generative_ui.py,sha256=6VVxQdPirBCVofePsaYriL1b5bH2CZF58zkVZRtfqWo,3477
+ pydantic_ai_examples/ag_ui/api/human_in_the_loop.py,sha256=130SDox5HoYjusPr2ubliI8udhxtNxUS3kD9RPb0zYc,765
+ pydantic_ai_examples/ag_ui/api/predictive_state_updates.py,sha256=x0lEV7Fr_BUGH9FApgBE18XgioTvGQ8zG_MyBmJPIlI,2112
+ pydantic_ai_examples/ag_ui/api/shared_state.py,sha256=bY75CUk4RJm1cqKAbhTlL8RzZkeEc8cof9N7reFdJXQ,3716
+ pydantic_ai_examples/ag_ui/api/tool_based_generative_ui.py,sha256=eT--lWjTzL0S3aIu9C14yeoixLjFXPWqwcdiuIlUAJk,219
  pydantic_ai_examples/evals/__init__.py,sha256=4f1v2o4F-gnUVtlkZU-dpwwwbLhqRxMcZv676atjNLg,115
  pydantic_ai_examples/evals/agent.py,sha256=KjCsUiL28RCNT6NwoQnQCwJ0xRw3EUGdIrYhlIjmVqI,2042
  pydantic_ai_examples/evals/custom_evaluators.py,sha256=Uz37_wbT4uA6s9fl46nTsH3NQKyS1KamMPPP860stww,2245
@@ -33,7 +43,7 @@ pydantic_ai_examples/slack_lead_qualifier/modal.py,sha256=f464AaeyP-n3UIfvEVVc4D
  pydantic_ai_examples/slack_lead_qualifier/models.py,sha256=WTp6D2WCASXqrjPVT3vGgTSYATLPBM3_cjq9wvXMRao,1586
  pydantic_ai_examples/slack_lead_qualifier/slack.py,sha256=VJVfMeUXYClWUJBLHNuaW8PB2sxjNzpTC-O_AJwcxQ4,833
  pydantic_ai_examples/slack_lead_qualifier/store.py,sha256=04vB4eZWKk_Tx0b9K4QuVI1U24JEyJyBS4X76cui7OI,896
- pydantic_ai_examples-0.4.3.dist-info/METADATA,sha256=f3E791cB-TsT1XlBX7XuLW3Ie9_46lN3AG6DruR9Jpc,2598
- pydantic_ai_examples-0.4.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- pydantic_ai_examples-0.4.3.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
- pydantic_ai_examples-0.4.3.dist-info/RECORD,,
+ pydantic_ai_examples-0.4.4.dist-info/METADATA,sha256=Y_hLytJDTJnssmhktz1IpdMLPJi655L-lOwy0QqPEXM,2754
+ pydantic_ai_examples-0.4.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ pydantic_ai_examples-0.4.4.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+ pydantic_ai_examples-0.4.4.dist-info/RECORD,,