pydantic-ai-examples 0.1.2__py3-none-any.whl → 1.12.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pydantic_ai_examples/ag_ui/__init__.py +41 -0
- pydantic_ai_examples/ag_ui/__main__.py +9 -0
- pydantic_ai_examples/ag_ui/api/__init__.py +19 -0
- pydantic_ai_examples/ag_ui/api/agentic_chat.py +28 -0
- pydantic_ai_examples/ag_ui/api/agentic_generative_ui.py +120 -0
- pydantic_ai_examples/ag_ui/api/human_in_the_loop.py +27 -0
- pydantic_ai_examples/ag_ui/api/predictive_state_updates.py +78 -0
- pydantic_ai_examples/ag_ui/api/shared_state.py +139 -0
- pydantic_ai_examples/ag_ui/api/tool_based_generative_ui.py +12 -0
- pydantic_ai_examples/bank_support.py +16 -10
- pydantic_ai_examples/chat_app.py +8 -7
- pydantic_ai_examples/data_analyst.py +107 -0
- pydantic_ai_examples/evals/agent.py +1 -1
- pydantic_ai_examples/evals/custom_evaluators.py +5 -6
- pydantic_ai_examples/evals/example_01_generate_dataset.py +1 -2
- pydantic_ai_examples/evals/example_02_add_custom_evaluators.py +1 -2
- pydantic_ai_examples/evals/example_03_unit_testing.py +4 -2
- pydantic_ai_examples/evals/example_04_compare_models.py +3 -3
- pydantic_ai_examples/flight_booking.py +15 -10
- pydantic_ai_examples/py.typed +0 -0
- pydantic_ai_examples/pydantic_model.py +4 -3
- pydantic_ai_examples/question_graph.py +5 -5
- pydantic_ai_examples/rag.py +8 -7
- pydantic_ai_examples/roulette_wheel.py +1 -2
- pydantic_ai_examples/slack_lead_qualifier/__init__.py +0 -0
- pydantic_ai_examples/slack_lead_qualifier/agent.py +47 -0
- pydantic_ai_examples/slack_lead_qualifier/app.py +36 -0
- pydantic_ai_examples/slack_lead_qualifier/functions.py +85 -0
- pydantic_ai_examples/slack_lead_qualifier/modal.py +66 -0
- pydantic_ai_examples/slack_lead_qualifier/models.py +46 -0
- pydantic_ai_examples/slack_lead_qualifier/slack.py +30 -0
- pydantic_ai_examples/slack_lead_qualifier/store.py +31 -0
- pydantic_ai_examples/sql_gen.py +6 -7
- pydantic_ai_examples/stream_markdown.py +5 -4
- pydantic_ai_examples/stream_whales.py +4 -16
- pydantic_ai_examples/weather_agent.py +36 -88
- pydantic_ai_examples/weather_agent_gradio.py +10 -15
- {pydantic_ai_examples-0.1.2.dist-info → pydantic_ai_examples-1.12.0.dist-info}/METADATA +17 -13
- pydantic_ai_examples-1.12.0.dist-info/RECORD +50 -0
- pydantic_ai_examples-1.12.0.dist-info/licenses/LICENSE +21 -0
- pydantic_ai_examples-0.1.2.dist-info/RECORD +0 -30
- {pydantic_ai_examples-0.1.2.dist-info → pydantic_ai_examples-1.12.0.dist-info}/WHEEL +0 -0

pydantic_ai_examples/data_analyst.py
ADDED
@@ -0,0 +1,107 @@
+from dataclasses import dataclass, field
+
+import datasets
+import duckdb
+import pandas as pd
+
+from pydantic_ai import Agent, ModelRetry, RunContext
+
+
+@dataclass
+class AnalystAgentDeps:
+    output: dict[str, pd.DataFrame] = field(default_factory=dict)
+
+    def store(self, value: pd.DataFrame) -> str:
+        """Store the output in deps and return the reference such as Out[1] to be used by the LLM."""
+        ref = f'Out[{len(self.output) + 1}]'
+        self.output[ref] = value
+        return ref
+
+    def get(self, ref: str) -> pd.DataFrame:
+        if ref not in self.output:
+            raise ModelRetry(
+                f'Error: {ref} is not a valid variable reference. Check the previous messages and try again.'
+            )
+        return self.output[ref]
+
+
+analyst_agent = Agent(
+    'openai:gpt-5',
+    deps_type=AnalystAgentDeps,
+    instructions='You are a data analyst and your job is to analyze the data according to the user request.',
+)
+
+
+@analyst_agent.tool
+def load_dataset(
+    ctx: RunContext[AnalystAgentDeps],
+    path: str,
+    split: str = 'train',
+) -> str:
+    """Load the `split` of dataset `dataset_name` from huggingface.
+
+    Args:
+        ctx: Pydantic AI agent RunContext
+        path: name of the dataset in the form of `<user_name>/<dataset_name>`
+        split: load the split of the dataset (default: "train")
+    """
+    # begin load data from hf
+    builder = datasets.load_dataset_builder(path)  # pyright: ignore[reportUnknownMemberType]
+    splits: dict[str, datasets.SplitInfo] = builder.info.splits or {}  # pyright: ignore[reportUnknownMemberType]
+    if split not in splits:
+        raise ModelRetry(
+            f'{split} is not valid for dataset {path}. Valid splits are {",".join(splits.keys())}'
+        )
+
+    builder.download_and_prepare()  # pyright: ignore[reportUnknownMemberType]
+    dataset = builder.as_dataset(split=split)
+    assert isinstance(dataset, datasets.Dataset)
+    dataframe = dataset.to_pandas()
+    assert isinstance(dataframe, pd.DataFrame)
+    # end load data from hf
+
+    # store the dataframe in the deps and get a ref like "Out[1]"
+    ref = ctx.deps.store(dataframe)
+    # construct a summary of the loaded dataset
+    output = [
+        f'Loaded the dataset as `{ref}`.',
+        f'Description: {dataset.info.description}'
+        if dataset.info.description
+        else None,
+        f'Features: {dataset.info.features!r}' if dataset.info.features else None,
+    ]
+    return '\n'.join(filter(None, output))
+
+
+@analyst_agent.tool
+def run_duckdb(ctx: RunContext[AnalystAgentDeps], dataset: str, sql: str) -> str:
+    """Run DuckDB SQL query on the DataFrame.
+
+    Note that the virtual table name used in DuckDB SQL must be `dataset`.
+
+    Args:
+        ctx: Pydantic AI agent RunContext
+        dataset: reference string to the DataFrame
+        sql: the query to be executed using DuckDB
+    """
+    data = ctx.deps.get(dataset)
+    result = duckdb.query_df(df=data, virtual_table_name='dataset', sql_query=sql)
+    # pass the result as ref (because DuckDB SQL can select many rows, creating another huge dataframe)
+    ref = ctx.deps.store(result.df())  # pyright: ignore[reportUnknownMemberType]
+    return f'Executed SQL, result is `{ref}`'
+
+
+@analyst_agent.tool
+def display(ctx: RunContext[AnalystAgentDeps], name: str) -> str:
+    """Display at most 5 rows of the dataframe."""
+    dataset = ctx.deps.get(name)
+    return dataset.head().to_string()  # pyright: ignore[reportUnknownMemberType]
+
+
+if __name__ == '__main__':
+    deps = AnalystAgentDeps()
+    result = analyst_agent.run_sync(
+        user_prompt='Count how many negative comments are there in the dataset `cornell-movie-review-data/rotten_tomatoes`',
+        deps=deps,
+    )
+    print(result.output)
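
The pattern worth noting in this new example: tools never return a DataFrame to the model, only a short reference like `Out[1]`, while the data itself stays in the run deps. A minimal sketch of that mechanism, using only the `AnalystAgentDeps` class added above:

```python
import pandas as pd

from pydantic_ai_examples.data_analyst import AnalystAgentDeps

deps = AnalystAgentDeps()

# store() keeps the full DataFrame out of the prompt and returns a token
ref = deps.store(pd.DataFrame({'text': ['great', 'awful'], 'label': [1, 0]}))
print(ref)  # Out[1]

# a later tool call resolves the token back to the real object
print(deps.get(ref).shape)  # (2, 2)

# deps.get('Out[99]') would raise ModelRetry, telling the model to correct itself
```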

pydantic_ai_examples/evals/agent.py
CHANGED
@@ -21,7 +21,7 @@ class TimeRangeDeps:
 
 
 time_range_agent = Agent[TimeRangeDeps, TimeRangeResponse](
-    'gpt-
+    'gpt-5',
     output_type=TimeRangeResponse,  # type: ignore  # we can't yet annotate something as receiving a TypeForm
     deps_type=TimeRangeDeps,
     system_prompt="Convert the user's request into a structured time range.",

pydantic_ai_examples/evals/custom_evaluators.py
CHANGED
@@ -1,6 +1,11 @@
 from dataclasses import dataclass
 from datetime import timedelta
 
+from pydantic_ai_examples.evals.models import (
+    TimeRangeBuilderSuccess,
+    TimeRangeInputs,
+    TimeRangeResponse,
+)
 from pydantic_evals.evaluators import (
     Evaluator,
     EvaluatorContext,
@@ -8,12 +13,6 @@ from pydantic_evals.evaluators import (
 )
 from pydantic_evals.otel import SpanQuery
 
-from pydantic_ai_examples.evals.models import (
-    TimeRangeBuilderSuccess,
-    TimeRangeInputs,
-    TimeRangeResponse,
-)
-
 
 @dataclass
 class ValidateTimeRange(Evaluator[TimeRangeInputs, TimeRangeResponse]):

pydantic_ai_examples/evals/example_01_generate_dataset.py
CHANGED
@@ -2,11 +2,10 @@ import asyncio
 from pathlib import Path
 from types import NoneType
 
+from pydantic_ai_examples.evals.models import TimeRangeInputs, TimeRangeResponse
 from pydantic_evals import Dataset
 from pydantic_evals.generation import generate_dataset
 
-from pydantic_ai_examples.evals.models import TimeRangeInputs, TimeRangeResponse
-
 
 async def main():
     dataset = await generate_dataset(

pydantic_ai_examples/evals/example_02_add_custom_evaluators.py
CHANGED
@@ -1,8 +1,6 @@
 from pathlib import Path
 from types import NoneType
 
-from pydantic_evals import Dataset
-
 from pydantic_ai_examples.evals.custom_evaluators import (
     CUSTOM_EVALUATOR_TYPES,
     AgentCalledTool,
@@ -13,6 +11,7 @@ from pydantic_ai_examples.evals.models import (
     TimeRangeInputs,
     TimeRangeResponse,
 )
+from pydantic_evals import Dataset
 
 
 def main():

pydantic_ai_examples/evals/example_03_unit_testing.py
CHANGED
@@ -2,7 +2,6 @@ from pathlib import Path
 from types import NoneType
 
 import logfire
-from pydantic_evals import Dataset
 
 from pydantic_ai_examples.evals import infer_time_range
 from pydantic_ai_examples.evals.custom_evaluators import (
@@ -12,6 +11,7 @@ from pydantic_ai_examples.evals.models import (
     TimeRangeInputs,
     TimeRangeResponse,
 )
+from pydantic_evals import Dataset
 
 logfire.configure(
     send_to_logfire='if-token-present',
@@ -29,7 +29,9 @@ def evaluate_dataset():
     report = dataset.evaluate_sync(infer_time_range)
     print(report)
 
-
+    averages = report.averages()
+    assert averages is not None
+    assertion_pass_rate = averages.assertions
     assert assertion_pass_rate is not None, 'There should be at least one assertion'
     assert assertion_pass_rate > 0.9, (
         f'The assertion pass rate was {assertion_pass_rate:.1%}; it should be above 90%.'

pydantic_ai_examples/evals/example_04_compare_models.py
CHANGED
@@ -2,7 +2,6 @@ from pathlib import Path
 from types import NoneType
 
 import logfire
-from pydantic_evals import Dataset
 
 from pydantic_ai_examples.evals import infer_time_range
 from pydantic_ai_examples.evals.agent import time_range_agent
@@ -13,6 +12,7 @@ from pydantic_ai_examples.evals.models import (
     TimeRangeInputs,
     TimeRangeResponse,
 )
+from pydantic_evals import Dataset
 
 logfire.configure(
     send_to_logfire='if-token-present',
@@ -28,8 +28,8 @@ def compare_models():
         dataset_path, custom_evaluator_types=CUSTOM_EVALUATOR_TYPES
     )
     with logfire.span('Comparing different models for time_range_agent'):
-        with time_range_agent.override(model='openai:gpt-
-            dataset.evaluate_sync(infer_time_range, name='openai:gpt-
+        with time_range_agent.override(model='openai:gpt-5'):
+            dataset.evaluate_sync(infer_time_range, name='openai:gpt-5')
         with time_range_agent.override(model='openai:o1'):
             dataset.evaluate_sync(infer_time_range, name='openai:o1')
 
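
This hunk also shows the pattern the comparison relies on: `Agent.override` is a context manager that temporarily swaps the model, so one dataset can be scored against several models with the same `infer_time_range` task. A minimal sketch:

```python
from pydantic_ai import Agent

agent = Agent('openai:gpt-5', output_type=str)

with agent.override(model='openai:o1'):
    ...  # any run started inside this block uses openai:o1

# outside the block the agent is back on its original model
```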

pydantic_ai_examples/flight_booking.py
CHANGED
@@ -11,12 +11,18 @@ import logfire
 from pydantic import BaseModel, Field
 from rich.prompt import Prompt
 
-from pydantic_ai import
-
-
+from pydantic_ai import (
+    Agent,
+    ModelMessage,
+    ModelRetry,
+    RunContext,
+    RunUsage,
+    UsageLimits,
+)
 
 # 'if-token-present' means nothing will be sent (and the example will work) if you don't have logfire configured
 logfire.configure(send_to_logfire='if-token-present')
+logfire.instrument_pydantic_ai()
 
 
 class FlightDetails(BaseModel):
@@ -43,19 +49,18 @@ class Deps:
 
 # This agent is responsible for controlling the flow of the conversation.
 search_agent = Agent[Deps, FlightDetails | NoFlightFound](
-    'openai:gpt-
+    'openai:gpt-5',
     output_type=FlightDetails | NoFlightFound,  # type: ignore
     retries=4,
     system_prompt=(
         'Your job is to find the cheapest flight for the user on the given date. '
     ),
-    instrument=True,
 )
 
 
 # This agent is responsible for extracting flight details from web page text.
 extraction_agent = Agent(
-    'openai:gpt-
+    'openai:gpt-5',
     output_type=list[FlightDetails],
     system_prompt='Extract all the flight details from the given text.',
 )
@@ -107,8 +112,8 @@ class Failed(BaseModel):
 
 # This agent is responsible for extracting the user's seat selection
 seat_preference_agent = Agent[None, SeatPreference | Failed](
-    'openai:gpt-
-    output_type=SeatPreference | Failed,
+    'openai:gpt-5',
+    output_type=SeatPreference | Failed,
     system_prompt=(
         "Extract the user's seat preference. "
         'Seats A and F are window seats. '
@@ -182,7 +187,7 @@ async def main():
         req_date=datetime.date(2025, 1, 10),
     )
     message_history: list[ModelMessage] | None = None
-    usage:
+    usage: RunUsage = RunUsage()
     # run the agent until a satisfactory flight is found
     while True:
         result = await search_agent.run(
@@ -213,7 +218,7 @@ async def main():
     )
 
 
-async def find_seat(usage:
+async def find_seat(usage: RunUsage) -> SeatPreference:
    message_history: list[ModelMessage] | None = None
     while True:
         answer = Prompt.ask('What seat would you like?')
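
Two migrations run through this file: everything is now imported from the `pydantic_ai` root (rather than submodules like `pydantic_ai.messages`), and the usage tracker is now `RunUsage`. A sketch of how a shared `RunUsage` accumulates across runs, which is how `main()` and `find_seat()` use it above (the `request_limit` value here is illustrative):

```python
from pydantic_ai import Agent, RunUsage, UsageLimits

agent = Agent('openai:gpt-5', output_type=str)

usage = RunUsage()
limits = UsageLimits(request_limit=15)  # cap for the whole conversation

# passing the same RunUsage into consecutive runs accumulates their cost,
# and the limits are enforced against the running total
agent.run_sync('first question', usage=usage, usage_limits=limits)
agent.run_sync('follow-up', usage=usage, usage_limits=limits)
print(usage.requests)  # total requests across both runs
```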

pydantic_ai_examples/py.typed
File without changes

pydantic_ai_examples/pydantic_model.py
CHANGED
@@ -1,4 +1,4 @@
-"""Simple example of using
+"""Simple example of using Pydantic AI to construct a Pydantic model from a text input.
 
 Run with:
 
@@ -14,6 +14,7 @@ from pydantic_ai import Agent
 
 # 'if-token-present' means nothing will be sent (and the example will work) if you don't have logfire configured
 logfire.configure(send_to_logfire='if-token-present')
+logfire.instrument_pydantic_ai()
 
 
 class MyModel(BaseModel):
@@ -21,9 +22,9 @@ class MyModel(BaseModel):
     country: str
 
 
-model = os.getenv('PYDANTIC_AI_MODEL', 'openai:gpt-
+model = os.getenv('PYDANTIC_AI_MODEL', 'openai:gpt-5')
 print(f'Using model: {model}')
-agent = Agent(model, output_type=MyModel
+agent = Agent(model, output_type=MyModel)
 
 if __name__ == '__main__':
     result = agent.run_sync('The windy city in the US of A.')

pydantic_ai_examples/question_graph.py
CHANGED
@@ -12,6 +12,8 @@ from pathlib import Path
 
 import logfire
 from groq import BaseModel
+
+from pydantic_ai import Agent, ModelMessage, format_as_xml
 from pydantic_graph import (
     BaseNode,
     End,
@@ -20,13 +22,11 @@ from pydantic_graph import (
 )
 from pydantic_graph.persistence.file import FileStatePersistence
 
-from pydantic_ai import Agent, format_as_xml
-from pydantic_ai.messages import ModelMessage
-
 # 'if-token-present' means nothing will be sent (and the example will work) if you don't have logfire configured
 logfire.configure(send_to_logfire='if-token-present')
+logfire.instrument_pydantic_ai()
 
-ask_agent = Agent('openai:gpt-
+ask_agent = Agent('openai:gpt-5', output_type=str)
 
 
 @dataclass
@@ -65,7 +65,7 @@ class EvaluationOutput(BaseModel, use_attribute_docstrings=True):
 
 
 evaluate_agent = Agent(
-    'openai:gpt-
+    'openai:gpt-5',
     output_type=EvaluationOutput,
     system_prompt='Given a question and answer, evaluate if the answer is correct.',
 )

pydantic_ai_examples/rag.py
CHANGED
@@ -30,16 +30,17 @@ import asyncpg
 import httpx
 import logfire
 import pydantic_core
+from anyio import create_task_group
 from openai import AsyncOpenAI
 from pydantic import TypeAdapter
 from typing_extensions import AsyncGenerator
 
-from pydantic_ai import RunContext
-from pydantic_ai.agent import Agent
+from pydantic_ai import Agent, RunContext
 
 # 'if-token-present' means nothing will be sent (and the example will work) if you don't have logfire configured
 logfire.configure(send_to_logfire='if-token-present')
 logfire.instrument_asyncpg()
+logfire.instrument_pydantic_ai()
 
 
 @dataclass
@@ -48,7 +49,7 @@ class Deps:
     pool: asyncpg.Pool
 
 
-agent = Agent('openai:gpt-
+agent = Agent('openai:gpt-5', deps_type=Deps)
 
 
 @agent.tool
@@ -114,7 +115,7 @@ async def build_search_db():
     async with httpx.AsyncClient() as client:
         response = await client.get(DOCS_JSON)
         response.raise_for_status()
-        sections =
+        sections = sections_ta.validate_json(response.content)
 
     openai = AsyncOpenAI()
     logfire.instrument_openai(openai)
@@ -126,9 +127,9 @@ async def build_search_db():
         await conn.execute(DB_SCHEMA)
 
     sem = asyncio.Semaphore(10)
-    async with
+    async with create_task_group() as tg:
         for section in sections:
-            tg.
+            tg.start_soon(insert_doc_section, sem, openai, pool, section)
 
 
 async def insert_doc_section(
@@ -182,7 +183,7 @@ class DocsSection:
         return '\n\n'.join((f'path: {self.path}', f'title: {self.title}', self.content))
 
 
-
+sections_ta = TypeAdapter(list[DocsSection])
 
 
 # pyright: reportUnknownMemberType=false
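
The substantive change in `build_search_db` is the switch to anyio task groups for the concurrent embedding inserts. The primitive in isolation:

```python
import anyio


async def insert(n: int) -> None:
    print('inserting section', n)


async def main() -> None:
    # start_soon(fn, *args) schedules the call; the task group
    # waits for all started tasks when the `async with` block exits
    async with anyio.create_task_group() as tg:
        for n in range(3):
            tg.start_soon(insert, n)


anyio.run(main)
```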

pydantic_ai_examples/roulette_wheel.py
CHANGED
@@ -1,4 +1,4 @@
-"""Example demonstrating how to use
+"""Example demonstrating how to use Pydantic AI to create a simple roulette game.
 
 Run with:
     uv run -m pydantic_ai_examples.roulette_wheel
@@ -28,7 +28,6 @@ roulette_agent = Agent(
     system_prompt=(
         'Use the `roulette_wheel` function to determine if the customer has won based on the number they bet on.'
     ),
-    instrument=True,
 )
 
 
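
The removed `instrument=True` is part of a repo-wide change visible in several files above: per-agent instrumentation flags are replaced by one global call made right after `logfire.configure()`:

```python
import logfire

logfire.configure(send_to_logfire='if-token-present')
logfire.instrument_pydantic_ai()  # instruments every Agent in the process
```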

pydantic_ai_examples/slack_lead_qualifier/__init__.py
File without changes

pydantic_ai_examples/slack_lead_qualifier/agent.py
ADDED
@@ -0,0 +1,47 @@
+from textwrap import dedent
+from types import NoneType
+
+import logfire
+
+### [imports]
+from pydantic_ai import Agent, NativeOutput
+from pydantic_ai.common_tools.duckduckgo import duckduckgo_search_tool  ### [/imports]
+
+from .models import Analysis, Profile
+
+### [agent]
+agent = Agent(
+    'openai:gpt-5',
+    instructions=dedent(
+        """
+        When a new person joins our public Slack, please put together a brief snapshot so we can be most useful to them.
+
+        **What to include**
+
+        1. **Who they are:** Any details about their professional role or projects (e.g. LinkedIn, GitHub, company bio).
+        2. **Where they work:** Name of the organisation and its domain.
+        3. **How we can help:** On a scale of 1–5, estimate how likely they are to benefit from **Pydantic Logfire**
+           (our paid observability tool) based on factors such as company size, product maturity, or AI usage.
+           *1 = probably not relevant, 5 = very strong fit.*
+
+        **Our products (for context only)**
+        • **Pydantic Validation** – Python data-validation (open source)
+        • **Pydantic AI** – Python agent framework (open source)
+        • **Pydantic Logfire** – Observability for traces, logs & metrics with first-class AI support (commercial)
+
+        **How to research**
+
+        • Use the provided DuckDuckGo search tool to research the person and the organization they work for, based on the email domain or what you find on e.g. LinkedIn and GitHub.
+        • If you can't find enough to form a reasonable view, return **None**.
+        """
+    ),
+    tools=[duckduckgo_search_tool()],
+    output_type=NativeOutput([Analysis, NoneType]),
+)  ### [/agent]
+
+
+### [analyze_profile]
+@logfire.instrument('Analyze profile')
+async def analyze_profile(profile: Profile) -> Analysis | None:
+    result = await agent.run(profile.as_prompt())
+    return result.output  ### [/analyze_profile]

pydantic_ai_examples/slack_lead_qualifier/app.py
ADDED
@@ -0,0 +1,36 @@
+from typing import Any
+
+import logfire
+from fastapi import FastAPI, HTTPException, status
+from logfire.propagate import get_context
+
+from .models import Profile
+
+
+### [process_slack_member]
+def process_slack_member(profile: Profile):
+    from .modal import process_slack_member as _process_slack_member
+
+    _process_slack_member.spawn(
+        profile.model_dump(), logfire_ctx=get_context()
+    )  ### [/process_slack_member]
+
+
+### [app]
+app = FastAPI()
+logfire.instrument_fastapi(app, capture_headers=True)
+
+
+@app.post('/')
+async def process_webhook(payload: dict[str, Any]) -> dict[str, Any]:
+    if payload['type'] == 'url_verification':
+        return {'challenge': payload['challenge']}
+    elif (
+        payload['type'] == 'event_callback' and payload['event']['type'] == 'team_join'
+    ):
+        profile = Profile.model_validate(payload['event']['user']['profile'])
+
+        process_slack_member(profile)
+        return {'status': 'OK'}
+
+    raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY)  ### [/app]
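
The endpoint handles Slack's two payload shapes: the one-time `url_verification` handshake (echo the challenge back) and `team_join` event callbacks; anything else gets a 422. A sketch of exercising the first and last branches with FastAPI's test client (the `team_join` branch is skipped here because it would spawn the Modal function):

```python
from fastapi.testclient import TestClient

from pydantic_ai_examples.slack_lead_qualifier.app import app

client = TestClient(app)

# Slack's URL verification handshake is echoed straight back
r = client.post('/', json={'type': 'url_verification', 'challenge': 'abc123'})
assert r.json() == {'challenge': 'abc123'}

# any other payload type is rejected
r = client.post('/', json={'type': 'something_else'})
assert r.status_code == 422
```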

pydantic_ai_examples/slack_lead_qualifier/functions.py
ADDED
@@ -0,0 +1,85 @@
+import logfire
+
+### [imports]
+from .agent import analyze_profile
+from .models import Profile
+
+### [imports-daily_summary]
+from .slack import send_slack_message
+from .store import AnalysisStore  ### [/imports,/imports-daily_summary]
+
+### [constant-new_lead_channel]
+NEW_LEAD_CHANNEL = '#new-slack-leads'
+### [/constant-new_lead_channel]
+### [constant-daily_summary_channel]
+DAILY_SUMMARY_CHANNEL = '#daily-slack-leads-summary'
+### [/constant-daily_summary_channel]
+
+
+### [process_slack_member]
+@logfire.instrument('Process Slack member')
+async def process_slack_member(profile: Profile):
+    analysis = await analyze_profile(profile)
+    logfire.info('Analysis', analysis=analysis)
+
+    if analysis is None:
+        return
+
+    await AnalysisStore().add(analysis)
+
+    await send_slack_message(
+        NEW_LEAD_CHANNEL,
+        [
+            {
+                'type': 'header',
+                'text': {
+                    'type': 'plain_text',
+                    'text': f'New Slack member with score {analysis.relevance}/5',
+                },
+            },
+            {
+                'type': 'divider',
+            },
+            *analysis.as_slack_blocks(),
+        ],
+    )  ### [/process_slack_member]
+
+
+### [send_daily_summary]
+@logfire.instrument('Send daily summary')
+async def send_daily_summary():
+    analyses = await AnalysisStore().list()
+    logfire.info('Analyses', analyses=analyses)
+
+    if len(analyses) == 0:
+        return
+
+    sorted_analyses = sorted(analyses, key=lambda x: x.relevance, reverse=True)
+    top_analyses = sorted_analyses[:5]
+
+    blocks = [
+        {
+            'type': 'header',
+            'text': {
+                'type': 'plain_text',
+                'text': f'Top {len(top_analyses)} new Slack members from the last 24 hours',
+            },
+        },
+    ]
+
+    for analysis in top_analyses:
+        blocks.extend(
+            [
+                {
+                    'type': 'divider',
+                },
+                *analysis.as_slack_blocks(include_relevance=True),
+            ]
+        )
+
+    await send_slack_message(
+        DAILY_SUMMARY_CHANNEL,
+        blocks,
+    )
+
+    await AnalysisStore().clear()  ### [/send_daily_summary]

pydantic_ai_examples/slack_lead_qualifier/modal.py
ADDED
@@ -0,0 +1,66 @@
+from typing import Any
+
+### [setup_modal]
+import modal
+
+image = modal.Image.debian_slim(python_version='3.13').pip_install(
+    'pydantic',
+    'pydantic_ai_slim[openai,duckduckgo]',
+    'logfire[httpx,fastapi]',
+    'fastapi[standard]',
+    'httpx',
+)
+app = modal.App(
+    name='slack-lead-qualifier',
+    image=image,
+    secrets=[
+        modal.Secret.from_name('logfire'),
+        modal.Secret.from_name('openai'),
+        modal.Secret.from_name('slack'),
+    ],
+)  ### [/setup_modal]
+
+
+### [setup_logfire]
+def setup_logfire():
+    import logfire
+
+    logfire.configure(service_name=app.name)
+    logfire.instrument_pydantic_ai()
+    logfire.instrument_httpx(capture_all=True)  ### [/setup_logfire]
+
+
+### [web_app]
+@app.function(min_containers=1)
+@modal.asgi_app()  # type: ignore
+def web_app():
+    setup_logfire()
+
+    from .app import app as _app
+
+    return _app  ### [/web_app]
+
+
+### [process_slack_member]
+@app.function()
+async def process_slack_member(profile_raw: dict[str, Any], logfire_ctx: Any):
+    setup_logfire()
+
+    from logfire.propagate import attach_context
+
+    from .functions import process_slack_member as _process_slack_member
+    from .models import Profile
+
+    with attach_context(logfire_ctx):
+        profile = Profile.model_validate(profile_raw)
+        await _process_slack_member(profile)  ### [/process_slack_member]
+
+
+### [send_daily_summary]
+@app.function(schedule=modal.Cron('0 8 * * *'))  # Every day at 8am UTC
+async def send_daily_summary():
+    setup_logfire()
+
+    from .functions import send_daily_summary as _send_daily_summary
+
+    await _send_daily_summary()  ### [/send_daily_summary]
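
Note the trace handoff in `process_slack_member`: the context captured with `get_context()` in `app.py` is re-attached inside the spawned Modal function, so the webhook request and the background analysis land in the same Logfire trace. The primitive in isolation:

```python
import logfire
from logfire.propagate import attach_context, get_context

logfire.configure(send_to_logfire='if-token-present')

with logfire.span('incoming webhook'):
    ctx = get_context()  # a serializable carrier, safe to send to another process

# ... later, in the worker process:
with attach_context(ctx):
    logfire.info('this log joins the webhook trace')
```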