pixie-examples 0.1.1.dev3__py3-none-any.whl

@@ -0,0 +1,328 @@
+ """
+ Financial Research Agent - Multi-Step Workflow - Pixie Integration
+
+ This example demonstrates a structured financial research workflow with multiple
+ specialized agents working together to produce comprehensive financial analysis reports.
+
+ Pattern: Multi-Step Workflow with Multi-Agent
+ Original: https://github.com/openai/openai-agents-python/tree/main/examples/financial_research_agent
+
+ Architecture:
+ 1. Planner Agent - Creates search plan
+ 2. Search Agent - Performs web searches
+ 3. Specialist Analysts - Financial fundamentals & risk analysis
+ 4. Writer Agent - Synthesizes final report
+ 5. Verifier Agent - Quality checks the report
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ from collections.abc import Sequence
+
+ from pydantic import BaseModel
+ from agents import Agent, Runner, RunResult, RunResultStreaming, WebSearchTool
+ import pixie
+
+
+ # ============================================================================
+ # AGENT DEFINITIONS
+ # ============================================================================
+
+
+ # --- Planner Agent ---
+ class FinancialSearchItem(BaseModel):
+     """A single search to perform"""
+
+     reason: str
+     """Your reasoning for why this search is relevant."""
+
+     query: str
+     """The search term to feed into a web (or file) search."""
+
+
+ class FinancialSearchPlan(BaseModel):
+     """Plan of searches to perform"""
+
+     searches: list[FinancialSearchItem]
+     """A list of searches to perform."""
+
+
+ PLANNER_PROMPT = (
+     "You are a financial research planner. Given a request for financial analysis, "
+     "produce a set of web searches to gather the context needed. Aim for recent "
+     "headlines, earnings calls or 10‑K snippets, analyst commentary, and industry background. "
+     "Output between 5 and 15 search terms to query for."
+ )
+
+ planner_agent = Agent(
+     name="FinancialPlannerAgent",
+     instructions=PLANNER_PROMPT,
+     model="o3-mini",
+     output_type=FinancialSearchPlan,
+ )
+
+
+ # --- Search Agent ---
+ SEARCH_INSTRUCTIONS = (
+     "You are a research assistant specializing in financial topics. "
+     "Given a search term, use web search to retrieve up‑to‑date context and "
+     "produce a short summary of at most 300 words. Focus on key numbers, events, "
+     "or quotes that will be useful to a financial analyst."
+ )
+
+ search_agent = Agent(
+     name="FinancialSearchAgent",
+     model="gpt-5.2",
+     instructions=SEARCH_INSTRUCTIONS,
+     tools=[WebSearchTool()],
+ )
+
+
+ # --- Analyst Agents ---
+ class AnalysisSummary(BaseModel):
+     """Analysis output from specialist agents"""
+
+     summary: str
+     """Short text summary for this aspect of the analysis."""
+
+
+ FINANCIALS_PROMPT = (
+     "You are a financial analyst focused on company fundamentals such as revenue, "
+     "profit, margins and growth trajectory. Given a collection of web (and optional file) "
+     "search results about a company, write a concise analysis of its recent financial "
+     "performance. Pull out key metrics or quotes. Keep it under 2 paragraphs."
+ )
+
+ financials_agent = Agent(
+     name="FundamentalsAnalystAgent",
+     instructions=FINANCIALS_PROMPT,
+     output_type=AnalysisSummary,
+ )
+
+ RISK_PROMPT = (
+     "You are a risk analyst looking for potential red flags in a company's outlook. "
+     "Given background research, produce a short analysis of risks such as competitive threats, "
+     "regulatory issues, supply chain problems, or slowing growth. Keep it under 2 paragraphs."
+ )
+
+ risk_agent = Agent(
+     name="RiskAnalystAgent",
+     instructions=RISK_PROMPT,
+     output_type=AnalysisSummary,
+ )
+
+
+ # --- Writer Agent ---
+ class FinancialReportData(BaseModel):
+     """Final report output"""
+
+     short_summary: str
+     """A short 2‑3 sentence executive summary."""
+
+     markdown_report: str
+     """The full markdown report."""
+
+     follow_up_questions: list[str]
+     """Suggested follow‑up questions for further research."""
+
+
+ WRITER_PROMPT = (
+     "You are a senior financial analyst. You will be provided with the original query and "
+     "a set of raw search summaries. Your task is to synthesize these into a long‑form markdown "
+     "report (at least several paragraphs) including a short executive summary and follow‑up "
+     "questions. If needed, you can call the available analysis tools (e.g. fundamentals_analysis, "
+     "risk_analysis) to get short specialist write‑ups to incorporate."
+ )
+
+ writer_agent = Agent(
+     name="FinancialWriterAgent",
+     instructions=WRITER_PROMPT,
+     model="gpt-5.2",
+     output_type=FinancialReportData,
+ )
+
+
+ # --- Verifier Agent ---
+ class VerificationResult(BaseModel):
+     """Verification outcome"""
+
+     verified: bool
+     """Whether the report seems coherent and plausible."""
+
+     issues: str
+     """If not verified, describe the main issues or concerns."""
+
+
+ VERIFIER_PROMPT = (
+     "You are a meticulous auditor. You have been handed a financial analysis report. "
+     "Your job is to verify the report is internally consistent, clearly sourced, and makes "
+     "no unsupported claims. Point out any issues or uncertainties."
+ )
+
+ verifier_agent = Agent(
+     name="VerificationAgent",
+     instructions=VERIFIER_PROMPT,
+     model="gpt-5.2",
+     output_type=VerificationResult,
+ )
+
+
+ # ============================================================================
+ # HELPER FUNCTIONS
+ # ============================================================================
+
+
+ async def _summary_extractor(run_result: RunResult | RunResultStreaming) -> str:
+     """Custom output extractor for sub‑agents that return an AnalysisSummary."""
+     return str(run_result.final_output.summary)
+
+
+ async def _plan_searches(query: str) -> FinancialSearchPlan:
+     """Create a search plan for the given query"""
+     result = await Runner.run(planner_agent, f"Query: {query}")
+     return result.final_output_as(FinancialSearchPlan)
+
+
+ async def _perform_search(item: FinancialSearchItem) -> str | None:
+     """Perform a single search"""
+     input_data = f"Search term: {item.query}\nReason: {item.reason}"
+     try:
+         result = await Runner.run(search_agent, input_data)
+         return str(result.final_output)
+     except Exception:
+         return None
+
+
+ async def _perform_searches(search_plan: FinancialSearchPlan) -> Sequence[str]:
+     """Perform all searches in parallel"""
+     tasks = [
+         asyncio.create_task(_perform_search(item)) for item in search_plan.searches
+     ]
+     results: list[str] = []
+
+     for task in asyncio.as_completed(tasks):
+         result = await task
+         if result is not None:
+             results.append(result)
+
+     return results
+
+
+ async def _write_report(
+     query: str, search_results: Sequence[str]
+ ) -> FinancialReportData:
+     """Write the final report using specialist tools"""
+     # Expose the specialist analysts as tools
+     fundamentals_tool = financials_agent.as_tool(
+         tool_name="fundamentals_analysis",
+         tool_description="Use to get a short write‑up of key financial metrics",
+         custom_output_extractor=_summary_extractor,
+     )
+     risk_tool = risk_agent.as_tool(
+         tool_name="risk_analysis",
+         tool_description="Use to get a short write‑up of potential red flags",
+         custom_output_extractor=_summary_extractor,
+     )
+
+     writer_with_tools = writer_agent.clone(tools=[fundamentals_tool, risk_tool])
+     input_data = f"Original query: {query}\nSummarized search results: {search_results}"
+
+     result = await Runner.run(writer_with_tools, input_data)
+     return result.final_output_as(FinancialReportData)
+
+
+ async def _verify_report(report: FinancialReportData) -> VerificationResult:
+     """Verify report quality"""
+     result = await Runner.run(verifier_agent, report.markdown_report)
+     return result.final_output_as(VerificationResult)
+
+
+ # ============================================================================
+ # PIXIE APP
+ # ============================================================================
+
+
+ @pixie.app
+ async def openai_agents_financial_research(
+     query: str,
+ ) -> pixie.PixieGenerator[str, None]:
+     """
+     Comprehensive financial research agent with multi-step workflow.
+
+     This agent orchestrates a full research workflow:
+     1. Plans searches based on the query
+     2. Executes web searches in parallel
+     3. Analyzes fundamentals and risks using specialist agents
+     4. Synthesizes a comprehensive markdown report
+     5. Verifies the report for quality and consistency
+
+     Args:
+         query: Financial research question or company to analyze
+
+     Yields:
+         Progress updates and the final comprehensive report
+
+     Example queries:
+         - "Write up an analysis of Apple Inc.'s most recent quarter."
+         - "Analyze Tesla's financial performance and growth prospects"
+         - "What are the key risks facing Microsoft in 2026?"
+     """
+     yield f"🔍 Starting financial research for: {query}\n"
+
+     # Step 1: Planning
+     yield "📋 Planning searches..."
+     search_plan = await _plan_searches(query)
+     num_searches = len(search_plan.searches)
+     yield f"✓ Will perform {num_searches} searches\n"
+
+     # Show search plan
+     yield "Search plan:"
+     for i, item in enumerate(search_plan.searches[:5], 1):  # Show first 5
+         yield f" {i}. {item.query} - {item.reason}"
+     if num_searches > 5:
+         yield f" ... and {num_searches - 5} more"
+     yield ""
+
+     # Step 2: Searching
+     yield f"🔎 Executing {num_searches} searches in parallel..."
+     search_results = await _perform_searches(search_plan)
+     yield f"✓ Completed {len(search_results)} successful searches\n"
+
+     # Step 3: Writing report
+     yield "✍️ Analyzing data and writing comprehensive report..."
+     yield "(This may take a minute as specialist analysts are consulted...)"
+     report = await _write_report(query, search_results)
+     yield "✓ Report complete\n"
+
+     # Step 4: Verification
+     yield "🔍 Verifying report quality..."
+     verification = await _verify_report(report)
+     if verification.verified:
+         yield "✓ Report verified\n"
+     else:
+         yield f"⚠️ Verification issues found: {verification.issues}\n"
+
+     # Output summary
+     yield "=" * 60
+     yield "EXECUTIVE SUMMARY"
+     yield "=" * 60
+     yield report.short_summary
+     yield ""
+
+     # Full report
+     yield "=" * 60
+     yield "FULL REPORT"
+     yield "=" * 60
+     yield report.markdown_report
+     yield ""
+
+     # Follow-up questions
+     yield "=" * 60
+     yield "SUGGESTED FOLLOW-UP QUESTIONS"
+     yield "=" * 60
+     for i, question in enumerate(report.follow_up_questions, 1):
+         yield f"{i}. {question}"
+
+     yield ""
+     yield "✓ Financial research complete!"
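For readers who want to exercise this pipeline outside the Pixie runtime, the five steps can be driven directly through the module's helper functions. A minimal sketch, assuming the file above is importable as financial_research (the real module path is not shown in this diff) and that OPENAI_API_KEY is set:

import asyncio

# Hypothetical module name for the file above; adjust to the actual path in pixie-examples.
from financial_research import (
    _perform_searches,
    _plan_searches,
    _verify_report,
    _write_report,
)


async def main() -> None:
    query = "Write up an analysis of Apple Inc.'s most recent quarter."
    plan = await _plan_searches(query)              # 1. plan the web searches
    summaries = await _perform_searches(plan)       # 2. run them concurrently
    report = await _write_report(query, summaries)  # 3. synthesize the markdown report
    verification = await _verify_report(report)     # 4. audit the result
    print(report.short_summary)
    if not verification.verified:
        print("Verifier concerns:", verification.issues)


if __name__ == "__main__":
    asyncio.run(main())

The Pixie app itself layers streaming progress messages on top of exactly this sequence.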
@@ -0,0 +1,108 @@
+ """
+ LLM as a Judge Pattern - Pixie Integration
+
+ This example demonstrates the LLM as a judge pattern where one agent generates content
+ and another agent evaluates it iteratively until quality standards are met.
+
+ Pattern: Multi-Step Workflow
+ Original: https://github.com/openai/openai-agents-python/blob/main/examples/agent_patterns/llm_as_a_judge.py
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from typing import Literal
+
+ from agents import Agent, ItemHelpers, Runner, TResponseInputItem
+ import pixie
+
+
+ # Story outline generator agent
+ story_outline_generator = Agent(
+     name="story_outline_generator",
+     instructions=(
+         "You generate a very short story outline based on the user's input. "
+         "If there is any feedback provided, use it to improve the outline."
+     ),
+ )
+
+
+ @dataclass
+ class EvaluationFeedback:
+     """Feedback from the evaluator agent"""
+
+     feedback: str
+     score: Literal["pass", "needs_improvement", "fail"]
+
+
+ # Evaluator agent that judges the story outline
+ evaluator = Agent[None](
+     name="evaluator",
+     instructions=(
+         "You evaluate a story outline and decide if it's good enough. "
+         "If it's not good enough, you provide feedback on what needs to be improved. "
+         "Never give it a pass on the first try. After 5 attempts, you can give "
+         "it a pass if the story outline is good enough - do not go for perfection"
+     ),
+     output_type=EvaluationFeedback,
+ )
+
+
+ @pixie.app
+ async def openai_agents_llm_as_a_judge(topic: str) -> pixie.PixieGenerator[str, None]:
+     """
+     Generate a story outline using LLM-as-a-judge pattern.
+
+     The story generator creates an outline, which is then evaluated by a judge agent.
+     The process repeats until the judge is satisfied with the quality.
+
+     Args:
+         topic: The story topic to generate an outline for
+
+     Yields:
+         Status updates and the final story outline
+     """
+     yield f"Starting story generation for: {topic}"
+
+     input_items: list[TResponseInputItem] = [{"content": topic, "role": "user"}]
+     latest_outline: str | None = None
+
+     iteration = 0
+     max_iterations = 10
+
+     while iteration < max_iterations:
+         iteration += 1
+         yield f"\n--- Iteration {iteration} ---"
+
+         # Generate story outline
+         yield "Generating story outline..."
+         story_outline_result = await Runner.run(
+             story_outline_generator,
+             input_items,
+         )
+         input_items = story_outline_result.to_input_list()
+         latest_outline = ItemHelpers.text_message_outputs(
+             story_outline_result.new_items
+         )
+         yield f"Story outline generated:\n{latest_outline}"
+
+         # Evaluate the outline
+         yield "\nEvaluating outline..."
+         evaluator_result = await Runner.run(evaluator, input_items)
+         result: EvaluationFeedback = evaluator_result.final_output
+
+         yield f"Evaluator score: {result.score}"
+
+         if result.score == "pass":
+             yield "\n✓ Story outline is good enough!"
+             break
+
+         # Provide feedback for next iteration
+         yield f"Feedback: {result.feedback}"
+         yield "Re-running with feedback..."
+         input_items.append({"content": f"Feedback: {result.feedback}", "role": "user"})
+     else:
+         # Only reached when the loop exhausts max_iterations without a "pass"
+         yield f"\n⚠ Reached maximum iterations ({max_iterations})"
+
+     yield f"\n=== Final Story Outline ===\n{latest_outline}"
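Stripped of the model calls, the loop above is a small generate → evaluate → revise cycle: the evaluator's feedback is appended to the conversation and shapes the next draft, until the judge returns "pass" or the iteration cap is hit. A dependency-free sketch of that control flow, with stand-in callables in place of the two agents (illustrative only):

from typing import Callable


def judge_loop(
    generate: Callable[[list[str]], str],        # stand-in for story_outline_generator
    evaluate: Callable[[str], tuple[str, str]],  # stand-in for evaluator: (score, feedback)
    topic: str,
    max_iterations: int = 10,
) -> str:
    history = [topic]
    outline = ""
    for _ in range(max_iterations):
        outline = generate(history)
        score, feedback = evaluate(outline)
        if score == "pass":
            break
        history.append(f"Feedback: {feedback}")  # the judge's notes drive the next draft
    return outline


# Toy stand-ins: the "judge" passes once the outline mentions an ending.
def fake_generate(history: list[str]) -> str:
    draft = len(history)  # grows by one each time feedback is appended
    suffix = " ...and an ending." if draft > 1 else ""
    return f"Outline draft {draft} for: {history[0]}{suffix}"


def fake_evaluate(outline: str) -> tuple[str, str]:
    if "ending" in outline:
        return "pass", ""
    return "needs_improvement", "Add an ending."


print(judge_loop(fake_generate, fake_evaluate, "a robot learns to paint"))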
@@ -0,0 +1,177 @@
+ """
+ Routing/Handoffs Pattern - Pixie Integration
+
+ This example demonstrates agent routing where a triage agent hands off to specialized
+ language agents based on the user's language preference.
+
+ Pattern: Graph/State-Machine (Routing)
+ Original: https://github.com/openai/openai-agents-python/blob/main/examples/agent_patterns/routing.py
+ """
+
+ from __future__ import annotations
+
+
+ from openai.types.responses import ResponseContentPartDoneEvent, ResponseTextDeltaEvent
+ from agents import Agent, RawResponsesStreamEvent, Runner, TResponseInputItem
+ import pixie
+
+
+ # ============================================================================
+ # LANGUAGE AGENTS
+ # ============================================================================
+
+ french_agent = Agent(
+     name="french_agent",
+     instructions="You only speak French",
+ )
+
+ spanish_agent = Agent(
+     name="spanish_agent",
+     instructions="You only speak Spanish",
+ )
+
+ english_agent = Agent(
+     name="english_agent",
+     instructions="You only speak English",
+ )
+
+ triage_agent = Agent(
+     name="triage_agent",
+     instructions="Handoff to the appropriate agent based on the language of the request.",
+     handoffs=[french_agent, spanish_agent, english_agent],
+ )
+
+
+ @pixie.app
+ async def openai_multilingual_routing() -> pixie.PixieGenerator[str, str]:
+     """
+     Multi-agent language routing system with streaming responses.
+
+     The triage agent receives messages and hands off to the appropriate language
+     specialist agent (French, Spanish, or English) based on the detected language.
+
+     Responses are streamed in real-time to provide immediate feedback.
+
+     Yields:
+         Streamed agent responses
+
+     Receives:
+         User messages in any supported language via InputRequired
+     """
+     agent = triage_agent
+     inputs: list[TResponseInputItem] = []
+
+     yield "Hi! We speak French, Spanish and English. How can I help?"
+     yield "(Type 'exit' to quit)"
+
+     while True:
+         # Get user message
+         user_msg = yield pixie.InputRequired(str)
+
+         # Check for exit
+         if user_msg.lower() in {"exit", "quit", "bye"}:
+             yield "Goodbye! Au revoir! ¡Adiós!"
+             break
+
+         inputs.append({"content": user_msg, "role": "user"})
+
+         # Run agent with streaming
+         result = Runner.run_streamed(
+             agent,
+             input=inputs,
+         )
+
+         # Stream the response
+         current_response = []
+         async for event in result.stream_events():
+             if not isinstance(event, RawResponsesStreamEvent):
+                 continue
+
+             data = event.data
+
+             if isinstance(data, ResponseTextDeltaEvent):
+                 # Stream text deltas
+                 current_response.append(data.delta)
+                 yield data.delta
+
+             elif isinstance(data, ResponseContentPartDoneEvent):
+                 # Content part complete
+                 yield "\n"
+
+         # Update state for next turn
+         inputs = result.to_input_list()
+         agent = result.current_agent
+
+         # Show which agent handled the request
+         agent_name = agent.name
+         yield f"\n[Handled by: {agent_name}]"
+
+
+ @pixie.app
+ async def openai_multilingual_routing_simple(
+     initial_message: str,
+ ) -> pixie.PixieGenerator[str, str]:
+     """
+     Simplified multilingual routing with single initial message.
+
+     This version accepts an initial message and then enters an interactive loop.
+     Good for testing with a specific language right away.
+
+     Args:
+         initial_message: The first message to send (in any supported language)
+
+     Yields:
+         Streamed agent responses
+
+     Receives:
+         Follow-up user messages via InputRequired
+     """
+     agent = triage_agent
+     inputs: list[TResponseInputItem] = [{"content": initial_message, "role": "user"}]
+
+     yield f"Processing your message: {initial_message[:50]}...\n"
+
+     # Process initial message
+     result = Runner.run_streamed(agent, input=inputs)
+
+     async for event in result.stream_events():
+         if not isinstance(event, RawResponsesStreamEvent):
+             continue
+
+         data = event.data
+         if isinstance(data, ResponseTextDeltaEvent):
+             yield data.delta
+         elif isinstance(data, ResponseContentPartDoneEvent):
+             yield "\n"
+
+     inputs = result.to_input_list()
+     agent = result.current_agent
+     yield f"\n[Agent: {agent.name}]"
+
+     # Continue conversation
+     yield "\nContinue the conversation (type 'exit' to quit):"
+
+     while True:
+         user_msg = yield pixie.InputRequired(str)
+
+         if user_msg.lower() in {"exit", "quit", "bye"}:
+             yield "Session ended. Merci! ¡Gracias! Thank you!"
+             break
+
+         inputs.append({"content": user_msg, "role": "user"})
+
+         result = Runner.run_streamed(agent, input=inputs)
+
+         async for event in result.stream_events():
+             if not isinstance(event, RawResponsesStreamEvent):
+                 continue
+
+             data = event.data
+             if isinstance(data, ResponseTextDeltaEvent):
+                 yield data.delta
+             elif isinstance(data, ResponseContentPartDoneEvent):
+                 yield "\n"
+
+         inputs = result.to_input_list()
+         agent = result.current_agent
+         yield f"\n[Agent: {agent.name}]"
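The same handoff loop can also be driven from a plain console: stream the triage agent's response, then carry result.to_input_list() and result.current_agent into the next turn so follow-ups go straight to whichever language agent the conversation was handed to. A minimal sketch reusing triage_agent from the file above (the import path is hypothetical, since the module name is not shown in this diff):

import asyncio

from openai.types.responses import ResponseTextDeltaEvent
from agents import RawResponsesStreamEvent, Runner

from routing_example import triage_agent  # hypothetical module name for the file above


async def console_chat() -> None:
    agent = triage_agent
    inputs: list = []
    while True:
        msg = input("you> ")
        if msg.lower() in {"exit", "quit"}:
            break
        inputs.append({"content": msg, "role": "user"})
        result = Runner.run_streamed(agent, input=inputs)
        async for event in result.stream_events():
            if isinstance(event, RawResponsesStreamEvent) and isinstance(
                event.data, ResponseTextDeltaEvent
            ):
                print(event.data.delta, end="", flush=True)
        print(f"\n[handled by {result.current_agent.name}]")
        # Keep both the transcript and the current agent for the next turn,
        # so the conversation stays with the language specialist after a handoff.
        inputs = result.to_input_list()
        agent = result.current_agent


if __name__ == "__main__":
    asyncio.run(console_chat())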
@@ -0,0 +1,26 @@
+ # API Keys for PydanticAI Examples
+ # Copy this file to .env and add your actual API keys
+
+ # OpenAI API Key (required for GPT models)
+ # Get yours at: https://platform.openai.com/api-keys
+ OPENAI_API_KEY=your-openai-api-key-here
+
+ # Google Gemini API Key (optional, for Gemini models)
+ # Get yours at: https://makersuite.google.com/app/apikey
+ GEMINI_API_KEY=your-gemini-api-key-here
+
+ # Groq API Key (optional, for Groq models)
+ # Get yours at: https://console.groq.com/keys
+ GROQ_API_KEY=your-groq-api-key-here
+
+ # Pydantic Logfire Token (optional, for observability)
+ # Get yours at: https://logfire.pydantic.dev/
+ LOGFIRE_TOKEN=your-logfire-token-here
+
+ # Weather API Key (optional, for weather_agent with real data)
+ # Get yours at: https://www.tomorrow.io/weather-api/
+ WEATHER_API_KEY=your-weather-api-key-here
+
+ # Geocoding API Key (optional, for weather_agent with real data)
+ # Get yours at: https://geocode.maps.co/
+ GEO_API_KEY=your-geo-api-key-here
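How these variables reach the process is not part of this diff; a common pattern is to load the .env file with python-dotenv before constructing any agents. A short sketch under that assumption (whether pixie-examples itself uses python-dotenv is not confirmed here):

import os

from dotenv import load_dotenv  # assumes python-dotenv is installed

load_dotenv()  # reads .env from the current working directory

openai_key = os.getenv("OPENAI_API_KEY")
if not openai_key or openai_key == "your-openai-api-key-here":
    raise RuntimeError("Set OPENAI_API_KEY in .env before running the examples")

# The remaining keys are optional; examples that need them can check for None.
gemini_key = os.getenv("GEMINI_API_KEY")
groq_key = os.getenv("GROQ_API_KEY")
logfire_token = os.getenv("LOGFIRE_TOKEN")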