pixie-examples 0.1.1.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
examples/__init__.py ADDED
File without changes
@@ -0,0 +1,39 @@
# LangChain Examples

This directory contains LangChain examples integrated with the Pixie SDK.

## Examples

1. **basic_agent.py** - A simple quickstart agent that can answer questions and call tools
2. **personal_assistant.py** - Multi-agent personal assistant with subagents for calendar and email
3. **customer_support.py** - Customer support agent with a state-machine pattern (handoffs)
4. **sql_agent.py** - SQL database query agent (multi-turn & multi-step)
5. **langgraph_sql_agent.py** - Custom SQL agent built with LangGraph primitives
6. **langgraph_rag.py** - Custom RAG (Retrieval-Augmented Generation) agent with LangGraph

## Setup

Install the required dependencies (the examples also use Langfuse for tracing):

```bash
poetry add langchain langchain-openai langchain-community langgraph langchain-text-splitters beautifulsoup4 langfuse
```

Set up environment variables in `.env`:

```bash
OPENAI_API_KEY=your_openai_api_key
ANTHROPIC_API_KEY=your_anthropic_api_key
LANGFUSE_PUBLIC_KEY=your_langfuse_public_key # For the Langfuse tracing used by the examples
LANGFUSE_SECRET_KEY=your_langfuse_secret_key
LANGSMITH_API_KEY=your_langsmith_api_key # Optional, for tracing
LANGSMITH_TRACING=true # Optional
```

## Running Examples

Start the Pixie server:

```bash
poetry run pixie
```

Then use GraphiQL at `http://127.0.0.1:8000/graphql` to run the agents.
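
The exact query shape depends on the GraphQL schema Pixie generates, so check the schema explorer in GraphiQL first. As a rough sketch, the server can also be called over plain HTTP; note that the `runApp` field below is a hypothetical placeholder, not the real schema:

```python
import json
import urllib.request

# NOTE: "runApp" and its arguments are hypothetical placeholders; look up the
# real query or mutation names in the GraphiQL schema explorer.
query = """
mutation {
  runApp(name: "langchain_basic_weather_agent", input: "Weather in Paris?")
}
"""

req = urllib.request.Request(
    "http://127.0.0.1:8000/graphql",
    data=json.dumps({"query": query}).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))
```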
@@ -0,0 +1 @@
# LangChain examples for Pixie SDK
@@ -0,0 +1,100 @@
"""
Basic LangChain Agent Example (Quickstart)

This example demonstrates a simple agent that can answer questions and call tools.
Based on: https://docs.langchain.com/oss/python/langchain/quickstart
"""

from langchain.agents import create_agent
from langchain.chat_models import init_chat_model

from langfuse.langchain import CallbackHandler
import pixie


langfuse_handler = CallbackHandler()


def get_weather(city: str) -> str:
    """Get weather for a given city."""
    return f"It's always sunny in {city}!"


@pixie.app
async def langchain_basic_weather_agent(query: str) -> str:
    """A simple weather agent that can answer questions using tools.

    Args:
        query: User's question about weather

    Returns:
        AI-generated response
    """
    # Initialize the model
    model = init_chat_model("gpt-4o-mini", temperature=0)

    # Create agent with the weather tool
    agent = create_agent(
        model=model,
        tools=[get_weather],
        system_prompt="You are a helpful weather assistant",
    )

    # Run the agent (async API, since this function is async)
    result = await agent.ainvoke(
        {"messages": [{"role": "user", "content": query}]},
        config={"callbacks": [langfuse_handler]},
    )

    # Return the final response
    return result["messages"][-1].content


@pixie.app
async def langchain_interactive_weather_agent() -> pixie.PixieGenerator[str, str]:
    """An interactive weather chatbot that maintains conversation.

    This agent can have multi-turn conversations with the user.

    Yields:
        AI responses to user questions
    """
    # Initialize the model
    model = init_chat_model("gpt-4o-mini", temperature=0)

    # Create agent with the weather tool
    agent = create_agent(
        model=model,
        tools=[get_weather],
        system_prompt="You are a helpful weather assistant that answers questions about weather.",
    )

    # Send welcome message
    yield "Hello! I'm a weather assistant. Ask me about the weather in any city!"

    # Initialize conversation history
    messages = []

    while True:
        # Get user input
        user_query = yield pixie.InputRequired(str)

        # Check for exit commands
        if user_query.lower() in {"exit", "quit", "bye", "goodbye"}:
            yield "Goodbye! Have a great day!"
            break

        # Add user message to history
        messages.append({"role": "user", "content": user_query})

        # Run the agent with the full conversation history (async API)
        result = await agent.ainvoke(
            {"messages": messages}, config={"callbacks": [langfuse_handler]}
        )

        # Update history with the AI response
        messages = result["messages"]

        # Yield the AI's response
        ai_response = result["messages"][-1].content
        yield ai_response
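

# Compiled LangGraph agents also expose astream for incremental output.
# A sketch of a streaming variant, reusing the get_weather tool above
# (stream_mode="values" yields the full accumulated state after each step):
async def stream_weather_answer(query: str) -> None:
    model = init_chat_model("gpt-4o-mini", temperature=0)
    agent = create_agent(
        model=model,
        tools=[get_weather],
        system_prompt="You are a helpful weather assistant",
    )
    async for state in agent.astream(
        {"messages": [{"role": "user", "content": query}]},
        stream_mode="values",
    ):
        # Print the newest message after each model or tool step.
        print(state["messages"][-1].content)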
@@ -0,0 +1,238 @@
"""
Customer Support with Handoffs (State Machine)

This example demonstrates the state-machine pattern, where an agent's behavior
changes as it moves through different states of a workflow.

Based on: https://docs.langchain.com/oss/python/langchain/multi-agent/handoffs-customer-support
"""

from typing import Callable, Literal, NotRequired

from langchain.agents import create_agent, AgentState
from langchain.agents.middleware import wrap_model_call, ModelRequest, ModelResponse
from langchain.chat_models import init_chat_model
from langchain.tools import tool, ToolRuntime
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.types import Command

from langfuse.langchain import CallbackHandler
import pixie


langfuse_handler = CallbackHandler()


# Define the possible workflow steps
SupportStep = Literal["warranty_collector", "issue_classifier", "resolution_specialist"]


class SupportState(AgentState):
    """State for the customer support workflow."""

    current_step: NotRequired[SupportStep]
    warranty_status: NotRequired[Literal["in_warranty", "out_of_warranty"]]
    issue_type: NotRequired[Literal["hardware", "software"]]


# Define tools that manage workflow state
@tool
def record_warranty_status(
    status: Literal["in_warranty", "out_of_warranty"],
    runtime: ToolRuntime[None, SupportState],
) -> Command:
    """Record the customer's warranty status and transition to issue classification."""
    return Command(
        update={
            "messages": [
                {
                    "role": "tool",
                    "content": f"Warranty status recorded as: {status}",
                    "tool_call_id": runtime.tool_call_id,
                }
            ],
            "warranty_status": status,
            "current_step": "issue_classifier",
        }
    )


@tool
def record_issue_type(
    issue_type: Literal["hardware", "software"],
    runtime: ToolRuntime[None, SupportState],
) -> Command:
    """Record the type of issue and transition to the resolution specialist."""
    return Command(
        update={
            "messages": [
                {
                    "role": "tool",
                    "content": f"Issue type recorded as: {issue_type}",
                    "tool_call_id": runtime.tool_call_id,
                }
            ],
            "issue_type": issue_type,
            "current_step": "resolution_specialist",
        }
    )


@tool
def escalate_to_human(reason: str) -> str:
    """Escalate the case to a human support specialist."""
    return f"Escalating to human support. Reason: {reason}"


@tool
def provide_solution(solution: str) -> str:
    """Provide a solution to the customer's issue."""
    return f"Solution provided: {solution}"


# Step prompts
WARRANTY_COLLECTOR_PROMPT = """You are a customer support agent collecting warranty information.

CURRENT STAGE: Warranty Verification

Ask the customer if their device is under warranty. Once you have this information,
use the record_warranty_status tool to record it and move to the next step.

Be polite and professional."""

ISSUE_CLASSIFIER_PROMPT = """You are a customer support agent classifying technical issues.

CURRENT STAGE: Issue Classification
CUSTOMER INFO: Warranty status is {warranty_status}

Ask the customer to describe their issue, then determine if it's:
- HARDWARE: Physical problems (cracked screen, battery, ports, buttons)
- SOFTWARE: App crashes, performance, settings, updates

Use record_issue_type to record the classification and move to resolution."""

RESOLUTION_SPECIALIST_PROMPT = """You are a customer support agent helping with device issues.

CURRENT STAGE: Resolution
CUSTOMER INFO: Warranty status is {warranty_status}, issue type is {issue_type}

At this step, you need to:
1. For SOFTWARE issues: provide troubleshooting steps using provide_solution
2. For HARDWARE issues:
   - If IN WARRANTY: explain the warranty repair process using provide_solution
   - If OUT OF WARRANTY: use escalate_to_human for paid repair options

Be specific and helpful in your solutions."""

# Step configuration
STEP_CONFIG = {
    "warranty_collector": {
        "prompt": WARRANTY_COLLECTOR_PROMPT,
        "tools": [record_warranty_status],
        "requires": [],
    },
    "issue_classifier": {
        "prompt": ISSUE_CLASSIFIER_PROMPT,
        "tools": [record_issue_type],
        "requires": ["warranty_status"],
    },
    "resolution_specialist": {
        "prompt": RESOLUTION_SPECIALIST_PROMPT,
        "tools": [provide_solution, escalate_to_human],
        "requires": ["warranty_status", "issue_type"],
    },
}


# Create step-based middleware
@wrap_model_call
def apply_step_config(
    request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
) -> ModelResponse:
    """Configure agent behavior based on the current step."""
    # Get the current step (defaults to warranty_collector for the first interaction)
    current_step = request.state.get("current_step", "warranty_collector")

    # Look up the step configuration
    stage_config = STEP_CONFIG[current_step]

    # Validate that the required state exists
    for key in stage_config["requires"]:
        if request.state.get(key) is None:
            raise ValueError(f"{key} must be set before reaching {current_step}")

    # Format the step prompt with state values. For simplicity, this example
    # only validates state and passes the request through unchanged; injecting
    # the formatted prompt and the step's tool subset is shown in the sketch
    # below.
    _ = stage_config["prompt"].format(**request.state)

    return handler(request)


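# A variant that actually applies the per-step prompt and tool subset. This
# assumes ModelRequest.override() accepts system_prompt and tools keyword
# arguments, as described in recent LangChain middleware docs; verify against
# your installed langchain version before relying on it.
@wrap_model_call
def apply_step_config_with_override(
    request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
) -> ModelResponse:
    current_step = request.state.get("current_step", "warranty_collector")
    stage_config = STEP_CONFIG[current_step]

    # Validate required state before formatting the prompt with it.
    for key in stage_config["requires"]:
        if request.state.get(key) is None:
            raise ValueError(f"{key} must be set before reaching {current_step}")

    # Swap in the step's prompt and restrict the tool set for this model call.
    request = request.override(
        system_prompt=stage_config["prompt"].format(**request.state),
        tools=stage_config["tools"],
    )
    return handler(request)

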
@pixie.app
async def langchain_customer_support() -> pixie.PixieGenerator[str, str]:
    """Customer support agent with a state-machine workflow.

    The agent progresses through three stages:
    1. Warranty verification
    2. Issue classification (hardware/software)
    3. Resolution (solution or escalation)

    Yields:
        AI responses guiding the support workflow
    """
    # Initialize model
    model = init_chat_model("gpt-4o-mini", temperature=0)

    # Collect all tools
    all_tools = [
        record_warranty_status,
        record_issue_type,
        provide_solution,
        escalate_to_human,
    ]

    # Create agent with step-based configuration
    agent = create_agent(
        model,
        tools=all_tools,
        state_schema=SupportState,
        middleware=[apply_step_config],
        checkpointer=InMemorySaver(),
    )

    # Send welcome message
    yield "Welcome to customer support! I'm here to help with your device issue."

    # Initialize conversation
    thread_id = "support_thread"
    config = {"configurable": {"thread_id": thread_id}, "callbacks": [langfuse_handler]}

    while True:
        # Get user input
        user_message = yield pixie.InputRequired(str)

        # Check for exit
        if user_message.lower() in {"exit", "quit", "bye"}:
            yield "Thank you for contacting support. Have a great day!"
            break

        # Process with the agent (async API, since this function is async)
        result = await agent.ainvoke(
            {"messages": [{"role": "user", "content": user_message}]}, config  # type: ignore
        )

        # Yield the agent's response
        yield result["messages"][-1].content

        # Check if we've reached a resolution. result["messages"] holds
        # LangChain message objects rather than dicts, so inspect attributes
        # instead of calling .get() on each message.
        if result.get("current_step") == "resolution_specialist" and any(
            getattr(msg, "type", None) == "tool"
            and getattr(msg, "name", None) in {"provide_solution", "escalate_to_human"}
            for msg in result.get("messages", [])
        ):
            yield "Is there anything else I can help you with? (Type 'exit' to end)"
@@ -0,0 +1,163 @@
"""
Personal Assistant with Subagents (Multi-Agent)

This example demonstrates the supervisor pattern where a central supervisor agent
coordinates specialized worker agents (calendar and email agents).

Based on: https://docs.langchain.com/oss/python/langchain/multi-agent/subagents-personal-assistant
"""

from langchain.agents import create_agent
from langchain.chat_models import init_chat_model
from langchain.tools import tool
from langgraph.checkpoint.memory import InMemorySaver

from langfuse.langchain import CallbackHandler
import pixie


langfuse_handler = CallbackHandler()


# Define calendar tools (stubs for demonstration)
@tool
def create_calendar_event(
    title: str,
    start_time: str,  # ISO format: "2024-01-15T14:00:00"
    end_time: str,  # ISO format: "2024-01-15T15:00:00"
    attendees: list[str],  # email addresses
    location: str = "",
) -> str:
    """Create a calendar event. Requires exact ISO datetime format."""
    return f"Event created: {title} from {start_time} to {end_time} with {len(attendees)} attendees"


@tool
def get_available_time_slots(
    attendees: list[str],
    date: str,  # ISO format: "2024-01-15"
    duration_minutes: int,
) -> list[str]:
    """Check calendar availability for the given attendees on a specific date."""
    return ["09:00", "14:00", "16:00"]


@tool
def send_email(to: list[str], subject: str, body: str, cc: list[str] | None = None) -> str:
    """Send an email via the email API. Requires properly formatted addresses."""
    return f"Email sent to {', '.join(to)} - Subject: {subject}"


# System prompts for specialized agents
CALENDAR_AGENT_PROMPT = (
    "You are a calendar scheduling assistant. "
    "Parse natural language scheduling requests (e.g., 'next Tuesday at 2pm') "
    "into proper ISO datetime formats. "
    "Use get_available_time_slots to check availability when needed. "
    "Use create_calendar_event to schedule events. "
    "Always confirm what was scheduled in your final response."
)

EMAIL_AGENT_PROMPT = (
    "You are an email assistant. "
    "Compose professional emails based on natural language requests. "
    "Extract recipient information and craft appropriate subject lines and body text. "
    "Use send_email to send the message. "
    "Always confirm what was sent in your final response."
)

SUPERVISOR_PROMPT = (
    "You are a helpful personal assistant. "
    "You can schedule calendar events and send emails. "
    "Break down user requests into appropriate tool calls and coordinate the results. "
    "When a request involves multiple actions, use multiple tools in sequence."
)


@pixie.app
async def langchain_personal_assistant() -> pixie.PixieGenerator[str, str]:
    """Multi-agent personal assistant with calendar and email subagents.

    The supervisor coordinates specialized worker agents:
    - Calendar agent: handles scheduling and availability
    - Email agent: manages communication and drafts

    Yields:
        AI responses to user requests
    """
    # Initialize model
    model = init_chat_model("gpt-4o-mini", temperature=0)

    # Create calendar subagent
    calendar_agent = create_agent(
        model,
        tools=[create_calendar_event, get_available_time_slots],
        system_prompt=CALENDAR_AGENT_PROMPT,
    )

    # Create email subagent
    email_agent = create_agent(
        model,
        tools=[send_email],
        system_prompt=EMAIL_AGENT_PROMPT,
    )

    # Wrap subagents as tools for the supervisor
    @tool
    def schedule_event(request: str) -> str:
        """Schedule calendar events using natural language.

        Use this when the user wants to create, modify, or check calendar appointments.
        Handles date/time parsing, availability checking, and event creation.
        """
        result = calendar_agent.invoke(
            {"messages": [{"role": "user", "content": request}]},
            config={"callbacks": [langfuse_handler]},
        )
        return result["messages"][-1].content

    @tool
    def manage_email(request: str) -> str:
        """Send emails using natural language.

        Use this when the user wants to send notifications, reminders, or any email
        communication. Handles recipient extraction, subject generation, and email composition.
        """
        result = email_agent.invoke(
            {"messages": [{"role": "user", "content": request}]},
            config={"callbacks": [langfuse_handler]},
        )
        return result["messages"][-1].content

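    # Design note: wrapping each subagent in a plain tool keeps the
    # supervisor's context small; the supervisor sees only each subagent's
    # final answer, not the subagent's intermediate tool calls.
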
    # Create supervisor agent with checkpointer for conversation memory
    supervisor_agent = create_agent(
        model,
        tools=[schedule_event, manage_email],
        system_prompt=SUPERVISOR_PROMPT,
        checkpointer=InMemorySaver(),
    )

    # Send welcome message
    yield (
        "Hello! I'm your personal assistant. I can help you schedule events "
        "and send emails. What would you like me to do?"
    )

    # Initialize conversation
    thread_id = "personal_assistant_thread"
    config = {"configurable": {"thread_id": thread_id}, "callbacks": [langfuse_handler]}

    while True:
        # Get user request
        user_request = yield pixie.InputRequired(str)

        # Check for exit
        if user_request.lower() in {"exit", "quit", "bye", "goodbye"}:
            yield "Goodbye! Let me know if you need anything else."
            break

        # Process the request with the supervisor (async API, since this
        # function is async)
        result = await supervisor_agent.ainvoke(
            {"messages": [{"role": "user", "content": user_request}]}, config  # type: ignore
        )

        # Yield the supervisor's response
        yield result["messages"][-1].content