pixie-examples 0.0.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. pixie_examples-0.0.1/PKG-INFO +112 -0
  2. pixie_examples-0.0.1/README.md +78 -0
  3. pixie_examples-0.0.1/examples/__init__.py +0 -0
  4. pixie_examples-0.0.1/examples/langchain/README.md +39 -0
  5. pixie_examples-0.0.1/examples/langchain/__init__.py +1 -0
  6. pixie_examples-0.0.1/examples/langchain/basic_agent.py +100 -0
  7. pixie_examples-0.0.1/examples/langchain/customer_support.py +238 -0
  8. pixie_examples-0.0.1/examples/langchain/personal_assistant.py +163 -0
  9. pixie_examples-0.0.1/examples/langchain/sql_agent.py +176 -0
  10. pixie_examples-0.0.1/examples/langgraph/__init__.py +0 -0
  11. pixie_examples-0.0.1/examples/langgraph/langgraph_rag.py +241 -0
  12. pixie_examples-0.0.1/examples/langgraph/langgraph_sql_agent.py +218 -0
  13. pixie_examples-0.0.1/examples/openai_agents_sdk/README.md +299 -0
  14. pixie_examples-0.0.1/examples/openai_agents_sdk/__init__.py +0 -0
  15. pixie_examples-0.0.1/examples/openai_agents_sdk/customer_service.py +258 -0
  16. pixie_examples-0.0.1/examples/openai_agents_sdk/financial_research_agent.py +328 -0
  17. pixie_examples-0.0.1/examples/openai_agents_sdk/llm_as_a_judge.py +108 -0
  18. pixie_examples-0.0.1/examples/openai_agents_sdk/routing.py +177 -0
  19. pixie_examples-0.0.1/examples/pydantic_ai/.env.example +26 -0
  20. pixie_examples-0.0.1/examples/pydantic_ai/README.md +246 -0
  21. pixie_examples-0.0.1/examples/pydantic_ai/__init__.py +0 -0
  22. pixie_examples-0.0.1/examples/pydantic_ai/bank_support.py +154 -0
  23. pixie_examples-0.0.1/examples/pydantic_ai/flight_booking.py +250 -0
  24. pixie_examples-0.0.1/examples/pydantic_ai/question_graph.py +152 -0
  25. pixie_examples-0.0.1/examples/pydantic_ai/sql_gen.py +182 -0
  26. pixie_examples-0.0.1/examples/pydantic_ai/structured_output.py +64 -0
  27. pixie_examples-0.0.1/examples/quickstart/__init__.py +0 -0
  28. pixie_examples-0.0.1/examples/quickstart/chatbot.py +25 -0
  29. pixie_examples-0.0.1/examples/quickstart/sleepy_poet.py +96 -0
  30. pixie_examples-0.0.1/examples/quickstart/weather_agent.py +110 -0
  31. pixie_examples-0.0.1/examples/sql_utils.py +241 -0
  32. pixie_examples-0.0.1/pyproject.toml +36 -0
@@ -0,0 +1,112 @@
1
+ Metadata-Version: 2.4
2
+ Name: pixie-examples
3
+ Version: 0.0.1
4
+ Summary: examples for using Pixie
5
+ License: MIT
6
+ Author: Yiou Li
7
+ Author-email: yol@gopixie.ai
8
+ Requires-Python: >=3.11,<3.14
9
+ Classifier: License :: OSI Approved :: MIT License
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: Programming Language :: Python :: 3.11
12
+ Classifier: Programming Language :: Python :: 3.12
13
+ Classifier: Programming Language :: Python :: 3.13
14
+ Requires-Dist: beautifulsoup4 (>=4.14.3,<5.0.0)
15
+ Requires-Dist: langchain (>=1.2.3,<2.0.0)
16
+ Requires-Dist: langchain-openai (>=1.1.7,<2.0.0)
17
+ Requires-Dist: langchain-text-splitters (>=1.1.0,<2.0.0)
18
+ Requires-Dist: langgraph (>=1.0.5,<2.0.0)
19
+ Requires-Dist: lxml (>=6.0.2,<7.0.0)
20
+ Requires-Dist: numpy (>=2.4.1,<3.0.0)
21
+ Requires-Dist: openai-agents (>=0.6.5,<0.7.0)
22
+ Requires-Dist: openinference-instrumentation-crewai (>=0.1.17,<0.2.0)
23
+ Requires-Dist: openinference-instrumentation-dspy (>=0.1.33,<0.2.0)
24
+ Requires-Dist: openinference-instrumentation-google-adk (>=0.1.8,<0.2.0)
25
+ Requires-Dist: openinference-instrumentation-openai-agents (>=1.4.0,<2.0.0)
26
+ Requires-Dist: pixie-sdk (>=0.0.1b0,<0.0.2)
27
+ Requires-Dist: pydantic (>=2.7.4,<3.0.0)
28
+ Requires-Dist: pydantic-ai-slim (>=1.39.0,<2.0.0)
29
+ Requires-Dist: pymarkdownlnt (>=0.9.34,<0.10.0)
30
+ Requires-Dist: requests (>=2.32.5,<3.0.0)
31
+ Requires-Dist: sqlalchemy (>=2.0.45,<3.0.0)
32
+ Description-Content-Type: text/markdown
33
+
34
+ # Pixie Examples
35
+
36
+ [![MIT License](https://img.shields.io/badge/License-MIT-red.svg?style=flat-square)](https://opensource.org/licenses/MIT)
37
+ [![Python Version](https://img.shields.io/badge/python-3.11%2B-blue?style=flat-square)](https://www.python.org/downloads/)
38
+ [![Discord](https://img.shields.io/discord/1459772566528069715?style=flat-square&logo=Discord&logoColor=white&label=Discord&color=%23434EE4)](https://discord.gg/YMNYu6Z3)
39
+
40
+ This repository contains a collection of example applications integrated with [**Pixie SDK**](https://github.com/yiouli/pixie-sdk-py) for interactive debugging.
41
+
42
+ ## Get Started
43
+
44
+ > You can play with the demo site [here](https://gopixie.ai/?url=https://demo.yiouli.us/graphql) without any setup.
45
+
46
+ ### 1. Setup
47
+
48
+ Clone this repository:
49
+
50
+ Install the `pixie-examples` Python package:
51
+
52
+ ```bash
53
+ pip install pixie-examples
54
+ ```
55
+
56
+ Create a `.env` file with your API keys:
57
+
58
+ ```ini
59
+ # .env
60
+ OPENAI_API_KEY=...
61
+ # Add other API keys as needed for specific examples
62
+ ```
63
+
64
+ Start the Pixie server:
65
+
66
+ ```bash
67
+ pixie
68
+ ```
69
+
70
+ ### 2. Debug with Web UI
71
+
72
+ Visit [gopixie.ai](https://gopixie.ai) to interact with and debug your applications through the web interface.
73
+
74
+ ## Important Links
75
+
76
+ - [**Pixie SDK**](https://github.com/yiouli/pixie-sdk-py)
77
+ - [**Documentation**](https://yiouli.github.io/pixie-sdk-py/)
78
+ - [**Discord**](https://discord.gg/YMNYu6Z3)
79
+
80
+ ## Examples Catalog
81
+
82
+ ### Quickstart
83
+
84
+ - **Basic Example**: Simple hello world application to get started with Pixie SDK
85
+
86
+ ### Pydantic AI Examples
87
+
88
+ - **Bank Support**: Multi-turn chatbot for banking customer support
89
+ - **Flight Booking**: Multi-agent system for flight booking
90
+ - **Question Graph**: Graph-based question answering system
91
+ - **SQL Generation**: Multi-step workflow for generating SQL queries
92
+ - **Structured Output**: Examples of structured data handling
93
+
94
+ ### OpenAI Agents SDK Examples
95
+
96
+ - **Customer Service**: Multi-agent customer service system
97
+ - **Financial Research Agent**: Multi-step financial research workflow
98
+ - **LLM-as-a-Judge**: Evaluation and judging patterns
99
+ - **Routing**: Agent routing and handoffs
100
+
101
+ ### LangChain Examples
102
+
103
+ - **Basic Agent**: Simple LangChain agent integration
104
+ - **Customer Support**: Customer support chatbot
105
+ - **Personal Assistant**: Multi-agent personal assistant
106
+ - **SQL Agent**: SQL query generation with LangChain
107
+
108
+ ### LangGraph Examples
109
+
110
+ - **RAG System**: Retrieval-augmented generation with LangGraph
111
+ - **SQL Agent**: SQL agent built with LangGraph state machines
112
+
@@ -0,0 +1,78 @@
1
+ # Pixie Examples
2
+
3
+ [![MIT License](https://img.shields.io/badge/License-MIT-red.svg?style=flat-square)](https://opensource.org/licenses/MIT)
4
+ [![Python Version](https://img.shields.io/badge/python-3.11%2B-blue?style=flat-square)](https://www.python.org/downloads/)
5
+ [![Discord](https://img.shields.io/discord/1459772566528069715?style=flat-square&logo=Discord&logoColor=white&label=Discord&color=%23434EE4)](https://discord.gg/YMNYu6Z3)
6
+
7
+ This repository contains a collection of example applications integrated with [**Pixie SDK**](https://github.com/yiouli/pixie-sdk-py) for interactive debugging.
8
+
9
+ ## Get Started
10
+
11
+ > You can play with the demo site [here](https://gopixie.ai/?url=https://demo.yiouli.us/graphql) without any setup.
12
+
13
+ ### 1. Setup
14
+
15
+ Clone this repository:
16
+
17
+ Install the `pixie-examples` Python package:
18
+
19
+ ```bash
20
+ pip install pixie-examples
21
+ ```
22
+
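+ If you cloned the repository instead, it can likely also be set up with Poetry, since the package ships a `pyproject.toml` and the framework-specific READMEs use `poetry run pixie` (a hedged sketch; the exact workflow may differ):
+
+ ```bash
+ poetry install
+ ```
+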
23
+ Create a `.env` file with your API keys:
24
+
25
+ ```ini
26
+ # .env
27
+ OPENAI_API_KEY=...
28
+ # Add other API keys as needed for specific examples
29
+ ```
30
+
31
+ Start the Pixie server:
32
+
33
+ ```bash
34
+ pixie
35
+ ```
36
+
37
+ ### 2. Debug with Web UI
38
+
39
+ Visit [gopixie.ai](https://gopixie.ai) to interact with and debug your applications through the web interface.
40
+
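+ The demo link above passes a server's GraphQL endpoint to the UI via the `url` query parameter; a locally running server can presumably be opened the same way, e.g. `https://gopixie.ai/?url=http://127.0.0.1:8000/graphql` (the default local address mentioned in the LangChain examples README).
+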
41
+ ## Important Links
42
+
43
+ - [**Pixie SDK**](https://github.com/yiouli/pixie-sdk-py)
44
+ - [**Documentation**](https://yiouli.github.io/pixie-sdk-py/)
45
+ - [**Discord**](https://discord.gg/YMNYu6Z3)
46
+
47
+ ## Examples Catalog
48
+
49
+ ### Quickstart
50
+
51
+ - **Basic Example**: Simple hello world application to get started with Pixie SDK (see the sketch below)
52
+
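+ For reference, the example entry points in this package are plain async functions decorated with `@pixie.app`; the multi-turn examples additionally yield `pixie.InputRequired` to pause for user input. A minimal sketch in that style (not the actual quickstart files, which may differ):
+
+ ```python
+ import pixie
+
+
+ @pixie.app
+ async def hello(name: str) -> str:
+     """One-shot app: takes an input and returns a result."""
+     return f"Hello, {name}!"
+
+
+ @pixie.app
+ async def echo_chat() -> pixie.PixieGenerator[str, str]:
+     """Interactive app: yields output and pauses for user input."""
+     yield "Hi! Type something and I'll echo it back."
+     while True:
+         text = yield pixie.InputRequired(str)
+         if text.lower() in {"exit", "quit"}:
+             yield "Bye!"
+             break
+         yield f"You said: {text}"
+ ```
+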
53
+ ### Pydantic AI Examples
54
+
55
+ - **Bank Support**: Multi-turn chatbot for banking customer support
56
+ - **Flight Booking**: Multi-agent system for flight booking
57
+ - **Question Graph**: Graph-based question answering system
58
+ - **SQL Generation**: Multi-step workflow for generating SQL queries
59
+ - **Structured Output**: Examples of structured data handling
60
+
61
+ ### OpenAI Agents SDK Examples
62
+
63
+ - **Customer Service**: Multi-agent customer service system
64
+ - **Financial Research Agent**: Multi-step financial research workflow
65
+ - **LLM-as-a-Judge**: Evaluation and judging patterns
66
+ - **Routing**: Agent routing and handoffs
67
+
68
+ ### LangChain Examples
69
+
70
+ - **Basic Agent**: Simple LangChain agent integration
71
+ - **Customer Support**: Customer support chatbot
72
+ - **Personal Assistant**: Multi-agent personal assistant
73
+ - **SQL Agent**: SQL query generation with LangChain
74
+
75
+ ### LangGraph Examples
76
+
77
+ - **RAG System**: Retrieval-augmented generation with LangGraph
78
+ - **SQL Agent**: SQL agent built with LangGraph state machines
File without changes
@@ -0,0 +1,39 @@
1
+ # LangChain Examples
2
+
3
+ This directory contains LangChain examples integrated with the Pixie SDK. (The LangGraph examples listed below live in the sibling `examples/langgraph/` directory.)
4
+
5
+ ## Examples
6
+
7
+ 1. **basic_agent.py** - A simple quickstart agent that can answer questions and call tools
8
+ 2. **personal_assistant.py** - Multi-agent personal assistant with subagents for calendar and email
9
+ 3. **customer_support.py** - Customer support agent with state machine pattern (handoffs)
10
+ 4. **sql_agent.py** - SQL database query agent (multi-turn & multi-step)
11
+ 5. **langgraph_sql_agent.py** - Custom SQL agent built with LangGraph primitives
12
+ 6. **langgraph_rag.py** - Custom RAG (Retrieval Augmented Generation) agent with LangGraph
13
+
14
+ ## Setup
15
+
16
+ Install the required dependencies:
17
+
18
+ ```bash
19
+ poetry add langchain langchain-openai langchain-community langgraph langchain-text-splitters beautifulsoup4
20
+ ```
21
+
22
+ Set up environment variables in `.env`:
23
+
24
+ ```bash
25
+ OPENAI_API_KEY=your_openai_api_key
26
+ ANTHROPIC_API_KEY=your_anthropic_api_key
27
+ LANGSMITH_API_KEY=your_langsmith_api_key # Optional, for tracing
28
+ LANGSMITH_TRACING=true # Optional
29
+ ```
30
+
31
+ ## Running Examples
32
+
33
+ Start the Pixie server:
34
+
35
+ ```bash
36
+ poetry run pixie
37
+ ```
38
+
39
+ Then use GraphiQL at `http://127.0.0.1:8000/graphql` to run the agents.
@@ -0,0 +1 @@
1
+ # LangChain examples for Pixie SDK
@@ -0,0 +1,100 @@
1
+ """
2
+ Basic LangChain Agent Example (Quickstart)
3
+
4
+ This example demonstrates a simple agent that can answer questions and call tools.
5
+ Based on: https://docs.langchain.com/oss/python/langchain/quickstart
6
+ """
7
+
8
+ from langchain.agents import create_agent
9
+ from langchain.chat_models import init_chat_model
10
+
11
+ from langfuse.langchain import CallbackHandler
12
+ import pixie
13
+
14
+
15
+ langfuse_handler = CallbackHandler()
16
+
17
+
18
+ def get_weather(city: str) -> str:
19
+ """Get weather for a given city."""
20
+ return f"It's always sunny in {city}!"
21
+
22
+
23
+ @pixie.app
24
+ async def langchain_basic_weather_agent(query: str) -> str:
25
+ """A simple weather agent that can answer questions using tools.
26
+
27
+ Args:
28
+ query: User's question about weather
29
+
30
+ Returns:
31
+ AI-generated response
32
+ """
33
+ # Initialize the model
34
+ model = init_chat_model("gpt-4o-mini", temperature=0)
35
+
36
+ # Create agent with the weather tool
37
+ agent = create_agent(
38
+ model=model,
39
+ tools=[get_weather],
40
+ system_prompt="You are a helpful weather assistant",
41
+ )
42
+
43
+ # Run the agent
44
+ result = agent.invoke(
45
+ {"messages": [{"role": "user", "content": query}]},
46
+ config={"callbacks": [langfuse_handler]},
47
+ )
48
+
49
+ # Return the final response
50
+ return result["messages"][-1].content
51
+
52
+
53
+ @pixie.app
54
+ async def langchain_interactive_weather_agent() -> pixie.PixieGenerator[str, str]:
55
+ """An interactive weather chatbot that maintains conversation.
56
+
57
+ This agent can have multi-turn conversations with the user.
58
+
59
+ Yields:
60
+ AI responses to user questions
61
+ """
62
+ # Initialize the model
63
+ model = init_chat_model("gpt-4o-mini", temperature=0)
64
+
65
+ # Create agent with the weather tool
66
+ agent = create_agent(
67
+ model=model,
68
+ tools=[get_weather],
69
+ system_prompt="You are a helpful weather assistant that answers questions about weather.",
70
+ )
71
+
72
+ # Send welcome message
73
+ yield "Hello! I'm a weather assistant. Ask me about the weather in any city!"
74
+
75
+ # Initialize conversation history
76
+ messages = []
77
+
78
+ while True:
79
+ # Get user input
80
+ user_query = yield pixie.InputRequired(str)
81
+
82
+ # Check for exit commands
83
+ if user_query.lower() in {"exit", "quit", "bye", "goodbye"}:
84
+ yield "Goodbye! Have a great day!"
85
+ break
86
+
87
+ # Add user message to history
88
+ messages.append({"role": "user", "content": user_query})
89
+
90
+ # Run agent with full conversation history
91
+ result = agent.invoke(
92
+ {"messages": messages}, config={"callbacks": [langfuse_handler]}
93
+ )
94
+
95
+ # Update history with AI response
96
+ messages = result["messages"]
97
+
98
+ # Yield the AI's response
99
+ ai_response = result["messages"][-1].content
100
+ yield ai_response
@@ -0,0 +1,238 @@
1
+ """
2
+ Customer Support with Handoffs (State Machine)
3
+
4
+ This example demonstrates the state machine pattern where an agent's behavior changes
5
+ as it moves through different states of a workflow.
6
+
7
+ Based on: https://docs.langchain.com/oss/python/langchain/multi-agent/handoffs-customer-support
8
+ """
9
+
10
+ from typing import Literal, NotRequired
11
+ from langchain.agents import create_agent, AgentState
12
+ from langchain.chat_models import init_chat_model
13
+ from langchain.tools import tool, ToolRuntime
14
+ from langchain.agents.middleware import wrap_model_call, ModelRequest, ModelResponse
15
+ from langgraph.checkpoint.memory import InMemorySaver
16
+ from langgraph.types import Command
17
+ from typing import Callable
18
+
19
+ from langfuse.langchain import CallbackHandler
20
+ import pixie
21
+
22
+
23
+ langfuse_handler = CallbackHandler()
24
+
25
+
26
+ # Define the possible workflow steps
27
+ SupportStep = Literal["warranty_collector", "issue_classifier", "resolution_specialist"]
28
+
29
+
30
+ class SupportState(AgentState):
31
+ """State for customer support workflow."""
32
+
33
+ current_step: NotRequired[SupportStep]
34
+ warranty_status: NotRequired[Literal["in_warranty", "out_of_warranty"]]
35
+ issue_type: NotRequired[Literal["hardware", "software"]]
36
+
37
+
38
+ # Define tools that manage workflow state
39
+ @tool
40
+ def record_warranty_status(
41
+ status: Literal["in_warranty", "out_of_warranty"],
42
+ runtime: ToolRuntime[None, SupportState],
43
+ ) -> Command:
44
+ """Record the customer's warranty status and transition to issue classification."""
45
+ return Command(
46
+ update={
47
+ "messages": [
48
+ {
49
+ "role": "tool",
50
+ "content": f"Warranty status recorded as: {status}",
51
+ "tool_call_id": runtime.tool_call_id,
52
+ }
53
+ ],
54
+ "warranty_status": status,
55
+ "current_step": "issue_classifier",
56
+ }
57
+ )
58
+
59
+
60
+ @tool
61
+ def record_issue_type(
62
+ issue_type: Literal["hardware", "software"],
63
+ runtime: ToolRuntime[None, SupportState],
64
+ ) -> Command:
65
+ """Record the type of issue and transition to resolution specialist."""
66
+ return Command(
67
+ update={
68
+ "messages": [
69
+ {
70
+ "role": "tool",
71
+ "content": f"Issue type recorded as: {issue_type}",
72
+ "tool_call_id": runtime.tool_call_id,
73
+ }
74
+ ],
75
+ "issue_type": issue_type,
76
+ "current_step": "resolution_specialist",
77
+ }
78
+ )
79
+
80
+
81
+ @tool
82
+ def escalate_to_human(reason: str) -> str:
83
+ """Escalate the case to a human support specialist."""
84
+ return f"Escalating to human support. Reason: {reason}"
85
+
86
+
87
+ @tool
88
+ def provide_solution(solution: str) -> str:
89
+ """Provide a solution to the customer's issue."""
90
+ return f"Solution provided: {solution}"
91
+
92
+
93
+ # Step prompts
94
+ WARRANTY_COLLECTOR_PROMPT = """You are a customer support agent collecting warranty information.
95
+
96
+ CURRENT STAGE: Warranty Verification
97
+
98
+ Ask the customer if their device is under warranty. Once you have this information,
99
+ use the record_warranty_status tool to record it and move to the next step.
100
+
101
+ Be polite and professional."""
102
+
103
+ ISSUE_CLASSIFIER_PROMPT = """You are a customer support agent classifying technical issues.
104
+
105
+ CURRENT STAGE: Issue Classification
106
+ CUSTOMER INFO: Warranty status is {warranty_status}
107
+
108
+ Ask the customer to describe their issue, then determine if it's:
109
+ - HARDWARE: Physical problems (cracked screen, battery, ports, buttons)
110
+ - SOFTWARE: App crashes, performance, settings, updates
111
+
112
+ Use record_issue_type to record the classification and move to resolution."""
113
+
114
+ RESOLUTION_SPECIALIST_PROMPT = """You are a customer support agent helping with device issues.
115
+
116
+ CURRENT STAGE: Resolution
117
+ CUSTOMER INFO: Warranty status is {warranty_status}, issue type is {issue_type}
118
+
119
+ At this step, you need to:
120
+ 1. For SOFTWARE issues: provide troubleshooting steps using provide_solution
121
+ 2. For HARDWARE issues:
122
+ - If IN WARRANTY: explain warranty repair process using provide_solution
123
+ - If OUT OF WARRANTY: escalate_to_human for paid repair options
124
+
125
+ Be specific and helpful in your solutions."""
126
+
127
+ # Step configuration
128
+ STEP_CONFIG = {
129
+ "warranty_collector": {
130
+ "prompt": WARRANTY_COLLECTOR_PROMPT,
131
+ "tools": [record_warranty_status],
132
+ "requires": [],
133
+ },
134
+ "issue_classifier": {
135
+ "prompt": ISSUE_CLASSIFIER_PROMPT,
136
+ "tools": [record_issue_type],
137
+ "requires": ["warranty_status"],
138
+ },
139
+ "resolution_specialist": {
140
+ "prompt": RESOLUTION_SPECIALIST_PROMPT,
141
+ "tools": [provide_solution, escalate_to_human],
142
+ "requires": ["warranty_status", "issue_type"],
143
+ },
144
+ }
145
+
146
+
147
+ # Create step-based middleware
148
+ @wrap_model_call
149
+ def apply_step_config(
150
+ request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
151
+ ) -> ModelResponse:
152
+ """Configure agent behavior based on the current step."""
153
+ # Get current step (defaults to warranty_collector for first interaction)
154
+ current_step = request.state.get("current_step", "warranty_collector")
155
+
156
+ # Look up step configuration
157
+ stage_config = STEP_CONFIG[current_step]
158
+
159
+ # Validate required state exists
160
+ for key in stage_config["requires"]:
161
+ if request.state.get(key) is None:
162
+ raise ValueError(f"{key} must be set before reaching {current_step}")
163
+
164
+ # Format prompt with state values
165
+ # Note: In a production implementation, you would inject the formatted prompt
166
+ # and tools into the request. For simplicity, we'll let the handler process
167
+ # the request and handle tool selection based on state.
168
+ _ = stage_config["prompt"].format(**request.state)
169
+
170
+ # The middleware pattern here would need deeper integration with LangChain's
171
+ # internal APIs. For now, we pass through to the handler.
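+ # A hedged sketch of the injection step, assuming ModelRequest exposes mutable
+ # `system_prompt` and `tools` fields (verify against the installed langchain
+ # version before relying on this):
+ #
+ #     request.system_prompt = stage_config["prompt"].format(**request.state)
+ #     request.tools = stage_config["tools"]
+ #     return handler(request)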
172
+ return handler(request)
173
+
174
+
175
+ @pixie.app
176
+ async def langchain_customer_support() -> pixie.PixieGenerator[str, str]:
177
+ """Customer support agent with state machine workflow.
178
+
179
+ The agent progresses through three stages:
180
+ 1. Warranty verification
181
+ 2. Issue classification (hardware/software)
182
+ 3. Resolution (solution or escalation)
183
+
184
+ Yields:
185
+ AI responses guiding the support workflow
186
+ """
187
+ # Initialize model
188
+ model = init_chat_model("gpt-4o-mini", temperature=0)
189
+
190
+ # Collect all tools
191
+ all_tools = [
192
+ record_warranty_status,
193
+ record_issue_type,
194
+ provide_solution,
195
+ escalate_to_human,
196
+ ]
197
+
198
+ # Create agent with step-based configuration
199
+ agent = create_agent(
200
+ model,
201
+ tools=all_tools,
202
+ state_schema=SupportState,
203
+ middleware=[apply_step_config],
204
+ checkpointer=InMemorySaver(),
205
+ )
206
+
207
+ # Send welcome message
208
+ yield "Welcome to customer support! I'm here to help with your device issue."
209
+
210
+ # Initialize conversation
211
+ thread_id = "support_thread"
212
+ config = {"configurable": {"thread_id": thread_id}, "callbacks": [langfuse_handler]}
213
+
214
+ while True:
215
+ # Get user input
216
+ user_message = yield pixie.InputRequired(str)
217
+
218
+ # Check for exit
219
+ if user_message.lower() in {"exit", "quit", "bye"}:
220
+ yield "Thank you for contacting support. Have a great day!"
221
+ break
222
+
223
+ # Process with agent
224
+ result = agent.invoke(
225
+ {"messages": [{"role": "user", "content": user_message}]}, config # type: ignore
226
+ )
227
+
228
+ # Yield the agent's response
229
+ yield result["messages"][-1].content
230
+
231
+ # Check if we've reached a resolution
232
+ current_state = result
233
+ if current_state.get("current_step") == "resolution_specialist" and any(
234
+ msg.get("role") == "tool"
235
+ and msg.get("name") in ["provide_solution", "escalate_to_human"]
236
+ for msg in result.get("messages", [])
237
+ ):
238
+ yield "Is there anything else I can help you with? (Type 'exit' to end)"