mbxai 2.1.1__py3-none-any.whl → 2.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mbxai/__init__.py +1 -1
- mbxai/agent/client.py +39 -13
- mbxai/examples/dialog_agent_example.py +15 -0
- mbxai/examples/optional_prompt_example.py +164 -0
- mbxai/mcp/server.py +1 -1
- {mbxai-2.1.1.dist-info → mbxai-2.1.2.dist-info}/METADATA +1 -1
- {mbxai-2.1.1.dist-info → mbxai-2.1.2.dist-info}/RECORD +9 -8
- {mbxai-2.1.1.dist-info → mbxai-2.1.2.dist-info}/WHEEL +0 -0
- {mbxai-2.1.1.dist-info → mbxai-2.1.2.dist-info}/licenses/LICENSE +0 -0
mbxai/__init__.py
CHANGED
mbxai/agent/client.py
CHANGED
@@ -184,8 +184,8 @@ class AgentClient:
 
     def agent(
         self,
-        prompt: str,
-        final_response_structure: Type[BaseModel],
+        prompt: str = None,
+        final_response_structure: Type[BaseModel] = None,
        ask_questions: bool = True,
        agent_id: str = None,
        answers: AnswerList = None
@@ -194,29 +194,53 @@
        Process a prompt through the agent's thinking process.
 
        Args:
-            prompt: The
-            final_response_structure: Pydantic model defining the expected final response format
+            prompt: The prompt from the user (optional if agent_id exists with history)
+            final_response_structure: Pydantic model defining the expected final response format (required for new sessions)
            ask_questions: Whether to ask clarifying questions (default: True)
            agent_id: Optional agent session ID to continue an existing conversation
            answers: Optional answers to questions (when continuing a conversation with questions)
 
        Returns:
            AgentResponse containing either questions to ask or the final response
+
+        Raises:
+            ValueError: If neither prompt nor agent_id with history is provided, or if final_response_structure is missing for new sessions
        """
-        #
-
-
+        # Validate inputs and determine session type
+        is_existing_session = agent_id is not None and agent_id in self._agent_sessions
+        existing_session = self._agent_sessions.get(agent_id, {}) if agent_id else {}
+        conversation_history = existing_session.get("conversation_history", []).copy()
+
+        # Validation logic
+        if not is_existing_session:
+            # New session - both prompt and final_response_structure are required
+            if not prompt:
+                raise ValueError("Prompt is required when starting a new agent session")
+            if not final_response_structure:
+                raise ValueError("final_response_structure is required when starting a new agent session")
+
+            # Create new agent_id if not provided
+            if agent_id is None:
+                agent_id = str(__import__("uuid").uuid4())
            logger.info(f"🚀 Starting new agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
        else:
-
+            # Existing session - use previous final_response_structure if not provided
+            if not final_response_structure:
+                final_response_structure = existing_session.get("final_response_structure")
+                if not final_response_structure:
+                    raise ValueError("final_response_structure not found in existing session and not provided")
+
+            # Handle optional prompt for existing sessions
+            if not prompt:
+                # Use conversation history to continue without explicit prompt
+                prompt = "[Continue conversation based on history]"
+                logger.info(f"🔄 Continuing agent process (ID: {agent_id}) without explicit prompt (using history)")
+            else:
+                logger.info(f"🔄 Continuing agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
 
        # Initialize token summary
        token_summary = TokenSummary()
 
-        # Check if this is a continuing conversation
-        existing_session = self._agent_sessions.get(agent_id, {})
-        conversation_history = existing_session.get("conversation_history", []).copy()
-
        if conversation_history:
            logger.info(f"📜 Agent {agent_id}: Loaded conversation history with {len(conversation_history)} messages")
 
@@ -438,6 +462,7 @@ IMPORTANT: For each question, provide a technical key identifier that:
            self._agent_sessions[agent_id]["conversation_history"] = updated_history
            self._agent_sessions[agent_id]["step"] = "completed"
            self._agent_sessions[agent_id]["token_summary"] = token_summary
+            self._agent_sessions[agent_id]["final_response_structure"] = final_response_structure
            logger.info(f"💾 Agent {agent_id}: Updated session with conversation history ({len(updated_history)} messages)")
        else:
            # Create new session if it doesn't exist
@@ -447,7 +472,8 @@ IMPORTANT: For each question, provide a technical key identifier that:
            self._agent_sessions[agent_id] = {
                "step": "completed",
                "conversation_history": updated_history,
-                "token_summary": token_summary
+                "token_summary": token_summary,
+                "final_response_structure": final_response_structure
            }
            logger.info(f"💾 Agent {agent_id}: Created new session with conversation history ({len(updated_history)} messages)")
 
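The hunks above make prompt and final_response_structure optional so that an existing session can be continued from its stored conversation history. A minimal usage sketch of that calling pattern, assuming the client setup shown in the bundled examples; the TripSummary model and the prompts are illustrative, not part of the package:

import os
from pydantic import BaseModel, Field
from mbxai import AgentClient, OpenRouterClient

class TripSummary(BaseModel):  # hypothetical response model, for illustration only
    text: str = Field(description="Summary of the plan so far")

openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
agent = AgentClient(openrouter_client, max_iterations=1)

# New session: prompt and final_response_structure are still required,
# otherwise the validation shown above raises ValueError.
first = agent.agent(
    prompt="Plan a weekend trip to the coast",
    final_response_structure=TripSummary,
    ask_questions=False,
)

# Continuing the same session: both arguments may now be omitted; the stored
# conversation history and final_response_structure are reused.
followup = agent.agent(agent_id=first.agent_id, ask_questions=False)
if followup.is_complete():
    print(followup.final_response.text)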
mbxai/examples/dialog_agent_example.py
CHANGED
@@ -135,6 +135,21 @@ def demonstrate_dialog_with_questions():
    print(f"Genre: {book_rec2.genre}")
    print(f"Reason: {book_rec2.reason}")
    print(f"Connection to previous: {book_rec2.connection_to_previous}")
+
+    # Continue WITHOUT explicit prompt - using only conversation history
+    print(f"\n3️⃣ Continuing conversation WITHOUT explicit prompt (history-based):")
+    try:
+        response3 = agent.agent(agent_id=agent_id, ask_questions=False)  # No prompt provided
+
+        if response3.is_complete():
+            book_rec3 = response3.final_response
+            print(f"History-based recommendation:")
+            print(f"Book: {book_rec3.title} by {book_rec3.author}")
+            print(f"Genre: {book_rec3.genre}")
+            print(f"Reason: {book_rec3.reason}")
+            print(f"Connection to previous: {book_rec3.connection_to_previous}")
+    except Exception as e:
+        print(f"Error with history-based continuation: {e}")
 
    # Session cleanup
    print(f"\n🗑️ Session cleanup...")
mbxai/examples/optional_prompt_example.py
ADDED
@@ -0,0 +1,164 @@
+"""
+Example demonstrating optional prompt functionality with existing agent sessions.
+"""
+
+import os
+from pydantic import BaseModel, Field
+from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
+
+
+class ConversationResponse(BaseModel):
+    """A general conversation response."""
+    response: str = Field(description="The response to the conversation")
+    context_used: str = Field(description="How previous conversation history was used")
+    continuation_type: str = Field(description="Type of continuation (new topic, follow-up, etc.)")
+
+
+class StoryResponse(BaseModel):
+    """A story response."""
+    story_continuation: str = Field(description="The next part of the story")
+    character_development: str = Field(description="How characters developed in this part")
+    plot_advancement: str = Field(description="How the plot advanced")
+
+
+def demonstrate_optional_prompt():
+    """Demonstrate using agent with optional prompts for existing sessions."""
+    print("🎭 DEMO: Optional Prompt with Existing Agent Sessions")
+    print("=" * 60)
+
+    # Initialize the clients
+    openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+    agent = AgentClient(openrouter_client, max_iterations=1)
+
+    # First interaction - establish a story context
+    print("\n1️⃣ First interaction - establishing story context:")
+    response1 = agent.agent(
+        prompt="Let's create a story about a detective solving a mystery in a small town",
+        final_response_structure=StoryResponse,
+        ask_questions=False
+    )
+
+    agent_id = response1.agent_id
+    print(f"Agent ID: {agent_id}")
+
+    if response1.is_complete():
+        story1 = response1.final_response
+        print(f"Story start: {story1.story_continuation[:150]}...")
+        print(f"Characters: {story1.character_development}")
+
+    # Second interaction - continue with explicit prompt
+    print(f"\n2️⃣ Second interaction - continue with explicit prompt:")
+    response2 = agent.agent(
+        prompt="The detective finds a mysterious letter. What does it say?",
+        agent_id=agent_id,
+        ask_questions=False
+    )
+
+    if response2.is_complete():
+        story2 = response2.final_response
+        print(f"Story continuation: {story2.story_continuation[:150]}...")
+        print(f"Plot advancement: {story2.plot_advancement}")
+
+    # Third interaction - continue WITHOUT explicit prompt (using only history)
+    print(f"\n3️⃣ Third interaction - continue WITHOUT explicit prompt (history-based):")
+    try:
+        response3 = agent.agent(
+            agent_id=agent_id,  # Only provide agent_id, no prompt
+            ask_questions=False
+        )
+
+        if response3.is_complete():
+            story3 = response3.final_response
+            print(f"History-based continuation: {story3.story_continuation[:150]}...")
+            print(f"Character development: {story3.character_development}")
+            print(f"Plot advancement: {story3.plot_advancement}")
+
+    except Exception as e:
+        print(f"Error with history-based continuation: {e}")
+
+    # Fourth interaction - switch response format but use same session
+    print(f"\n4️⃣ Fourth interaction - switch to conversation format:")
+    response4 = agent.agent(
+        prompt="What do you think about this story so far?",
+        final_response_structure=ConversationResponse,
+        agent_id=agent_id,
+        ask_questions=False
+    )
+
+    if response4.is_complete():
+        conv4 = response4.final_response
+        print(f"Analysis: {conv4.response[:150]}...")
+        print(f"Context used: {conv4.context_used}")
+        print(f"Continuation type: {conv4.continuation_type}")
+
+    # Fifth interaction - continue conversation without prompt
+    print(f"\n5️⃣ Fifth interaction - continue conversation analysis without prompt:")
+    try:
+        response5 = agent.agent(
+            agent_id=agent_id,
+            ask_questions=False
+        )
+
+        if response5.is_complete():
+            conv5 = response5.final_response
+            print(f"Continued analysis: {conv5.response[:150]}...")
+            print(f"Context used: {conv5.context_used}")
+            print(f"Continuation type: {conv5.continuation_type}")
+
+    except Exception as e:
+        print(f"Error with continued conversation: {e}")
+
+    # Show final session state
+    session_info = agent.get_session_info(agent_id)
+    print(f"\n📊 Final session state:")
+    print(f" - Total messages: {session_info['conversation_length']}")
+    print(f" - Session step: {session_info.get('step', 'unknown')}")
+    print(f" - Has final_response_structure: {'final_response_structure' in session_info}")
+
+    # Cleanup
+    agent.delete_session(agent_id)
+
+
+def demonstrate_error_cases():
+    """Demonstrate error cases with optional prompt."""
+    print("\n🚨 DEMO: Error Cases with Optional Prompt")
+    print("=" * 50)
+
+    openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+    agent = AgentClient(openrouter_client, max_iterations=1)
+
+    # Error case 1: No prompt and no existing session
+    print("\n❌ Error case 1: No prompt, no existing session")
+    try:
+        response = agent.agent(ask_questions=False)
+        print("This should have failed!")
+    except ValueError as e:
+        print(f"Expected error: {e}")
+
+    # Error case 2: No prompt and no final_response_structure for new session
+    print("\n❌ Error case 2: No final_response_structure for new session")
+    try:
+        response = agent.agent(prompt="Test", ask_questions=False)
+        print("This should have failed!")
+    except ValueError as e:
+        print(f"Expected error: {e}")
+
+    # Error case 3: Unknown agent_id without prompt
+    print("\n❌ Error case 3: Unknown agent_id without prompt")
+    try:
+        response = agent.agent(agent_id="unknown-id", ask_questions=False)
+        print("This should have failed!")
+    except ValueError as e:
+        print(f"Expected error: {e}")
+
+    print("\n✅ All error cases handled correctly!")
+
+
+if __name__ == "__main__":
+    try:
+        demonstrate_optional_prompt()
+        demonstrate_error_cases()
+    except Exception as e:
+        print(f"Unexpected error: {e}")
+        import traceback
+        traceback.print_exc()
mbxai/mcp/server.py
CHANGED
{mbxai-2.1.1.dist-info → mbxai-2.1.2.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
-mbxai/__init__.py,sha256=
+mbxai/__init__.py,sha256=k465X578GCB1IDy8P5Uwfh-ae2VpoZ2y9B2GN7F7QrE,407
 mbxai/core.py,sha256=WMvmU9TTa7M_m-qWsUew4xH8Ul6xseCZ2iBCXJTW-Bs,196
 mbxai/agent/__init__.py,sha256=5j3mW2NZtAU1s2w8n833axWBQsxW8U0qKwoQ9JtQZ4k,289
-mbxai/agent/client.py,sha256=
+mbxai/agent/client.py,sha256=S3wWA47bEJkQF69TOA1Ba1t66jVE_CY2f76JbVKlAW0,36455
 mbxai/agent/models.py,sha256=sjBtaAENDABHl8IqTON1gxFFSZIaQYUCBFHB5804_Fw,5780
 mbxai/examples/agent_example.py,sha256=7gQHcMVWBu2xdxnVNzz4UfW0lkUnw9a5DN2-YoIRxXE,7420
 mbxai/examples/agent_iterations_example.py,sha256=xMqZhBWS67EkRkArjOAY2fCgLkQ32Qn9E4CSfEKW4MU,7905
@@ -10,8 +10,9 @@ mbxai/examples/agent_tool_registration_example.py,sha256=oWm0-d4mdba-VQ3HobiCIR0
 mbxai/examples/agent_validation_example.py,sha256=xlEf5Mwq5_Iu8bNU4cuHGZVYvAyZNhO2GMFmOom-CLo,4185
 mbxai/examples/auto_schema_example.py,sha256=ymuJJqqDxYznZT2VN6zVFEM7m_lDuccZ1AKSx-xzLTM,8174
 mbxai/examples/conversation_history_test.py,sha256=TpOh5ruQlXDPTPEu_0qTACAaQPSklKp8RYiOm1UzqPI,7773
-mbxai/examples/dialog_agent_example.py,sha256=
+mbxai/examples/dialog_agent_example.py,sha256=Za4m_JPusn3f60xYE0DTfqGwyz0rXoiCHLP-AFkiQYQ,7884
 mbxai/examples/openrouter_example.py,sha256=-grXHKMmFLoh-yUIEMc31n8Gg1S7uSazBWCIOWxgbyQ,1317
+mbxai/examples/optional_prompt_example.py,sha256=dG9aRKZL_xIZX2OgbnYAJX-4_QGi3op31nn2fRthYKo,6386
 mbxai/examples/parse_example.py,sha256=eCKMJoOl6qwo8sDP6Trc6ncgjPlgTqi5tPE2kB5_P0k,3821
 mbxai/examples/parse_tool_example.py,sha256=duHN8scI9ZK6XZ5hdiz1Adzyc-_7tH9Ls9qP4S0bf5s,5477
 mbxai/examples/request.json,sha256=fjVMses305wVUXgcmjESCvPgP81Js8Kk6zHjZ8EDyEg,5434
@@ -25,7 +26,7 @@ mbxai/examples/mcp/mcp_server_example.py,sha256=nFfg22Jnc6HMW_ezLO3So1xwDdx2_rIt
 mbxai/mcp/__init__.py,sha256=_ek9iYdYqW5saKetj4qDci11jxesQDiHPJRpHMKkxgU,175
 mbxai/mcp/client.py,sha256=QRzId6o4_WRWVv3rtm8cfZZGaoY_UlaOO-oqNjY-tmw,5219
 mbxai/mcp/example.py,sha256=oaol7AvvZnX86JWNz64KvPjab5gg1VjVN3G8eFSzuaE,2350
-mbxai/mcp/server.py,sha256=
+mbxai/mcp/server.py,sha256=b3NCIt1AZdsuDm22LY0qXNmmirqVnvrSrA0v921BASc,3332
 mbxai/openrouter/__init__.py,sha256=Ito9Qp_B6q-RLGAQcYyTJVWwR2YAZvNqE-HIYXxhtD8,298
 mbxai/openrouter/client.py,sha256=3LD6WDJ8wjo_nefH5d1NJCsrWPvBc_KBf2NsItUoSt8,18302
 mbxai/openrouter/config.py,sha256=Ia93s-auim9Sq71eunVDbn9ET5xX2zusXpV4JBdHAzs,3251
@@ -35,7 +36,7 @@ mbxai/tools/__init__.py,sha256=ogxrHvgJ7OR62Lmd5x9Eh5d2C0jqWyQis7Zy3yKpZ78,218
 mbxai/tools/client.py,sha256=2wFPD-UN3Y2DSyrnqxt2vvFgTYHzUl14_y0r6fhAWmM,17198
 mbxai/tools/example.py,sha256=1HgKK39zzUuwFbnp3f0ThyWVfA_8P28PZcTwaUw5K78,2232
 mbxai/tools/types.py,sha256=OFfM7scDGTm4FOcJA2ecj-fxL1MEBkqPsT3hqCL1Jto,9505
-mbxai-2.1.
-mbxai-2.1.
-mbxai-2.1.
-mbxai-2.1.
+mbxai-2.1.2.dist-info/METADATA,sha256=gC6y6zxmJ4MrP0ohbqVhZpTCGDvhqI-E_PGcZvgki3s,10018
+mbxai-2.1.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mbxai-2.1.2.dist-info/licenses/LICENSE,sha256=hEyhc4FxwYo3NQ40yNgZ7STqwVk-1_XcTXOnAPbGJAw,1069
+mbxai-2.1.2.dist-info/RECORD,,
{mbxai-2.1.1.dist-info → mbxai-2.1.2.dist-info}/WHEEL
File without changes
{mbxai-2.1.1.dist-info → mbxai-2.1.2.dist-info}/licenses/LICENSE
File without changes