chuk-ai-session-manager 0.1.1__py3-none-any.whl → 0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chuk_ai_session_manager/__init__.py +336 -34
- chuk_ai_session_manager/api/__init__.py +1 -0
- chuk_ai_session_manager/api/simple_api.py +376 -0
- chuk_ai_session_manager/infinite_conversation.py +7 -4
- chuk_ai_session_manager/models/session.py +27 -18
- chuk_ai_session_manager/session_aware_tool_processor.py +6 -4
- chuk_ai_session_manager/session_prompt_builder.py +6 -4
- chuk_ai_session_manager/session_storage.py +176 -0
- chuk_ai_session_manager/utils/status_display_utils.py +474 -0
- {chuk_ai_session_manager-0.1.1.dist-info → chuk_ai_session_manager-0.2.dist-info}/METADATA +9 -8
- chuk_ai_session_manager-0.2.dist-info/RECORD +23 -0
- chuk_ai_session_manager/storage/__init__.py +0 -44
- chuk_ai_session_manager/storage/base.py +0 -50
- chuk_ai_session_manager/storage/providers/file.py +0 -348
- chuk_ai_session_manager/storage/providers/memory.py +0 -96
- chuk_ai_session_manager/storage/providers/redis.py +0 -295
- chuk_ai_session_manager-0.1.1.dist-info/RECORD +0 -24
- /chuk_ai_session_manager/{storage/providers → utils}/__init__.py +0 -0
- {chuk_ai_session_manager-0.1.1.dist-info → chuk_ai_session_manager-0.2.dist-info}/WHEEL +0 -0
- {chuk_ai_session_manager-0.1.1.dist-info → chuk_ai_session_manager-0.2.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,176 @@
|
|
|
1
|
+
# chuk_ai_session_manager/session_storage.py
|
|
2
|
+
"""
|
|
3
|
+
CHUK Sessions storage backend for AI Session Manager.
|
|
4
|
+
|
|
5
|
+
Simple integration that stores AI sessions as JSON blobs in CHUK Sessions.
|
|
6
|
+
CHUK Sessions handles all storage concerns (memory, Redis, TTL, multi-tenancy).
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
import json
|
|
11
|
+
import logging
|
|
12
|
+
from typing import Any, Dict, List, Optional
|
|
13
|
+
|
|
14
|
+
from chuk_sessions import SessionManager as ChukSessionManager
|
|
15
|
+
from chuk_ai_session_manager.models.session import Session
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class SessionStorage:
    """
    CHUK Sessions backend for AI Session Manager.

    Stores AI sessions as JSON in CHUK Sessions custom metadata.
    All provider logic (memory, Redis, TTL, multi-tenancy) is handled by
    CHUK Sessions; this class only serializes/deserializes and caches.
    """

    def __init__(
        self,
        sandbox_id: str = "ai-session-manager",
        default_ttl_hours: int = 24
    ):
        """Bind this backend to one CHUK Sessions sandbox.

        Args:
            sandbox_id: Namespace passed to CHUK Sessions for multi-tenancy.
            default_ttl_hours: Default lifetime for newly allocated sessions.
        """
        self.chuk = ChukSessionManager(
            sandbox_id=sandbox_id,
            default_ttl_hours=default_ttl_hours
        )
        self.sandbox_id = sandbox_id
        # Write-through cache of deserialized Session objects keyed by id.
        # NOTE(review): entries are only evicted by delete(); confirm session
        # volume is small enough that an unbounded cache is acceptable.
        self._cache: Dict[str, Session] = {}

        logger.info(f"AI Session Manager using CHUK Sessions (sandbox: {sandbox_id})")

    async def get(self, session_id: str) -> Optional[Session]:
        """Get AI session by ID.

        Serves from the local cache when possible; otherwise validates the id
        with CHUK Sessions, loads the JSON blob from custom metadata, caches
        the deserialized Session, and returns it. Returns None when the
        session is missing, has no AI-session payload, or the lookup fails.
        """
        if session_id in self._cache:
            return self._cache[session_id]

        try:
            if not await self.chuk.validate_session(session_id):
                return None

            info = await self.chuk.get_session_info(session_id)
            if not info:
                return None

            custom_metadata = info.get('custom_metadata', {})
            ai_session_json = custom_metadata.get('ai_session_data')

            if not ai_session_json:
                return None

            session_data = json.loads(ai_session_json)
            ai_session = Session.model_validate(session_data)

            self._cache[session_id] = ai_session
            return ai_session

        except Exception as e:
            # Best-effort read: any backend or parse failure reads as "not found".
            logger.error(f"Failed to get AI session {session_id}: {e}")
            return None

    async def save(self, session: Session) -> None:
        """Save AI session to CHUK Sessions.

        Serializes with Pydantic's model_dump_json (handles datetimes),
        stores the blob plus summary fields in custom metadata, and updates
        the local cache on success.

        Raises:
            Exception: re-raises whatever the CHUK Sessions backend raised.
        """
        try:
            # Use Pydantic's model_dump_json which handles datetime serialization properly
            session_json = session.model_dump_json()
            user_id = self._extract_user_id(session)

            custom_metadata = {
                'ai_session_data': session_json,
                'event_count': len(session.events),
                'session_type': 'ai_session_manager'
            }

            await self.chuk.allocate_session(
                session_id=session.id,
                user_id=user_id,
                custom_metadata=custom_metadata
            )

            self._cache[session.id] = session

        except Exception as e:
            logger.error(f"Failed to save AI session {session.id}: {e}")
            raise

    async def delete(self, session_id: str) -> None:
        """Delete AI session from CHUK Sessions and drop it from the cache."""
        try:
            await self.chuk.delete_session(session_id)
            self._cache.pop(session_id, None)
        except Exception as e:
            logger.error(f"Failed to delete AI session {session_id}: {e}")
            raise

    async def list_sessions(self, prefix: str = "") -> List[str]:
        """List AI session IDs, optionally filtered by prefix.

        NOTE(review): only sessions present in this process's local cache are
        returned — CHUK Sessions itself is not enumerated. Confirm callers do
        not expect a global listing.
        """
        session_ids = list(self._cache.keys())
        if prefix:
            session_ids = [sid for sid in session_ids if sid.startswith(prefix)]
        return session_ids

    def _extract_user_id(self, session: Session) -> Optional[str]:
        """Extract user ID from AI session metadata, or None when absent."""
        try:
            if hasattr(session.metadata, 'properties'):
                return session.metadata.properties.get('user_id')
        except (AttributeError, TypeError):
            # Metadata without a dict-like `properties` → treat as anonymous.
            # (Was a bare `except:`, which would also have swallowed
            # KeyboardInterrupt/SystemExit.)
            pass
        return None

    async def extend_session_ttl(self, session_id: str, additional_hours: int) -> bool:
        """Extend session TTL; delegated entirely to CHUK Sessions."""
        return await self.chuk.extend_session_ttl(session_id, additional_hours)

    def get_stats(self) -> Dict[str, Any]:
        """Get storage statistics for this backend instance."""
        return {
            'backend': 'chuk_sessions',
            'sandbox_id': self.sandbox_id,
            'cached_ai_sessions': len(self._cache),
            'chuk_sessions_stats': self.chuk.get_cache_stats()
        }
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
# Process-wide backend singleton, created lazily on first use.
_backend: Optional[SessionStorage] = None


def get_backend() -> SessionStorage:
    """Get the global CHUK Sessions backend, instantiating it on demand."""
    global _backend
    if _backend is None:
        _backend = SessionStorage()
    return _backend


def setup_chuk_sessions_storage(
    sandbox_id: str = "ai-session-manager",
    default_ttl_hours: int = 24
) -> SessionStorage:
    """Install a freshly configured CHUK Sessions backend as the global one.

    Returns the newly installed backend so callers can use it directly.
    """
    global _backend
    replacement = SessionStorage(sandbox_id=sandbox_id, default_ttl_hours=default_ttl_hours)
    _backend = replacement
    return replacement
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
class ChukSessionsStore:
    """Storage interface adapter for CHUK Sessions.

    A thin async facade that forwards every operation to a SessionStorage
    backend; when none is supplied, the process-wide backend is used.
    """

    def __init__(self, backend: Optional[SessionStorage] = None):
        # Fall back to the shared global backend when no explicit one is given.
        self.backend = backend or get_backend()

    async def get(self, session_id: str) -> Optional[Session]:
        """Fetch the session with *session_id*, or None when unknown."""
        return await self.backend.get(session_id)

    async def save(self, session: Session) -> None:
        """Persist *session* through the backend."""
        await self.backend.save(session)

    async def delete(self, session_id: str) -> None:
        """Remove the session with *session_id* from storage."""
        await self.backend.delete(session_id)

    async def list_sessions(self, prefix: str = "") -> List[str]:
        """Return known session ids, optionally filtered by *prefix*."""
        return await self.backend.list_sessions(prefix)
|
|
@@ -0,0 +1,474 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
examples/retry_prompt_demo_fixed.py
|
|
4
|
+
──────────────────────────────────
|
|
5
|
+
Demonstrates LLM-level retry patterns with chuk_ai_session_manager.
|
|
6
|
+
|
|
7
|
+
This shows:
|
|
8
|
+
• Retrying LLM calls until they produce valid tool calls
|
|
9
|
+
• Using chuk_tool_processor's built-in reliability features
|
|
10
|
+
• Session tracking of the entire retry process
|
|
11
|
+
• Proper separation: LLM retries vs tool execution reliability
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
import asyncio
|
|
17
|
+
import json
|
|
18
|
+
import logging
|
|
19
|
+
import pprint
|
|
20
|
+
import sys
|
|
21
|
+
import os
|
|
22
|
+
from typing import Dict, List
|
|
23
|
+
|
|
24
|
+
# Add current directory to path
|
|
25
|
+
sys.path.insert(0, os.getcwd())
|
|
26
|
+
|
|
27
|
+
logging.basicConfig(level=logging.INFO, format="%(levelname)s | %(message)s")
|
|
28
|
+
|
|
29
|
+
# Session imports - FIXED import paths
|
|
30
|
+
from chuk_ai_session_manager.session_storage import get_backend, ChukSessionsStore, setup_chuk_sessions_storage
|
|
31
|
+
from chuk_ai_session_manager.models.session import Session
|
|
32
|
+
from chuk_ai_session_manager.models.session_event import SessionEvent
|
|
33
|
+
from chuk_ai_session_manager.models.event_source import EventSource
|
|
34
|
+
from chuk_ai_session_manager.models.event_type import EventType
|
|
35
|
+
from chuk_ai_session_manager.session_prompt_builder import build_prompt_from_session
|
|
36
|
+
|
|
37
|
+
# Status display utilities
|
|
38
|
+
def format_status(success: bool, success_msg: str = "SUCCESS", failure_msg: str = "FAILED") -> str:
    """Format status with correct emoji.

    Returns "✅ <success_msg>" when *success* is truthy, otherwise
    "❌ <failure_msg>".
    """
    emoji, text = ("✅", success_msg) if success else ("❌", failure_msg)
    return f"{emoji} {text}"
|
|
44
|
+
|
|
45
|
+
# Import from chuk_tool_processor (using the working pattern)
|
|
46
|
+
from chuk_tool_processor.registry import initialize, get_default_registry
|
|
47
|
+
from chuk_tool_processor.models.tool_call import ToolCall
|
|
48
|
+
from chuk_tool_processor.execution.strategies.inprocess_strategy import InProcessStrategy
|
|
49
|
+
from chuk_tool_processor.execution.tool_executor import ToolExecutor
|
|
50
|
+
|
|
51
|
+
# Import sample tools - this will trigger registration
|
|
52
|
+
import sample_tools
|
|
53
|
+
|
|
54
|
+
##############################################################################
|
|
55
|
+
# Custom Tool Processor (based on working OpenAI demo pattern)
|
|
56
|
+
##############################################################################
|
|
57
|
+
|
|
58
|
+
class CustomSessionAwareToolProcessor:
    """Custom tool processor that properly integrates with chuk_tool_processor.

    Bridges an OpenAI-style LLM message to chuk_tool_processor execution,
    recording both the LLM message and every tool result as events on the
    owning session (tool events are linked back via `parent_event_id`).
    """

    def __init__(self, session_id: str, registry, executor):
        # Prefer the async `create()` factory; this constructor only stores
        # pre-built collaborators.
        self.session_id = session_id
        self.registry = registry
        self.executor = executor

    @classmethod
    async def create(cls, session_id: str):
        """Create a custom session-aware tool processor.

        Builds an in-process executor against the default tool registry and
        returns a ready-to-use instance bound to *session_id*.
        """
        # Get the registry
        registry = await get_default_registry()

        # Create execution strategy and executor
        strategy = InProcessStrategy(registry)
        executor = ToolExecutor(registry=registry, strategy=strategy)

        return cls(session_id, registry, executor)

    async def process_llm_message(self, llm_msg: dict) -> list:
        """Process tool calls from an LLM message.

        Steps: (1) log *llm_msg* as a MESSAGE event on the session,
        (2) convert its `tool_calls` entries to ToolCall objects — arguments
        that are not valid JSON fall back to `{}`, (3) execute them, and
        (4) log one TOOL_CALL event per result.

        Returns:
            The list of tool execution results; empty when the message
            carries no tool calls (the MESSAGE event is still recorded).

        Raises:
            ValueError: if *self.session_id* is unknown to the store.
        """
        # Get the session
        backend = get_backend()
        store = ChukSessionsStore(backend)
        session = await store.get(self.session_id)
        if not session:
            raise ValueError(f"Session {self.session_id} not found")

        # Add the LLM message as an event
        llm_event = await SessionEvent.create_with_tokens(
            message=llm_msg,
            prompt="",
            completion=json.dumps(llm_msg, ensure_ascii=False),
            model="gpt-4o-mini",
            source=EventSource.LLM,
            type=EventType.MESSAGE,
        )
        await session.add_event_and_save(llm_event)

        # Extract tool calls
        tool_calls = llm_msg.get('tool_calls', [])
        if not tool_calls:
            return []

        # Convert to ToolCall objects
        chuk_tool_calls = []
        for call in tool_calls:
            func = call.get('function', {})
            tool_name = func.get('name', '')
            try:
                arguments = json.loads(func.get('arguments', '{}'))
            except json.JSONDecodeError:
                # Malformed arguments are tolerated: the tool runs with {}.
                arguments = {}

            chuk_tool_calls.append(ToolCall(
                tool=tool_name,
                arguments=arguments
            ))

        # Execute the tools
        print(f"🔧 Executing {len(chuk_tool_calls)} tools...")
        results = await self.executor.execute(chuk_tool_calls)

        # Log each result as a session event
        for result in results:
            # Convert result to string for session storage
            result_str = str(result.result) if result.result is not None else "null"

            tool_event = await SessionEvent.create_with_tokens(
                message={
                    "tool": result.tool,
                    "arguments": getattr(result, "arguments", None),
                    "result": result.result,
                    "error": result.error,
                },
                # default=str keeps non-JSON-serializable arguments loggable.
                prompt=f"{result.tool}({json.dumps(getattr(result, 'arguments', None), default=str)})",
                completion=result_str,
                model="tool-execution",
                source=EventSource.SYSTEM,
                type=EventType.TOOL_CALL,
            )
            # Link the tool event back to the LLM message that triggered it.
            await tool_event.set_metadata("parent_event_id", llm_event.id)
            await session.add_event_and_save(tool_event)

        return results
|
|
144
|
+
|
|
145
|
+
##############################################################################
|
|
146
|
+
# LLM Simulation: Unreliable at first, then cooperative
|
|
147
|
+
##############################################################################
|
|
148
|
+
|
|
149
|
+
class UnreliableLLM:
    """Simulates an LLM that sometimes doesn't follow tool-calling instructions.

    Successive calls replay a fixed script: refuse to call tools, request a
    tool that does not exist, send malformed JSON arguments, then finally
    cooperate. Once the script is exhausted, every later call returns the
    cooperative response again.
    """

    def __init__(self):
        # How many chat_completion() calls have been made so far.
        self.call_count = 0
        # Scripted responses, replayed in order on successive calls.
        self.scenarios = [
            # 1: refuses to use tools at all
            {
                "role": "assistant",
                "content": "I don't need to use any tools. The weather in London is probably fine!",
                "tool_calls": []
            },
            # 2: asks for a tool that is not registered
            {
                "role": "assistant",
                "content": None,
                "tool_calls": [
                    {
                        "id": "call_1",
                        "type": "function",
                        "function": {
                            "name": "nonexistent_weather_api",
                            "arguments": '{"city": "London"}'
                        }
                    }
                ]
            },
            # 3: sends arguments that are not valid JSON
            {
                "role": "assistant",
                "content": None,
                "tool_calls": [
                    {
                        "id": "call_2",
                        "type": "function",
                        "function": {
                            "name": "weather",
                            "arguments": '{"location": London}'  # missing quotes - invalid JSON
                        }
                    }
                ]
            },
            # 4: finally a well-formed, executable tool call
            {
                "role": "assistant",
                "content": None,
                "tool_calls": [
                    {
                        "id": "call_3",
                        "type": "function",
                        "function": {
                            "name": "weather",
                            "arguments": '{"location": "London"}'
                        }
                    }
                ]
            }
        ]

    async def chat_completion(self, messages: List[Dict], **kwargs) -> Dict:
        """Simulate OpenAI chat completion with unreliable behavior."""
        self.call_count += 1

        if self.call_count > len(self.scenarios):
            # Script exhausted: keep returning the cooperative response.
            return self.scenarios[-1]

        response = self.scenarios[self.call_count - 1]
        print(f" 📞 LLM Call {self.call_count}: {self._describe_response(response)}")
        return response

    def _describe_response(self, response: Dict) -> str:
        """Describe what the LLM response contains."""
        calls = response.get("tool_calls")
        if calls:
            if len(calls) == 1:
                func_name = calls[0].get("function", {}).get("name", "unknown")
                return f"Wants to call '{func_name}'"
            return f"Wants to call {len(calls)} tools"
        if response.get("content"):
            return f"Text response: '{response['content'][:50]}...'"
        return "Empty response"
|
|
233
|
+
|
|
234
|
+
##############################################################################
|
|
235
|
+
# Retry Logic for LLM Cooperation
|
|
236
|
+
##############################################################################
|
|
237
|
+
|
|
238
|
+
class LLMRetryManager:
    """Manages retrying LLM calls until they produce valid, executable tool calls.

    Every attempt (failed or successful) is recorded as a session event so
    the full retry history is auditable afterwards.
    """

    def __init__(self, session_id: str, max_attempts: int = 5):
        # Session the retry history is recorded on.
        self.session_id = session_id
        # Hard ceiling on LLM calls before giving up.
        self.max_attempts = max_attempts

    async def get_valid_tool_calls(self, llm, messages: List[Dict], processor: CustomSessionAwareToolProcessor) -> tuple[Dict, List]:
        """
        Keep calling the LLM until it produces valid, executable tool calls.

        Each attempt: call the LLM, log the raw response as an event (with
        success=False), skip responses without tool calls, otherwise execute
        them via *processor*. An attempt counts as successful only when every
        tool result has no error; the matching attempt event is then flipped
        to success=True.

        Returns:
            Tuple of (successful_llm_response, tool_results)

        Raises:
            RuntimeError: when no attempt succeeds within max_attempts.
        """
        backend = get_backend()
        store = ChukSessionsStore(backend)
        session = await store.get(self.session_id)

        for attempt in range(1, self.max_attempts + 1):
            print(f"\n🔄 LLM Attempt {attempt}/{self.max_attempts}")

            # Call LLM
            response = await llm.chat_completion(messages)

            # Log the LLM response attempt
            attempt_event = SessionEvent(
                message={
                    "attempt": attempt,
                    "response": response,
                    "success": False  # Will update if successful
                },
                type=EventType.MESSAGE,
                source=EventSource.LLM,
            )
            await session.add_event_and_save(attempt_event)

            # Check if response has tool calls
            tool_calls = response.get("tool_calls", [])
            if not tool_calls:
                print(f" {format_status(False, failure_msg='No tool calls in response')}")
                continue

            # Try to execute the tool calls
            try:
                print(f" 🔧 Attempting to execute {len(tool_calls)} tool calls...")

                # Check what tools are available vs requested (diagnostic only;
                # unknown tools surface as per-result errors below).
                registry = await get_default_registry()
                tools_list = await registry.list_tools()
                available_tools = [name for namespace, name in tools_list]
                requested_tool = tool_calls[0].get("function", {}).get("name", "unknown")
                print(f" 🔍 Requested tool: {requested_tool}")
                print(f" 🔍 Available tools: {available_tools}")

                tool_results = await processor.process_llm_message(response)

                # Check if all tools executed successfully
                failed_tools = [r for r in tool_results if r.error]
                if failed_tools:
                    print(f" {format_status(False, failure_msg=f'{len(failed_tools)} tools failed:')}")
                    for failed in failed_tools:
                        print(f" • {failed.tool}: {failed.error}")
                    continue

                # Success! All tools executed
                print(f" {format_status(True, success_msg=f'All {len(tool_results)} tools executed successfully')}")

                # Update the last event to mark success. Re-fetch the session
                # because process_llm_message() appended events through its
                # own store handle.
                session = await store.get(self.session_id)
                if session.events:
                    # Find the most recent LLM attempt event
                    for event in reversed(session.events):
                        if (event.type == EventType.MESSAGE and
                            event.source == EventSource.LLM and
                            isinstance(event.message, dict) and
                            "attempt" in event.message):
                            event.message["success"] = True
                            await store.save(session)
                            break

                return response, tool_results

            except Exception as e:
                # Any execution failure just burns this attempt and retries.
                print(f" {format_status(False, failure_msg=f'Tool execution failed: {e}')}")
                continue

        # If we get here, all attempts failed
        raise RuntimeError(f"Failed to get valid tool calls after {self.max_attempts} attempts")
|
|
326
|
+
|
|
327
|
+
##############################################################################
|
|
328
|
+
# Demo Flow
|
|
329
|
+
##############################################################################
|
|
330
|
+
|
|
331
|
+
async def main() -> None:
    """Run the retry demo end to end.

    Flow: configure CHUK Sessions storage, initialize the tool registry,
    create a session with one user message, then drive an intentionally
    unreliable LLM through LLMRetryManager until its tool calls execute.
    Finally prints the session event tree, the next-turn prompt, and
    token/cost statistics.
    """
    print("🚀 Starting LLM Retry Demo")
    print(" (Demonstrates retry logic for uncooperative LLMs)")
    print(" (Tool execution uses chuk_tool_processor's built-in reliability)")

    # Setup session storage - FIXED
    setup_chuk_sessions_storage(sandbox_id="retry-prompt-demo", default_ttl_hours=1)
    backend = get_backend()
    store = ChukSessionsStore(backend)

    # Initialize tool registry first
    print("\n🔧 Initializing tool registry...")
    registry = await initialize()
    tools_list = await registry.list_tools()
    print(f"📋 Found {len(tools_list)} registered tools:")
    for namespace, tool_name in tools_list:
        print(f" • {namespace}.{tool_name}")

    # Create session
    session = await Session.create()
    await session.metadata.set_property("demo", "retry_prompt")
    await store.save(session)

    # Add user request
    user_prompt = "What's the weather like in London? I need to know if I should bring an umbrella."
    user_event = await SessionEvent.create_with_tokens(
        message=user_prompt,
        prompt=user_prompt,
        model="gpt-4o-mini",
        source=EventSource.USER,
        type=EventType.MESSAGE
    )
    await session.add_event_and_save(user_event)
    print(f"\n👤 User: {user_prompt}")

    # Create components
    llm = UnreliableLLM()
    processor = await CustomSessionAwareToolProcessor.create(session_id=session.id)
    # max_attempts=6 > the 4 scripted misbehaviours, so the demo succeeds.
    retry_manager = LLMRetryManager(session_id=session.id, max_attempts=6)

    # Build initial messages for LLM
    messages = [
        {"role": "system", "content": "You are a helpful assistant. When users ask about weather, use the weather tool to get current information."},
        {"role": "user", "content": user_prompt}
    ]

    # Attempt to get valid tool calls with retries
    try:
        print(f"\n🎯 Attempting to get valid tool calls (max {retry_manager.max_attempts} attempts)...")
        # NOTE(review): final_response is unused below — kept for symmetry
        # with the (response, results) return contract.
        final_response, tool_results = await retry_manager.get_valid_tool_calls(llm, messages, processor)

        print(f"\n{'='*60}")
        print("🎉 SUCCESS! LLM cooperated and tools executed successfully")
        print(f"{'='*60}")

        # Show tool results
        print("\n🛠️ Tool Results:")
        for i, result in enumerate(tool_results, 1):
            print(f"\n Tool {i}: {result.tool}")
            if result.error:
                print(f" ❌ Error: {result.error}")
            elif isinstance(result.result, dict):
                print(f" 📊 Result:")
                for key, value in result.result.items():
                    print(f" {key}: {value}")
            else:
                print(f" 📊 Result: {result.result}")

    except RuntimeError as e:
        print(f"\n❌ FAILED: {e}")

        # Still show the session events for debugging
        print("\n🔍 Debugging: Session events created:")
        session = await store.get(session.id)
        for i, event in enumerate(session.events, 1):
            print(f" {i}. {event.type.value}/{event.source.value}: {str(event.message)[:100]}...")
        return

    # Show session event tree (re-fetch: the processor and retry manager
    # appended events through their own store handles)
    session = await store.get(session.id)
    print(f"\n{'='*60}")
    print("📊 Session Event Tree (Complete Retry History):")
    print(f"{'='*60}")

    for i, event in enumerate(session.events, 1):
        event_id = event.id[:8] + "..."
        if event.type == EventType.MESSAGE and event.source == EventSource.USER:
            print(f"{i}. USER MESSAGE [{event_id}]")
            print(f" Content: {event.message}")
        elif event.type == EventType.MESSAGE and event.source == EventSource.LLM:
            # Retry-attempt events carry an "attempt" key; others are plain
            # LLM messages logged by the tool processor.
            if isinstance(event.message, dict) and "attempt" in event.message:
                attempt = event.message["attempt"]
                success = event.message.get("success", False)
                status = "✅ SUCCESS" if success else "❌ FAILED"
                print(f"{i}. LLM ATTEMPT {attempt} [{event_id}] - {status}")
            else:
                print(f"{i}. LLM MESSAGE [{event_id}]")
        elif event.type == EventType.TOOL_CALL:
            tool_msg = event.message or {}
            tool_name = tool_msg.get("tool", "unknown")
            error = tool_msg.get("error")
            print(f"{i}. TOOL CALL [{event_id}] - {tool_name}")
            if error:
                print(f" ❌ Error: {error}")
            else:
                print(f" ✅ Success")

    # Show final prompt for next turn
    print(f"\n{'='*60}")
    print("🔄 Final Prompt for Next LLM Turn:")
    print(f"{'='*60}")
    next_prompt = await build_prompt_from_session(session)
    pprint.pp(next_prompt, width=80)

    # Show session statistics
    print(f"\n{'='*60}")
    print("📈 Session Statistics:")
    print(f"{'='*60}")
    print(f" Session ID: {session.id}")
    print(f" Total events: {len(session.events)}")
    print(f" Total tokens: {session.total_tokens}")
    print(f" Estimated cost: ${session.total_cost:.6f}")

    # Event breakdown
    event_types = {}
    for event in session.events:
        event_type = f"{event.source.value}:{event.type.value}"
        event_types[event_type] = event_types.get(event_type, 0) + 1

    print(f" Event breakdown:")
    for event_type, count in event_types.items():
        print(f" {event_type}: {count}")

    print(f"\n{'='*60}")
    print("🎯 Key Takeaways:")
    print(" • LLM retries handled at application level")
    print(" • Tool execution reliability handled by chuk_tool_processor")
    print(" • Complete audit trail in session events")
    print(" • Separation of concerns: LLM cooperation vs tool reliability")
    print(" • Session tracks all attempts for debugging and analytics")
    print(f"{'='*60}")


if __name__ == "__main__":
    asyncio.run(main())
|