chuk-ai-session-manager 0.3__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,474 +0,0 @@
1
- #!/usr/bin/env python3
2
- """
3
- examples/retry_prompt_demo_fixed.py
4
- ──────────────────────────────────
5
- Demonstrates LLM-level retry patterns with chuk_ai_session_manager.
6
-
7
- This shows:
8
- • Retrying LLM calls until they produce valid tool calls
9
- • Using chuk_tool_processor's built-in reliability features
10
- • Session tracking of the entire retry process
11
- • Proper separation: LLM retries vs tool execution reliability
12
- """
13
-
14
- from __future__ import annotations
15
-
16
- import asyncio
17
- import json
18
- import logging
19
- import pprint
20
- import sys
21
- import os
22
- from typing import Dict, List
23
-
24
- # Add current directory to path
25
- sys.path.insert(0, os.getcwd())
26
-
27
- logging.basicConfig(level=logging.INFO, format="%(levelname)s | %(message)s")
28
-
29
- # Session imports - FIXED import paths
30
- from chuk_ai_session_manager.session_storage import get_backend, ChukSessionsStore, setup_chuk_sessions_storage
31
- from chuk_ai_session_manager.models.session import Session
32
- from chuk_ai_session_manager.models.session_event import SessionEvent
33
- from chuk_ai_session_manager.models.event_source import EventSource
34
- from chuk_ai_session_manager.models.event_type import EventType
35
- from chuk_ai_session_manager.session_prompt_builder import build_prompt_from_session
36
-
37
- # Status display utilities
38
def format_status(success: bool, success_msg: str = "SUCCESS", failure_msg: str = "FAILED") -> str:
    """Return a status line prefixed with the matching emoji.

    Picks ``success_msg`` behind a check mark when *success* is true,
    otherwise ``failure_msg`` behind a cross mark.
    """
    return f"✅ {success_msg}" if success else f"❌ {failure_msg}"
44
-
45
- # Import from chuk_tool_processor (using the working pattern)
46
- from chuk_tool_processor.registry import initialize, get_default_registry
47
- from chuk_tool_processor.models.tool_call import ToolCall
48
- from chuk_tool_processor.execution.strategies.inprocess_strategy import InProcessStrategy
49
- from chuk_tool_processor.execution.tool_executor import ToolExecutor
50
-
51
- # Import sample tools - this will trigger registration
52
- import sample_tools
53
-
54
- ##############################################################################
55
- # Custom Tool Processor (based on working OpenAI demo pattern)
56
- ##############################################################################
57
-
58
class CustomSessionAwareToolProcessor:
    """Custom tool processor that properly integrates with chuk_tool_processor.

    Wraps a ToolExecutor so that the LLM message and every tool result are
    also journaled as events on the session identified by ``session_id``.
    """

    def __init__(self, session_id: str, registry, executor):
        self.session_id = session_id
        self.registry = registry
        self.executor = executor

    @classmethod
    async def create(cls, session_id: str):
        """Create a custom session-aware tool processor."""
        registry = await get_default_registry()
        # In-process strategy: tools execute inside this interpreter.
        executor = ToolExecutor(registry=registry, strategy=InProcessStrategy(registry))
        return cls(session_id, registry, executor)

    async def process_llm_message(self, llm_msg: dict) -> list:
        """Process tool calls from an LLM message.

        Records the LLM message on the session, executes any tool calls it
        carries, records each result as a child event linked to the LLM
        event, and returns the raw execution results.

        Raises:
            ValueError: if the session id no longer resolves to a session.
        """
        store = ChukSessionsStore(get_backend())
        session = await store.get(self.session_id)
        if not session:
            raise ValueError(f"Session {self.session_id} not found")

        # Journal the assistant message itself before touching any tools.
        llm_event = await SessionEvent.create_with_tokens(
            message=llm_msg,
            prompt="",
            completion=json.dumps(llm_msg, ensure_ascii=False),
            model="gpt-4o-mini",
            source=EventSource.LLM,
            type=EventType.MESSAGE,
        )
        await session.add_event_and_save(llm_event)

        raw_calls = llm_msg.get('tool_calls', [])
        if not raw_calls:
            return []

        # Translate OpenAI-style call dicts into chuk ToolCall objects;
        # unparseable argument JSON degrades to an empty argument dict.
        calls = []
        for raw in raw_calls:
            func = raw.get('function', {})
            try:
                args = json.loads(func.get('arguments', '{}'))
            except json.JSONDecodeError:
                args = {}
            calls.append(ToolCall(tool=func.get('name', ''), arguments=args))

        print(f"🔧 Executing {len(calls)} tools...")
        results = await self.executor.execute(calls)

        # Mirror every result into the session, parented to the LLM event.
        for res in results:
            completion_text = str(res.result) if res.result is not None else "null"
            tool_event = await SessionEvent.create_with_tokens(
                message={
                    "tool": res.tool,
                    "arguments": getattr(res, "arguments", None),
                    "result": res.result,
                    "error": res.error,
                },
                prompt=f"{res.tool}({json.dumps(getattr(res, 'arguments', None), default=str)})",
                completion=completion_text,
                model="tool-execution",
                source=EventSource.SYSTEM,
                type=EventType.TOOL_CALL,
            )
            await tool_event.set_metadata("parent_event_id", llm_event.id)
            await session.add_event_and_save(tool_event)

        return results
144
-
145
- ##############################################################################
146
- # LLM Simulation: Unreliable at first, then cooperative
147
- ##############################################################################
148
-
149
class UnreliableLLM:
    """Simulates an LLM that sometimes doesn't follow tool-calling instructions."""

    def __init__(self):
        # Scripted responses: three uncooperative attempts, then one good one.
        self.call_count = 0
        self.scenarios = [
            # Scenario 1: Refuses to use tools
            {
                "role": "assistant",
                "content": "I don't need to use any tools. The weather in London is probably fine!",
                "tool_calls": [],
            },
            # Scenario 2: Tries to use non-existent tool
            {
                "role": "assistant",
                "content": None,
                "tool_calls": [
                    {
                        "id": "call_1",
                        "type": "function",
                        "function": {
                            "name": "nonexistent_weather_api",
                            "arguments": '{"city": "London"}',
                        },
                    },
                ],
            },
            # Scenario 3: Invalid JSON in arguments
            {
                "role": "assistant",
                "content": None,
                "tool_calls": [
                    {
                        "id": "call_2",
                        "type": "function",
                        "function": {
                            "name": "weather",
                            "arguments": '{"location": London}',  # Missing quotes - invalid JSON
                        },
                    },
                ],
            },
            # Scenario 4: Finally cooperates correctly
            {
                "role": "assistant",
                "content": None,
                "tool_calls": [
                    {
                        "id": "call_3",
                        "type": "function",
                        "function": {
                            "name": "weather",
                            "arguments": '{"location": "London"}',
                        },
                    },
                ],
            },
        ]

    async def chat_completion(self, messages: List[Dict], **kwargs) -> Dict:
        """Simulate OpenAI chat completion with unreliable behavior."""
        self.call_count += 1
        if self.call_count > len(self.scenarios):
            # Script exhausted: keep replaying the cooperative final scenario.
            return self.scenarios[-1]
        response = self.scenarios[self.call_count - 1]
        print(f" 📞 LLM Call {self.call_count}: {self._describe_response(response)}")
        return response

    def _describe_response(self, response: Dict) -> str:
        """Describe what the LLM response contains."""
        calls = response.get("tool_calls")
        if calls:
            if len(calls) == 1:
                name = calls[0].get("function", {}).get("name", "unknown")
                return f"Wants to call '{name}'"
            return f"Wants to call {len(calls)} tools"
        if response.get("content"):
            return f"Text response: '{response['content'][:50]}...'"
        return "Empty response"
233
-
234
- ##############################################################################
235
- # Retry Logic for LLM Cooperation
236
- ##############################################################################
237
-
238
class LLMRetryManager:
    """Manages retrying LLM calls until they produce valid, executable tool calls."""

    def __init__(self, session_id: str, max_attempts: int = 5):
        self.session_id = session_id
        self.max_attempts = max_attempts

    async def _record_attempt(self, session, attempt: int, response: Dict) -> None:
        # Journal every attempt as unsuccessful; a winner is patched later.
        await session.add_event_and_save(SessionEvent(
            message={
                "attempt": attempt,
                "response": response,
                "success": False  # Will update if successful
            },
            type=EventType.MESSAGE,
            source=EventSource.LLM,
        ))

    async def _mark_success(self, store) -> None:
        # Re-read the session and flip the newest attempt event to success.
        session = await store.get(self.session_id)
        if session.events:
            for event in reversed(session.events):
                if (event.type == EventType.MESSAGE and
                        event.source == EventSource.LLM and
                        isinstance(event.message, dict) and
                        "attempt" in event.message):
                    event.message["success"] = True
                    await store.save(session)
                    break

    async def get_valid_tool_calls(self, llm, messages: List[Dict], processor: CustomSessionAwareToolProcessor) -> tuple[Dict, List]:
        """
        Keep calling the LLM until it produces valid, executable tool calls.

        Returns:
            Tuple of (successful_llm_response, tool_results)

        Raises:
            RuntimeError: when no attempt yields cleanly executed tool calls.
        """
        store = ChukSessionsStore(get_backend())
        session = await store.get(self.session_id)

        for attempt in range(1, self.max_attempts + 1):
            print(f"\n🔄 LLM Attempt {attempt}/{self.max_attempts}")

            response = await llm.chat_completion(messages)
            await self._record_attempt(session, attempt, response)

            requested = response.get("tool_calls", [])
            if not requested:
                print(f" {format_status(False, failure_msg='No tool calls in response')}")
                continue

            # Any execution failure below counts as a failed attempt and
            # triggers another retry rather than aborting the loop.
            try:
                print(f" 🔧 Attempting to execute {len(requested)} tool calls...")

                # Surface what the registry offers vs. what the LLM asked for.
                registry = await get_default_registry()
                listed = await registry.list_tools()
                available_tools = [name for namespace, name in listed]
                requested_tool = requested[0].get("function", {}).get("name", "unknown")
                print(f" 🔍 Requested tool: {requested_tool}")
                print(f" 🔍 Available tools: {available_tools}")

                tool_results = await processor.process_llm_message(response)

                failures = [r for r in tool_results if r.error]
                if failures:
                    print(f" {format_status(False, failure_msg=f'{len(failures)} tools failed:')}")
                    for bad in failures:
                        print(f" • {bad.tool}: {bad.error}")
                    continue

                # Success! All tools executed.
                print(f" {format_status(True, success_msg=f'All {len(tool_results)} tools executed successfully')}")
                await self._mark_success(store)
                return response, tool_results

            except Exception as e:
                print(f" {format_status(False, failure_msg=f'Tool execution failed: {e}')}")
                continue

        # Every attempt was consumed without a clean execution.
        raise RuntimeError(f"Failed to get valid tool calls after {self.max_attempts} attempts")
326
-
327
- ##############################################################################
328
- # Demo Flow
329
- ##############################################################################
330
-
331
def _print_tool_results(tool_results) -> None:
    """Pretty-print each tool result from the successful attempt."""
    print("\n🛠️ Tool Results:")
    for i, result in enumerate(tool_results, 1):
        print(f"\n Tool {i}: {result.tool}")
        if result.error:
            print(f" ❌ Error: {result.error}")
        elif isinstance(result.result, dict):
            print(f" 📊 Result:")
            for key, value in result.result.items():
                print(f" {key}: {value}")
        else:
            print(f" 📊 Result: {result.result}")

def _print_event_tree(session) -> None:
    """Render every session event, including each failed LLM attempt."""
    print(f"\n{'='*60}")
    print("📊 Session Event Tree (Complete Retry History):")
    print(f"{'='*60}")

    for i, event in enumerate(session.events, 1):
        event_id = event.id[:8] + "..."
        if event.type == EventType.MESSAGE and event.source == EventSource.USER:
            print(f"{i}. USER MESSAGE [{event_id}]")
            print(f" Content: {event.message}")
        elif event.type == EventType.MESSAGE and event.source == EventSource.LLM:
            if isinstance(event.message, dict) and "attempt" in event.message:
                status = "✅ SUCCESS" if event.message.get("success", False) else "❌ FAILED"
                print(f"{i}. LLM ATTEMPT {event.message['attempt']} [{event_id}] - {status}")
            else:
                print(f"{i}. LLM MESSAGE [{event_id}]")
        elif event.type == EventType.TOOL_CALL:
            tool_msg = event.message or {}
            print(f"{i}. TOOL CALL [{event_id}] - {tool_msg.get('tool', 'unknown')}")
            error = tool_msg.get("error")
            if error:
                print(f" ❌ Error: {error}")
            else:
                print(f" ✅ Success")

def _print_statistics(session) -> None:
    """Summarize token usage, cost, and event composition for the session."""
    print(f"\n{'='*60}")
    print("📈 Session Statistics:")
    print(f"{'='*60}")
    print(f" Session ID: {session.id}")
    print(f" Total events: {len(session.events)}")
    print(f" Total tokens: {session.total_tokens}")
    print(f" Estimated cost: ${session.total_cost:.6f}")

    # Tally events by "source:type" label.
    event_types = {}
    for event in session.events:
        label = f"{event.source.value}:{event.type.value}"
        event_types[label] = event_types.get(label, 0) + 1

    print(f" Event breakdown:")
    for label, count in event_types.items():
        print(f" {label}: {count}")

async def main() -> None:
    """Run the full retry demo: setup, retried LLM turn, and reporting."""
    print("🚀 Starting LLM Retry Demo")
    print(" (Demonstrates retry logic for uncooperative LLMs)")
    print(" (Tool execution uses chuk_tool_processor's built-in reliability)")

    # Session storage must exist before any Session or event is persisted.
    setup_chuk_sessions_storage(sandbox_id="retry-prompt-demo", default_ttl_hours=1)
    store = ChukSessionsStore(get_backend())

    # Initialize tool registry first so sample tools are resolvable.
    print("\n🔧 Initializing tool registry...")
    registry = await initialize()
    tools_list = await registry.list_tools()
    print(f"📋 Found {len(tools_list)} registered tools:")
    for namespace, tool_name in tools_list:
        print(f" • {namespace}.{tool_name}")

    session = await Session.create()
    await session.metadata.set_property("demo", "retry_prompt")
    await store.save(session)

    # Seed the conversation with the user's request.
    user_prompt = "What's the weather like in London? I need to know if I should bring an umbrella."
    user_event = await SessionEvent.create_with_tokens(
        message=user_prompt,
        prompt=user_prompt,
        model="gpt-4o-mini",
        source=EventSource.USER,
        type=EventType.MESSAGE
    )
    await session.add_event_and_save(user_event)
    print(f"\n👤 User: {user_prompt}")

    llm = UnreliableLLM()
    processor = await CustomSessionAwareToolProcessor.create(session_id=session.id)
    retry_manager = LLMRetryManager(session_id=session.id, max_attempts=6)

    messages = [
        {"role": "system", "content": "You are a helpful assistant. When users ask about weather, use the weather tool to get current information."},
        {"role": "user", "content": user_prompt}
    ]

    try:
        print(f"\n🎯 Attempting to get valid tool calls (max {retry_manager.max_attempts} attempts)...")
        final_response, tool_results = await retry_manager.get_valid_tool_calls(llm, messages, processor)

        print(f"\n{'='*60}")
        print("🎉 SUCCESS! LLM cooperated and tools executed successfully")
        print(f"{'='*60}")
        _print_tool_results(tool_results)
    except RuntimeError as e:
        print(f"\n❌ FAILED: {e}")
        # Dump whatever events were recorded so the failure is debuggable.
        print("\n🔍 Debugging: Session events created:")
        session = await store.get(session.id)
        for i, event in enumerate(session.events, 1):
            print(f" {i}. {event.type.value}/{event.source.value}: {str(event.message)[:100]}...")
        return

    # Re-read the session so the report includes tool events saved elsewhere.
    session = await store.get(session.id)
    _print_event_tree(session)

    print(f"\n{'='*60}")
    print("🔄 Final Prompt for Next LLM Turn:")
    print(f"{'='*60}")
    next_prompt = await build_prompt_from_session(session)
    pprint.pp(next_prompt, width=80)

    _print_statistics(session)

    print(f"\n{'='*60}")
    print("🎯 Key Takeaways:")
    print(" • LLM retries handled at application level")
    print(" • Tool execution reliability handled by chuk_tool_processor")
    print(" • Complete audit trail in session events")
    print(" • Separation of concerns: LLM cooperation vs tool reliability")
    print(" • Session tracks all attempts for debugging and analytics")
    print(f"{'='*60}")

if __name__ == "__main__":
    asyncio.run(main())
@@ -1,23 +0,0 @@
1
- chuk_ai_session_manager/__init__.py,sha256=h98D5FhFE-WhkWwwRMcF5AXqThxi_SGoYImZtqDniZ0,2038
2
- chuk_ai_session_manager/exceptions.py,sha256=WqrrUZuOAiUmz7tKnSnk0y222U_nV9a8LyaXLayn2fg,4420
3
- chuk_ai_session_manager/infinite_conversation.py,sha256=7j3caMnsX27M5rjj4oOkqiy_2AfcupWwsAWRflnKiSo,12092
4
- chuk_ai_session_manager/sample_tools.py,sha256=yZDM-ast5lv0YVHcd3GTxBMcJd7zuNkUhZPVIb06G0c,8155
5
- chuk_ai_session_manager/session_aware_tool_processor.py,sha256=iVe3d-qfp5QGkdNrgfZeRYoOjd8nLZ0g6K7HW1thFE8,7274
6
- chuk_ai_session_manager/session_prompt_builder.py,sha256=-ZTUczYh5emToInp4TRCj9FvF4CECyn45YHYKoWzmxE,17328
7
- chuk_ai_session_manager/session_storage.py,sha256=HqzYDtwx4zN5an1zJmSZc56BpyD3KjT3IWonIpmnVXQ,5790
8
- chuk_ai_session_manager/api/__init__.py,sha256=Lo_BoDW2rSn0Zw-CbjahOxc6ykjjTpucxHZo5FA2Gnc,41
9
- chuk_ai_session_manager/api/simple_api.py,sha256=RbHA2IAPUzIFZFvT6KpbgouAuonF-Q6GopKOeKej0rk,17795
10
- chuk_ai_session_manager/models/__init__.py,sha256=H1rRuDQDRf821JPUWUn5Zgwvc5BAqcEGekkHEmX-IgE,1167
11
- chuk_ai_session_manager/models/event_source.py,sha256=mn_D16sXMa6nAX-5BzssygJPz6VF24GRe-3IaH7bTnI,196
12
- chuk_ai_session_manager/models/event_type.py,sha256=TPPvAz-PlXVtrwXDNVFVnhdt1yEfgDGmKDGt8ArYcTk,275
13
- chuk_ai_session_manager/models/session.py,sha256=Txnmqd5SmiMz6acur_zL5MiFHJjKqU2se895p7_zUNQ,11781
14
- chuk_ai_session_manager/models/session_event.py,sha256=YPDbymduF42LLHtAv_k_kqlWF68vnth5J_HM4q-bOyI,5896
15
- chuk_ai_session_manager/models/session_metadata.py,sha256=KFG7lc_E0BQTP2OD9Y529elVGJXppDUMqz8vVONW0rw,1510
16
- chuk_ai_session_manager/models/session_run.py,sha256=uhMM4-WSrqOUsiWQPnyakInd-foZhxI-YnSHSWiZZwE,4369
17
- chuk_ai_session_manager/models/token_usage.py,sha256=pnsNDMew9ToUqkRCIz1TADnHC5aKnautdLD4trCA6Zg,11121
18
- chuk_ai_session_manager/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
19
- chuk_ai_session_manager/utils/status_display_utils.py,sha256=id4TIE0VSq3thvDd4wKIyk3kBr_bUMqrtXmOI9CD8r8,19231
20
- chuk_ai_session_manager-0.3.dist-info/METADATA,sha256=1uqP6Syo-Su4OZW97WeiQnxdOmRRgKwaUekwEpacfXg,12197
21
- chuk_ai_session_manager-0.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
22
- chuk_ai_session_manager-0.3.dist-info/top_level.txt,sha256=5RinqD0v-niHuLYePUREX4gEWTlrpgtUg0RfexVRBMk,24
23
- chuk_ai_session_manager-0.3.dist-info/RECORD,,