chuk-ai-session-manager: 0.3-py3-none-any.whl → 0.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chuk_ai_session_manager/__init__.py +12 -10
- chuk_ai_session_manager/models/token_usage.py +13 -2
- chuk_ai_session_manager/sample_tools.py +1 -1
- chuk_ai_session_manager/session_prompt_builder.py +70 -62
- {chuk_ai_session_manager-0.3.dist-info → chuk_ai_session_manager-0.4.dist-info}/METADATA +1 -38
- {chuk_ai_session_manager-0.3.dist-info → chuk_ai_session_manager-0.4.dist-info}/RECORD +8 -10
- chuk_ai_session_manager/utils/__init__.py +0 -0
- chuk_ai_session_manager/utils/status_display_utils.py +0 -474
- {chuk_ai_session_manager-0.3.dist-info → chuk_ai_session_manager-0.4.dist-info}/WHEEL +0 -0
- {chuk_ai_session_manager-0.3.dist-info → chuk_ai_session_manager-0.4.dist-info}/top_level.txt +0 -0
chuk_ai_session_manager/__init__.py

@@ -15,7 +15,18 @@ Quick Start:
     await sm.ai_responds("It's sunny and 72°F", model="gpt-4")
     """
 
-#
+# Import core models first (these have no circular dependencies)
+from chuk_ai_session_manager.models.event_source import EventSource
+from chuk_ai_session_manager.models.event_type import EventType
+
+# Import storage setup (this should work now with the fixed session_storage.py)
+from chuk_ai_session_manager.session_storage import setup_chuk_sessions_storage
+
+# Import other models (these might depend on storage being set up)
+from chuk_ai_session_manager.models.session import Session
+from chuk_ai_session_manager.models.session_event import SessionEvent
+
+# Import the simple API (this should work now that storage is fixed)
 from chuk_ai_session_manager.api.simple_api import (
     SessionManager,
     track_conversation,
@@ -24,15 +35,6 @@ from chuk_ai_session_manager.api.simple_api import (
     track_infinite_conversation
 )
 
-# Core models for advanced users
-from chuk_ai_session_manager.models.session import Session
-from chuk_ai_session_manager.models.session_event import SessionEvent
-from chuk_ai_session_manager.models.event_source import EventSource
-from chuk_ai_session_manager.models.event_type import EventType
-
-# Storage backend setup
-from chuk_ai_session_manager.session_storage import setup_chuk_sessions_storage
-
 __version__ = "0.1.0"
 
 # Main exports - keep it simple
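The net effect of these two hunks is a reordering: core models and the storage setup are now imported before the simple API, breaking the circular-import chain the comments describe. A minimal usage sketch, assuming `SessionManager` and `setup_chuk_sessions_storage` are re-exported at the package top level as the new import block suggests (the `user_says`/`ai_responds` methods and the `sandbox_id`/`default_ttl_hours` arguments appear elsewhere in this diff):

```python
# Sketch only: names assumed re-exported by chuk_ai_session_manager/__init__.py.
import asyncio

from chuk_ai_session_manager import SessionManager, setup_chuk_sessions_storage

async def main() -> None:
    # Configure the CHUK Sessions backend before any session is created.
    setup_chuk_sessions_storage(sandbox_id="demo", default_ttl_hours=1)

    sm = SessionManager()
    await sm.user_says("What's the weather in London?")
    await sm.ai_responds("It's sunny and 72°F", model="gpt-4")

asyncio.run(main())
```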
chuk_ai_session_manager/models/token_usage.py

@@ -175,7 +175,7 @@ class TokenUsage(BaseModel):
         )
 
     @staticmethod
-    def _count_tokens_sync(text: Optional[str], model: str = "gpt-3.5-turbo") -> int:
+    def _count_tokens_sync(text: Optional[Union[str, Any]], model: str = "gpt-3.5-turbo") -> int:
         """
         Synchronous implementation of count_tokens.
 
@@ -188,6 +188,17 @@ class TokenUsage(BaseModel):
         """
         if text is None:
             return 0
+
+        # Convert to string if not already a string
+        if not isinstance(text, str):
+            try:
+                text = str(text)
+            except Exception:
+                return 0
+
+        # Empty string has 0 tokens
+        if not text:
+            return 0
 
         if TIKTOKEN_AVAILABLE:
             try:
@@ -206,7 +217,7 @@ class TokenUsage(BaseModel):
             return int(len(text) / 4)
 
     @staticmethod
-    async def count_tokens(text: Optional[str], model: str = "gpt-3.5-turbo") -> int:
+    async def count_tokens(text: Optional[Union[str, Any]], model: str = "gpt-3.5-turbo") -> int:
         """
         Async version of count_tokens.
 
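Both token-counting entry points now accept `Optional[Union[str, Any]]` (which collapses to `Optional[Any]`; the annotation mostly documents intent): non-string payloads are coerced with `str()` and counted instead of raising inside tiktoken, and `None` still counts as zero. A small sketch of the new behaviour (the message dict is invented for illustration):

```python
# Minimal sketch; TokenUsage.count_tokens is a @staticmethod per the diff.
import asyncio

from chuk_ai_session_manager.models.token_usage import TokenUsage

async def demo() -> None:
    # Plain strings are counted as before.
    n1 = await TokenUsage.count_tokens("Hello, world!", model="gpt-3.5-turbo")

    # Non-string payloads (e.g. a tool-call dict) are now str()-coerced.
    n2 = await TokenUsage.count_tokens({"tool": "weather", "result": "sunny"})

    n3 = await TokenUsage.count_tokens(None)  # -> 0
    print(n1, n2, n3)

asyncio.run(demo())
```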
chuk_ai_session_manager/session_prompt_builder.py

@@ -37,7 +37,8 @@ async def build_prompt_from_session(
     max_tokens: Optional[int] = None,
     model: str = "gpt-3.5-turbo",
     include_parent_context: bool = False,
-    current_query: Optional[str] = None
+    current_query: Optional[str] = None,
+    max_history: int = 5  # Add this parameter for conversation strategy
 ) -> List[Dict[str, str]]:
     """
     Build a prompt for the next LLM call from a Session asynchronously.
@@ -49,6 +50,7 @@ async def build_prompt_from_session(
         model: Model to use for token counting
         include_parent_context: Whether to include context from parent sessions
         current_query: Current user query for relevance-based context selection
+        max_history: Maximum number of messages to include for conversation strategy
 
     Returns:
         A list of message dictionaries suitable for LLM API calls
@@ -72,7 +74,7 @@ async def build_prompt_from_session(
     elif strategy == PromptStrategy.TOOL_FOCUSED:
         return await _build_tool_focused_prompt(session)
     elif strategy == PromptStrategy.CONVERSATION:
-        return await _build_conversation_prompt(session, max_history
+        return await _build_conversation_prompt(session, max_history)
     elif strategy == PromptStrategy.HIERARCHICAL:
         return await _build_hierarchical_prompt(session, include_parent_context)
     else:
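With the dispatch fixed (the old call was missing its closing parenthesis) and `max_history` threaded through, callers can bound the conversation strategy explicitly. A usage sketch, assuming `PromptStrategy` is importable from the same module as the builder:

```python
from chuk_ai_session_manager.session_prompt_builder import (
    PromptStrategy,
    build_prompt_from_session,
)

async def next_prompt(session):
    # Keep only the five most recent messages (the new default) when
    # building a conversation-style prompt.
    return await build_prompt_from_session(
        session,
        strategy=PromptStrategy.CONVERSATION,
        max_history=5,
    )
```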
@@ -112,7 +114,7 @@ async def _build_minimal_prompt(session: Session) -> List[Dict[str, str]]:
 
     if assistant_msg is None:
         # Only the user message exists so far
-        return [{"role": "user", "content": first_user.message}] if first_user else []
+        return [{"role": "user", "content": _extract_content(first_user.message)}] if first_user else []
 
     # Children of that assistant
     children = [
@@ -126,11 +128,7 @@ async def _build_minimal_prompt(session: Session) -> List[Dict[str, str]]:
     # Assemble prompt
     prompt: List[Dict[str, str]] = []
     if first_user:
-
-        user_content = first_user.message
-        if isinstance(user_content, dict) and "content" in user_content:
-            user_content = user_content["content"]
-        prompt.append({"role": "user", "content": user_content})
+        prompt.append({"role": "user", "content": _extract_content(first_user.message)})
 
     # ALWAYS add the assistant marker - but strip its free text
     prompt.append({"role": "assistant", "content": None})
@@ -166,6 +164,24 @@ async def _build_minimal_prompt(session: Session) -> List[Dict[str, str]]:
     return prompt
 
 
+def _extract_content(message: Any) -> str:
+    """
+    Extract content string from a message that could be a string or dict.
+
+    Args:
+        message: The message content (string, dict, or other)
+
+    Returns:
+        The extracted content as a string
+    """
+    if isinstance(message, str):
+        return message
+    elif isinstance(message, dict) and "content" in message:
+        return message["content"]
+    else:
+        return str(message)
+
+
 async def _build_task_focused_prompt(session: Session) -> List[Dict[str, str]]:
     """
     Build a task-focused prompt.
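The new `_extract_content` helper replaces four copies of the same dict/str juggling in the strategy builders below. Its behaviour in the three cases, with illustrative inputs (the helper is module-private; it is imported here purely to demonstrate it):

```python
from chuk_ai_session_manager.session_prompt_builder import _extract_content

_extract_content("hello")                            # plain string -> "hello"
_extract_content({"role": "user", "content": "hi"})  # dict with "content" -> "hi"
_extract_content({"tool": "weather"})                # anything else -> str(...)
```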
@@ -201,17 +217,11 @@ async def _build_task_focused_prompt(session: Session) -> List[Dict[str, str]]:
     prompt = []
 
     # Always include the first user message (the main task)
-
-    if isinstance(first_content, dict) and "content" in first_content:
-        first_content = first_content["content"]
-    prompt.append({"role": "user", "content": first_content})
+    prompt.append({"role": "user", "content": _extract_content(first_user.message)})
 
     # Include the latest user message if different from the first
     if latest_user and latest_user.id != first_user.id:
-
-        if isinstance(latest_content, dict) and "content" in latest_content:
-            latest_content = latest_content["content"]
-        prompt.append({"role": "user", "content": latest_content})
+        prompt.append({"role": "user", "content": _extract_content(latest_user.message)})
 
     # Include assistant response placeholder
     if assistant_msg:
@@ -274,10 +284,7 @@ async def _build_tool_focused_prompt(session: Session) -> List[Dict[str, str]]:
     prompt = []
 
     # Include user message
-
-    if isinstance(user_content, dict) and "content" in user_content:
-        user_content = user_content["content"]
-    prompt.append({"role": "user", "content": user_content})
+    prompt.append({"role": "user", "content": _extract_content(latest_user.message)})
 
     # Include assistant placeholder
     if assistant_msg:
@@ -334,17 +341,17 @@ async def _build_conversation_prompt(
 
     # Build the conversation history
     prompt = []
-    for msg in recent_messages:
+    for i, msg in enumerate(recent_messages):
         role = "user" if msg.source == EventSource.USER else "assistant"
-        content = msg.message
+        content = _extract_content(msg.message)
 
-        #
-        if
-
-
-
-        content
+        # For the last assistant message, set content to None and add tool calls
+        if (role == "assistant" and
+            msg == recent_messages[-1] and
+            msg.source != EventSource.USER):
+
+            # Add the message first with None content
+            prompt.append({"role": role, "content": None})
 
             # Add tool call results for this assistant message
             tool_calls = [
@@ -352,9 +359,6 @@ async def _build_conversation_prompt(
                 if e.type == EventType.TOOL_CALL and e.metadata.get("parent_event_id") == msg.id
             ]
 
-    # Add the message first, then tools
-    prompt.append({"role": role, "content": content})
-
             # Add tool results
             for tc in tool_calls:
                 if isinstance(tc.message, dict):
@@ -366,11 +370,9 @@ async def _build_conversation_prompt(
                         "name": tool_name,
                         "content": json.dumps(tool_result, default=str),
                     })
-
-    #
-
-
-    prompt.append({"role": role, "content": content})
+        else:
+            # Regular message
+            prompt.append({"role": role, "content": content})
 
     return prompt
 
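The conversation strategy now nulls out only the final assistant turn and appends its tool results immediately after it; earlier turns keep their text. An illustrative shape of the output (values invented; the hunk only shows the `"name"` and `"content"` keys of the tool entry, so the `"role": "tool"` key is an assumption):

```python
prompt = [
    {"role": "user", "content": "What's the weather in London?"},
    {"role": "assistant", "content": None},  # last assistant turn, text stripped
    {
        "role": "tool",  # assumed key; not visible in the hunk
        "name": "weather",
        "content": '{"location": "London", "condition": "sunny"}',
    },
]
```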
@@ -391,32 +393,38 @@ async def _build_hierarchical_prompt(
 
     # If parent context is enabled and session has a parent
     if include_parent_context and session.parent_id:
-
-
-
-
-
-        if parent:
-            # Find the most recent summary in parent
-            summary_event = next(
-                (e for e in reversed(parent.events)
-                 if e.type == EventType.SUMMARY),
-                None
-            )
+        try:
+            # Get the storage backend and create store
+            backend = get_backend()
+            store = ChukSessionsStore(backend)
+            parent = await store.get(session.parent_id)
 
-        if
-            #
-
-
-
-
-
-
-
-
-
-
+            if parent:
+                # Find the most recent summary in parent
+                summary_event = next(
+                    (e for e in reversed(parent.events)
+                     if e.type == EventType.SUMMARY),
+                    None
+                )
+
+                if summary_event:
+                    # Extract summary content
+                    summary_content = summary_event.message
+                    if isinstance(summary_content, dict) and "note" in summary_content:
+                        summary_content = summary_content["note"]
+                    elif isinstance(summary_content, dict) and "content" in summary_content:
+                        summary_content = summary_content["content"]
+                    else:
+                        summary_content = str(summary_content)
+
+                    # Add parent context at the beginning
+                    prompt.insert(0, {
+                        "role": "system",
+                        "content": f"Context from previous conversation: {summary_content}"
+                    })
+        except Exception as e:
+            # If we can't load parent context, just continue with minimal prompt
+            logger.warning(f"Could not load parent context for session {session.parent_id}: {e}")
 
     return prompt
 
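The hierarchical strategy now loads the parent session through `get_backend()`/`ChukSessionsStore` and degrades gracefully instead of raising. A usage sketch, under the same `PromptStrategy` import assumption as above:

```python
from chuk_ai_session_manager.session_prompt_builder import (
    PromptStrategy,
    build_prompt_from_session,
)

async def prompt_with_parent_context(session):
    # Only has an effect when session.parent_id is set and the parent holds
    # a SUMMARY event; on storage errors the new try/except logs a warning
    # and returns the prompt without the injected system message.
    return await build_prompt_from_session(
        session,
        strategy=PromptStrategy.HIERARCHICAL,
        include_parent_context=True,
    )
```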
{chuk_ai_session_manager-0.3.dist-info → chuk_ai_session_manager-0.4.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chuk-ai-session-manager
-Version: 0.3
+Version: 0.4
 Summary: Session manager for AI applications
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown
@@ -311,36 +311,6 @@ pip install chuk-ai-session-manager
 | **Production Ready** | Requires additional work | Built for production |
 | **Learning Curve** | Steep | 5 minutes to productivity |
 
-## 🚀 Migration Guides
-
-### From LangChain Memory
-```python
-# Old LangChain way
-from langchain.memory import ConversationBufferMemory
-memory = ConversationBufferMemory()
-memory.save_context({"input": "Hi"}, {"output": "Hello"})
-
-# New CHUK way (much simpler!)
-from chuk_ai_session_manager import track_conversation
-await track_conversation("Hi", "Hello")
-```
-
-### From Manual Session Management
-```python
-# Old manual way
-conversations = {}
-def save_conversation(user_id, message, response):
-    if user_id not in conversations:
-        conversations[user_id] = []
-    conversations[user_id].append({"user": message, "ai": response})
-
-# New CHUK way
-from chuk_ai_session_manager import SessionManager
-sm = SessionManager(session_id=user_id)
-await sm.user_says(message)
-await sm.ai_responds(response)
-```
-
 ## 📖 More Examples
 
 Check out the `/examples` directory for complete working examples:
@@ -362,16 +332,9 @@ Check out the `/examples` directory for complete working examples:
 - ✅ Complete conversation analytics and observability
 - ✅ Framework-agnostic solution that works with any LLM library
 
-**Consider alternatives if you:**
-- ❌ Only need basic in-memory conversation history
-- ❌ Are locked into a specific framework (LangChain, etc.)
-- ❌ Don't need cost tracking or analytics
-- ❌ Are building simple, stateless AI applications
-
 ## 🤝 Community & Support
 
 - 📖 **Documentation**: [Full docs with tutorials](link-to-docs)
-- 💬 **Discord**: Join our community for help and discussions
 - 🐛 **Issues**: Report bugs on GitHub
 - 💡 **Feature Requests**: Suggest new features
 - 📧 **Support**: enterprise@chuk.dev for production support
{chuk_ai_session_manager-0.3.dist-info → chuk_ai_session_manager-0.4.dist-info}/RECORD

@@ -1,9 +1,9 @@
-chuk_ai_session_manager/__init__.py,sha256=
+chuk_ai_session_manager/__init__.py,sha256=gWpNM9IKH9iOrzwum-4smLmkkSF78t0aPGa583u889M,2208
 chuk_ai_session_manager/exceptions.py,sha256=WqrrUZuOAiUmz7tKnSnk0y222U_nV9a8LyaXLayn2fg,4420
 chuk_ai_session_manager/infinite_conversation.py,sha256=7j3caMnsX27M5rjj4oOkqiy_2AfcupWwsAWRflnKiSo,12092
-chuk_ai_session_manager/sample_tools.py,sha256=
+chuk_ai_session_manager/sample_tools.py,sha256=U-jTGveTJ95uSnA4jB30fJQJG3K-TGxN9jcOY6qVHZQ,8179
 chuk_ai_session_manager/session_aware_tool_processor.py,sha256=iVe3d-qfp5QGkdNrgfZeRYoOjd8nLZ0g6K7HW1thFE8,7274
-chuk_ai_session_manager/session_prompt_builder.py,sha256
+chuk_ai_session_manager/session_prompt_builder.py,sha256=Jeg_MWses_hFtHtDL7ZQl6EdSNVmVIIrLDrWEoPumfM,17613
 chuk_ai_session_manager/session_storage.py,sha256=HqzYDtwx4zN5an1zJmSZc56BpyD3KjT3IWonIpmnVXQ,5790
 chuk_ai_session_manager/api/__init__.py,sha256=Lo_BoDW2rSn0Zw-CbjahOxc6ykjjTpucxHZo5FA2Gnc,41
 chuk_ai_session_manager/api/simple_api.py,sha256=RbHA2IAPUzIFZFvT6KpbgouAuonF-Q6GopKOeKej0rk,17795
@@ -14,10 +14,8 @@ chuk_ai_session_manager/models/session.py,sha256=Txnmqd5SmiMz6acur_zL5MiFHJjKqU2
 chuk_ai_session_manager/models/session_event.py,sha256=YPDbymduF42LLHtAv_k_kqlWF68vnth5J_HM4q-bOyI,5896
 chuk_ai_session_manager/models/session_metadata.py,sha256=KFG7lc_E0BQTP2OD9Y529elVGJXppDUMqz8vVONW0rw,1510
 chuk_ai_session_manager/models/session_run.py,sha256=uhMM4-WSrqOUsiWQPnyakInd-foZhxI-YnSHSWiZZwE,4369
-chuk_ai_session_manager/models/token_usage.py,sha256=
-chuk_ai_session_manager/
-chuk_ai_session_manager/
-chuk_ai_session_manager-0.
-chuk_ai_session_manager-0.
-chuk_ai_session_manager-0.3.dist-info/top_level.txt,sha256=5RinqD0v-niHuLYePUREX4gEWTlrpgtUg0RfexVRBMk,24
-chuk_ai_session_manager-0.3.dist-info/RECORD,,
+chuk_ai_session_manager/models/token_usage.py,sha256=M9Qwmeb2woILaSRwA2SIAiG-sIwC3cL_1H-y3NjW5Ik,11436
+chuk_ai_session_manager-0.4.dist-info/METADATA,sha256=QjyLv5-42g82rYOJhYT6_tDomd0wD1L39xibv1OXi-4,11080
+chuk_ai_session_manager-0.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+chuk_ai_session_manager-0.4.dist-info/top_level.txt,sha256=5RinqD0v-niHuLYePUREX4gEWTlrpgtUg0RfexVRBMk,24
+chuk_ai_session_manager-0.4.dist-info/RECORD,,

{chuk_ai_session_manager-0.3.dist-info → chuk_ai_session_manager-0.4.dist-info}/WHEEL
RENAMED
File without changes
chuk_ai_session_manager/utils/status_display_utils.py
DELETED

@@ -1,474 +0,0 @@
-#!/usr/bin/env python3
-"""
-examples/retry_prompt_demo_fixed.py
-──────────────────────────────────
-Demonstrates LLM-level retry patterns with chuk_ai_session_manager.
-
-This shows:
-• Retrying LLM calls until they produce valid tool calls
-• Using chuk_tool_processor's built-in reliability features
-• Session tracking of the entire retry process
-• Proper separation: LLM retries vs tool execution reliability
-"""
-
-from __future__ import annotations
-
-import asyncio
-import json
-import logging
-import pprint
-import sys
-import os
-from typing import Dict, List
-
-# Add current directory to path
-sys.path.insert(0, os.getcwd())
-
-logging.basicConfig(level=logging.INFO, format="%(levelname)s | %(message)s")
-
-# Session imports - FIXED import paths
-from chuk_ai_session_manager.session_storage import get_backend, ChukSessionsStore, setup_chuk_sessions_storage
-from chuk_ai_session_manager.models.session import Session
-from chuk_ai_session_manager.models.session_event import SessionEvent
-from chuk_ai_session_manager.models.event_source import EventSource
-from chuk_ai_session_manager.models.event_type import EventType
-from chuk_ai_session_manager.session_prompt_builder import build_prompt_from_session
-
-# Status display utilities
-def format_status(success: bool, success_msg: str = "SUCCESS", failure_msg: str = "FAILED") -> str:
-    """Format status with correct emoji."""
-    if success:
-        return f"✅ {success_msg}"
-    else:
-        return f"❌ {failure_msg}"
-
-# Import from chuk_tool_processor (using the working pattern)
-from chuk_tool_processor.registry import initialize, get_default_registry
-from chuk_tool_processor.models.tool_call import ToolCall
-from chuk_tool_processor.execution.strategies.inprocess_strategy import InProcessStrategy
-from chuk_tool_processor.execution.tool_executor import ToolExecutor
-
-# Import sample tools - this will trigger registration
-import sample_tools
-
-##############################################################################
-# Custom Tool Processor (based on working OpenAI demo pattern)
-##############################################################################
-
-class CustomSessionAwareToolProcessor:
-    """Custom tool processor that properly integrates with chuk_tool_processor."""
-
-    def __init__(self, session_id: str, registry, executor):
-        self.session_id = session_id
-        self.registry = registry
-        self.executor = executor
-
-    @classmethod
-    async def create(cls, session_id: str):
-        """Create a custom session-aware tool processor."""
-        # Get the registry
-        registry = await get_default_registry()
-
-        # Create execution strategy and executor
-        strategy = InProcessStrategy(registry)
-        executor = ToolExecutor(registry=registry, strategy=strategy)
-
-        return cls(session_id, registry, executor)
-
-    async def process_llm_message(self, llm_msg: dict) -> list:
-        """Process tool calls from an LLM message."""
-        # Get the session
-        backend = get_backend()
-        store = ChukSessionsStore(backend)
-        session = await store.get(self.session_id)
-        if not session:
-            raise ValueError(f"Session {self.session_id} not found")
-
-        # Add the LLM message as an event
-        llm_event = await SessionEvent.create_with_tokens(
-            message=llm_msg,
-            prompt="",
-            completion=json.dumps(llm_msg, ensure_ascii=False),
-            model="gpt-4o-mini",
-            source=EventSource.LLM,
-            type=EventType.MESSAGE,
-        )
-        await session.add_event_and_save(llm_event)
-
-        # Extract tool calls
-        tool_calls = llm_msg.get('tool_calls', [])
-        if not tool_calls:
-            return []
-
-        # Convert to ToolCall objects
-        chuk_tool_calls = []
-        for call in tool_calls:
-            func = call.get('function', {})
-            tool_name = func.get('name', '')
-            try:
-                arguments = json.loads(func.get('arguments', '{}'))
-            except json.JSONDecodeError:
-                arguments = {}
-
-            chuk_tool_calls.append(ToolCall(
-                tool=tool_name,
-                arguments=arguments
-            ))
-
-        # Execute the tools
-        print(f"🔧 Executing {len(chuk_tool_calls)} tools...")
-        results = await self.executor.execute(chuk_tool_calls)
-
-        # Log each result as a session event
-        for result in results:
-            # Convert result to string for session storage
-            result_str = str(result.result) if result.result is not None else "null"
-
-            tool_event = await SessionEvent.create_with_tokens(
-                message={
-                    "tool": result.tool,
-                    "arguments": getattr(result, "arguments", None),
-                    "result": result.result,
-                    "error": result.error,
-                },
-                prompt=f"{result.tool}({json.dumps(getattr(result, 'arguments', None), default=str)})",
-                completion=result_str,
-                model="tool-execution",
-                source=EventSource.SYSTEM,
-                type=EventType.TOOL_CALL,
-            )
-            await tool_event.set_metadata("parent_event_id", llm_event.id)
-            await session.add_event_and_save(tool_event)
-
-        return results
-
-##############################################################################
-# LLM Simulation: Unreliable at first, then cooperative
-##############################################################################
-
-class UnreliableLLM:
-    """Simulates an LLM that sometimes doesn't follow tool-calling instructions."""
-
-    def __init__(self):
-        self.call_count = 0
-        self.scenarios = [
-            # Scenario 1: Refuses to use tools
-            {
-                "role": "assistant",
-                "content": "I don't need to use any tools. The weather in London is probably fine!",
-                "tool_calls": []
-            },
-            # Scenario 2: Tries to use non-existent tool
-            {
-                "role": "assistant",
-                "content": None,
-                "tool_calls": [
-                    {
-                        "id": "call_1",
-                        "type": "function",
-                        "function": {
-                            "name": "nonexistent_weather_api",
-                            "arguments": '{"city": "London"}'
-                        }
-                    }
-                ]
-            },
-            # Scenario 3: Invalid JSON in arguments
-            {
-                "role": "assistant",
-                "content": None,
-                "tool_calls": [
-                    {
-                        "id": "call_2",
-                        "type": "function",
-                        "function": {
-                            "name": "weather",
-                            "arguments": '{"location": London}'  # Missing quotes - invalid JSON
-                        }
-                    }
-                ]
-            },
-            # Scenario 4: Finally cooperates correctly
-            {
-                "role": "assistant",
-                "content": None,
-                "tool_calls": [
-                    {
-                        "id": "call_3",
-                        "type": "function",
-                        "function": {
-                            "name": "weather",
-                            "arguments": '{"location": "London"}'
-                        }
-                    }
-                ]
-            }
-        ]
-
-    async def chat_completion(self, messages: List[Dict], **kwargs) -> Dict:
-        """Simulate OpenAI chat completion with unreliable behavior."""
-        self.call_count += 1
-
-        if self.call_count <= len(self.scenarios):
-            response = self.scenarios[self.call_count - 1]
-            print(f"  📞 LLM Call {self.call_count}: {self._describe_response(response)}")
-            return response
-        else:
-            # After all scenarios, always cooperate
-            return self.scenarios[-1]
-
-    def _describe_response(self, response: Dict) -> str:
-        """Describe what the LLM response contains."""
-        if response.get("tool_calls"):
-            tool_calls = response["tool_calls"]
-            if len(tool_calls) == 1:
-                func_name = tool_calls[0].get("function", {}).get("name", "unknown")
-                return f"Wants to call '{func_name}'"
-            else:
-                return f"Wants to call {len(tool_calls)} tools"
-        elif response.get("content"):
-            return f"Text response: '{response['content'][:50]}...'"
-        else:
-            return "Empty response"
-
-##############################################################################
-# Retry Logic for LLM Cooperation
-##############################################################################
-
-class LLMRetryManager:
-    """Manages retrying LLM calls until they produce valid, executable tool calls."""
-
-    def __init__(self, session_id: str, max_attempts: int = 5):
-        self.session_id = session_id
-        self.max_attempts = max_attempts
-
-    async def get_valid_tool_calls(self, llm, messages: List[Dict], processor: CustomSessionAwareToolProcessor) -> tuple[Dict, List]:
-        """
-        Keep calling the LLM until it produces valid, executable tool calls.
-
-        Returns:
-            Tuple of (successful_llm_response, tool_results)
-        """
-        backend = get_backend()
-        store = ChukSessionsStore(backend)
-        session = await store.get(self.session_id)
-
-        for attempt in range(1, self.max_attempts + 1):
-            print(f"\n🔄 LLM Attempt {attempt}/{self.max_attempts}")
-
-            # Call LLM
-            response = await llm.chat_completion(messages)
-
-            # Log the LLM response attempt
-            attempt_event = SessionEvent(
-                message={
-                    "attempt": attempt,
-                    "response": response,
-                    "success": False  # Will update if successful
-                },
-                type=EventType.MESSAGE,
-                source=EventSource.LLM,
-            )
-            await session.add_event_and_save(attempt_event)
-
-            # Check if response has tool calls
-            tool_calls = response.get("tool_calls", [])
-            if not tool_calls:
-                print(f"  {format_status(False, failure_msg='No tool calls in response')}")
-                continue
-
-            # Try to execute the tool calls
-            try:
-                print(f"  🔧 Attempting to execute {len(tool_calls)} tool calls...")
-
-                # Check what tools are available vs requested
-                registry = await get_default_registry()
-                tools_list = await registry.list_tools()
-                available_tools = [name for namespace, name in tools_list]
-                requested_tool = tool_calls[0].get("function", {}).get("name", "unknown")
-                print(f"  🔍 Requested tool: {requested_tool}")
-                print(f"  🔍 Available tools: {available_tools}")
-
-                tool_results = await processor.process_llm_message(response)
-
-                # Check if all tools executed successfully
-                failed_tools = [r for r in tool_results if r.error]
-                if failed_tools:
-                    print(f"  {format_status(False, failure_msg=f'{len(failed_tools)} tools failed:')}")
-                    for failed in failed_tools:
-                        print(f"    • {failed.tool}: {failed.error}")
-                    continue
-
-                # Success! All tools executed
-                print(f"  {format_status(True, success_msg=f'All {len(tool_results)} tools executed successfully')}")
-
-                # Update the last event to mark success
-                session = await store.get(self.session_id)
-                if session.events:
-                    # Find the most recent LLM attempt event
-                    for event in reversed(session.events):
-                        if (event.type == EventType.MESSAGE and
-                            event.source == EventSource.LLM and
-                            isinstance(event.message, dict) and
-                            "attempt" in event.message):
-                            event.message["success"] = True
-                            await store.save(session)
-                            break
-
-                return response, tool_results
-
-            except Exception as e:
-                print(f"  {format_status(False, failure_msg=f'Tool execution failed: {e}')}")
-                continue
-
-        # If we get here, all attempts failed
-        raise RuntimeError(f"Failed to get valid tool calls after {self.max_attempts} attempts")
-
-##############################################################################
-# Demo Flow
-##############################################################################
-
-async def main() -> None:
-    print("🚀 Starting LLM Retry Demo")
-    print("   (Demonstrates retry logic for uncooperative LLMs)")
-    print("   (Tool execution uses chuk_tool_processor's built-in reliability)")
-
-    # Setup session storage - FIXED
-    setup_chuk_sessions_storage(sandbox_id="retry-prompt-demo", default_ttl_hours=1)
-    backend = get_backend()
-    store = ChukSessionsStore(backend)
-
-    # Initialize tool registry first
-    print("\n🔧 Initializing tool registry...")
-    registry = await initialize()
-    tools_list = await registry.list_tools()
-    print(f"📋 Found {len(tools_list)} registered tools:")
-    for namespace, tool_name in tools_list:
-        print(f"  • {namespace}.{tool_name}")
-
-    # Create session
-    session = await Session.create()
-    await session.metadata.set_property("demo", "retry_prompt")
-    await store.save(session)
-
-    # Add user request
-    user_prompt = "What's the weather like in London? I need to know if I should bring an umbrella."
-    user_event = await SessionEvent.create_with_tokens(
-        message=user_prompt,
-        prompt=user_prompt,
-        model="gpt-4o-mini",
-        source=EventSource.USER,
-        type=EventType.MESSAGE
-    )
-    await session.add_event_and_save(user_event)
-    print(f"\n👤 User: {user_prompt}")
-
-    # Create components
-    llm = UnreliableLLM()
-    processor = await CustomSessionAwareToolProcessor.create(session_id=session.id)
-    retry_manager = LLMRetryManager(session_id=session.id, max_attempts=6)
-
-    # Build initial messages for LLM
-    messages = [
-        {"role": "system", "content": "You are a helpful assistant. When users ask about weather, use the weather tool to get current information."},
-        {"role": "user", "content": user_prompt}
-    ]
-
-    # Attempt to get valid tool calls with retries
-    try:
-        print(f"\n🎯 Attempting to get valid tool calls (max {retry_manager.max_attempts} attempts)...")
-        final_response, tool_results = await retry_manager.get_valid_tool_calls(llm, messages, processor)
-
-        print(f"\n{'='*60}")
-        print("🎉 SUCCESS! LLM cooperated and tools executed successfully")
-        print(f"{'='*60}")
-
-        # Show tool results
-        print("\n🛠️ Tool Results:")
-        for i, result in enumerate(tool_results, 1):
-            print(f"\n  Tool {i}: {result.tool}")
-            if result.error:
-                print(f"    ❌ Error: {result.error}")
-            elif isinstance(result.result, dict):
-                print(f"    📊 Result:")
-                for key, value in result.result.items():
-                    print(f"      {key}: {value}")
-            else:
-                print(f"    📊 Result: {result.result}")
-
-    except RuntimeError as e:
-        print(f"\n❌ FAILED: {e}")
-
-        # Still show the session events for debugging
-        print("\n🔍 Debugging: Session events created:")
-        session = await store.get(session.id)
-        for i, event in enumerate(session.events, 1):
-            print(f"  {i}. {event.type.value}/{event.source.value}: {str(event.message)[:100]}...")
-        return
-
-    # Show session event tree
-    session = await store.get(session.id)
-    print(f"\n{'='*60}")
-    print("📊 Session Event Tree (Complete Retry History):")
-    print(f"{'='*60}")
-
-    for i, event in enumerate(session.events, 1):
-        event_id = event.id[:8] + "..."
-        if event.type == EventType.MESSAGE and event.source == EventSource.USER:
-            print(f"{i}. USER MESSAGE [{event_id}]")
-            print(f"   Content: {event.message}")
-        elif event.type == EventType.MESSAGE and event.source == EventSource.LLM:
-            if isinstance(event.message, dict) and "attempt" in event.message:
-                attempt = event.message["attempt"]
-                success = event.message.get("success", False)
-                status = "✅ SUCCESS" if success else "❌ FAILED"
-                print(f"{i}. LLM ATTEMPT {attempt} [{event_id}] - {status}")
-            else:
-                print(f"{i}. LLM MESSAGE [{event_id}]")
-        elif event.type == EventType.TOOL_CALL:
-            tool_msg = event.message or {}
-            tool_name = tool_msg.get("tool", "unknown")
-            error = tool_msg.get("error")
-            print(f"{i}. TOOL CALL [{event_id}] - {tool_name}")
-            if error:
-                print(f"   ❌ Error: {error}")
-            else:
-                print(f"   ✅ Success")
-
-    # Show final prompt for next turn
-    print(f"\n{'='*60}")
-    print("🔄 Final Prompt for Next LLM Turn:")
-    print(f"{'='*60}")
-    next_prompt = await build_prompt_from_session(session)
-    pprint.pp(next_prompt, width=80)
-
-    # Show session statistics
-    print(f"\n{'='*60}")
-    print("📈 Session Statistics:")
-    print(f"{'='*60}")
-    print(f"  Session ID: {session.id}")
-    print(f"  Total events: {len(session.events)}")
-    print(f"  Total tokens: {session.total_tokens}")
-    print(f"  Estimated cost: ${session.total_cost:.6f}")
-
-    # Event breakdown
-    event_types = {}
-    for event in session.events:
-        event_type = f"{event.source.value}:{event.type.value}"
-        event_types[event_type] = event_types.get(event_type, 0) + 1
-
-    print(f"  Event breakdown:")
-    for event_type, count in event_types.items():
-        print(f"    {event_type}: {count}")
-
-    print(f"\n{'='*60}")
-    print("🎯 Key Takeaways:")
-    print("  • LLM retries handled at application level")
-    print("  • Tool execution reliability handled by chuk_tool_processor")
-    print("  • Complete audit trail in session events")
-    print("  • Separation of concerns: LLM cooperation vs tool reliability")
-    print("  • Session tracks all attempts for debugging and analytics")
-    print(f"{'='*60}")
-
-if __name__ == "__main__":
-    asyncio.run(main())
{chuk_ai_session_manager-0.3.dist-info → chuk_ai_session_manager-0.4.dist-info}/top_level.txt
RENAMED
File without changes