chuk-ai-session-manager 0.5__py3-none-any.whl → 0.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chuk_ai_session_manager/__init__.py +73 -3
- chuk_ai_session_manager-0.7.dist-info/METADATA +262 -0
- {chuk_ai_session_manager-0.5.dist-info → chuk_ai_session_manager-0.7.dist-info}/RECORD +5 -5
- chuk_ai_session_manager-0.5.dist-info/METADATA +0 -896
- {chuk_ai_session_manager-0.5.dist-info → chuk_ai_session_manager-0.7.dist-info}/WHEEL +0 -0
- {chuk_ai_session_manager-0.5.dist-info → chuk_ai_session_manager-0.7.dist-info}/top_level.txt +0 -0
chuk_ai_session_manager/__init__.py
CHANGED

@@ -28,12 +28,25 @@ Infinite Context Example:
     await sm.user_says("Tell me about machine learning")
     await sm.ai_responds("Machine learning is...", model="gpt-4")
     # Session will auto-segment when limits are reached
+
+Storage Configuration:
+    # Default: Memory storage (no Redis required)
+    pip install chuk-ai-session-manager
+
+    # Redis: For production persistence
+    pip install chuk-ai-session-manager[redis]
+    export SESSION_PROVIDER=redis
+    export SESSION_REDIS_URL=redis://localhost:6379/0
+
+    # Environment variables:
+    SESSION_PROVIDER=memory (default - fast, no persistence)
+    SESSION_PROVIDER=redis (persistent - requires [redis] extra)
 """
 
 import logging
 
 # Package version
-__version__ = "0.
+__version__ = "0.5"
 
 # Set up package-level logger
 logger = logging.getLogger(__name__)

@@ -103,6 +116,11 @@ def configure_storage(sandbox_id: str = "chuk-ai-session-manager",
 
     Returns:
         True if configuration was successful, False otherwise
+
+    Note:
+        Storage provider is controlled by SESSION_PROVIDER environment variable:
+        - memory (default): Fast, no persistence, no extra dependencies
+        - redis: Persistent, requires pip install chuk-ai-session-manager[redis]
     """
     try:
         setup_chuk_sessions_storage(

@@ -128,6 +146,22 @@ def is_available() -> dict:
     Returns:
         Dictionary showing availability of each component
     """
+    # Check if Redis is available
+    redis_available = False
+    try:
+        import redis
+        redis_available = True
+    except ImportError:
+        pass
+
+    # Check if tiktoken is available for enhanced token counting
+    tiktoken_available = False
+    try:
+        import tiktoken
+        tiktoken_available = True
+    except ImportError:
+        pass
+
     return {
         "core_enums": True,
         "core_models": True,

@@ -139,9 +173,39 @@ def is_available() -> dict:
         "token_tracking": True,
         "exceptions": True,
         "session_manager": True,
+        "redis_support": redis_available,
+        "enhanced_token_counting": tiktoken_available,
     }
 
 
+def get_storage_info() -> dict:
+    """
+    Get information about the current storage configuration.
+
+    Returns:
+        Dictionary with storage configuration details
+    """
+    import os
+    from chuk_ai_session_manager.session_storage import get_backend
+
+    try:
+        backend = get_backend()
+        stats = backend.get_stats()
+
+        return {
+            "provider": os.getenv("SESSION_PROVIDER", "memory"),
+            "backend": stats.get("backend", "unknown"),
+            "sandbox_id": stats.get("sandbox_id", "unknown"),
+            "redis_url": os.getenv("SESSION_REDIS_URL", "not_set"),
+            "stats": stats
+        }
+    except Exception as e:
+        return {
+            "provider": os.getenv("SESSION_PROVIDER", "memory"),
+            "error": str(e)
+        }
+
+
 # Main exports - everything should be available
 __all__ = [
     # Version and utilities

@@ -149,6 +213,7 @@ __all__ = [
     "get_version",
     "is_available",
     "configure_storage",
+    "get_storage_info",
 
     # Core enums
     "EventSource",

@@ -201,5 +266,10 @@ try:
 except Exception as e:
     logger.debug(f"Auto-setup skipped: {e}")
 
-# Log successful import
-
+# Log successful import with storage info
+try:
+    storage_info = get_storage_info()
+    provider = storage_info.get("provider", "unknown")
+    logger.debug(f"CHUK AI Session Manager v{__version__} imported successfully (storage: {provider})")
+except Exception:
+    logger.debug(f"CHUK AI Session Manager v{__version__} imported successfully")
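The `__init__.py` changes above add optional-dependency detection and a `get_storage_info()` helper. A minimal usage sketch, assuming version 0.7 is installed and the exports match the diff:

```python
# Illustrative only: exercising the helpers added in __init__.py above.
# Assumes chuk-ai-session-manager 0.7 is installed with the exports shown in the diff.
import chuk_ai_session_manager as casm

print(casm.get_version())       # package version string
print(casm.is_available())      # now reports redis_support / enhanced_token_counting
print(casm.get_storage_info())  # provider, backend, sandbox_id, redis_url, stats
```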
chuk_ai_session_manager-0.7.dist-info/METADATA
ADDED

@@ -0,0 +1,262 @@
+Metadata-Version: 2.4
+Name: chuk-ai-session-manager
+Version: 0.7
+Summary: Session manager for AI applications
+Requires-Python: >=3.11
+Description-Content-Type: text/markdown
+Requires-Dist: chuk-sessions>=0.4.1
+Requires-Dist: chuk-tool-processor>=0.4.1
+Requires-Dist: pydantic>=2.11.3
+Provides-Extra: redis
+Requires-Dist: chuk-sessions[redis]>=0.4.1; extra == "redis"
+Requires-Dist: redis>=4.0.0; extra == "redis"
+Provides-Extra: tiktoken
+Requires-Dist: tiktoken>=0.9.0; extra == "tiktoken"
+Provides-Extra: dev
+Requires-Dist: pytest>=7.0.0; extra == "dev"
+Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
+Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
+Requires-Dist: black>=23.0.0; extra == "dev"
+Requires-Dist: isort>=5.12.0; extra == "dev"
+Requires-Dist: mypy>=1.0.0; extra == "dev"
+Provides-Extra: all
+Requires-Dist: chuk-sessions[redis]>=0.4; extra == "all"
+Requires-Dist: redis>=4.0.0; extra == "all"
+Requires-Dist: tiktoken>=0.9.0; extra == "all"
+
+# CHUK AI Session Manager
+
+**A powerful session management system for AI applications**
+
+[](https://www.python.org/downloads/)
+[](https://opensource.org/licenses/MIT)
+
+Automatic conversation tracking, token usage monitoring, tool call logging, infinite context support with automatic summarization, and hierarchical session relationships. Perfect for AI applications that need reliable session management.
+
+## 🚀 Quick Start
+
+### Installation Options
+
+```bash
+# Basic installation (memory storage only)
+pip install chuk-ai-session-manager
+
+# With Redis support for production
+pip install chuk-ai-session-manager[redis]
+
+# With enhanced token counting
+pip install chuk-ai-session-manager[tiktoken]
+
+# Full installation with all optional features
+pip install chuk-ai-session-manager[all]
+
+# Development installation
+pip install chuk-ai-session-manager[dev]
+```
+
+### Quick Example
+
+```python
+from chuk_ai_session_manager import track_conversation
+
+# Track any conversation automatically
+session_id = await track_conversation(
+    user_message="What's the weather like?",
+    ai_response="I don't have access to real-time weather data.",
+    model="gpt-3.5-turbo",
+    provider="openai"
+)
+
+print(f"Conversation tracked in session: {session_id}")
+```
+
+That's it! Zero configuration required.
+
+## ⚡ Major Features
+
+### 🎯 **Zero-Configuration Tracking**
+```python
+from chuk_ai_session_manager import SessionManager
+
+# Just start using it
+sm = SessionManager()
+await sm.user_says("Hello!")
+await sm.ai_responds("Hi there!", model="gpt-4")
+
+# Get stats instantly
+stats = await sm.get_stats()
+print(f"Tokens: {stats['total_tokens']}, Cost: ${stats['estimated_cost']:.4f}")
+```
+
+### 🔄 **Infinite Context**
+```python
+# Automatically handles conversations longer than token limits
+sm = SessionManager(infinite_context=True, token_threshold=4000)
+await sm.user_says("Tell me about the history of computing...")
+await sm.ai_responds("Computing history begins with...", model="gpt-4")
+# Session will auto-segment when limits are reached
+```
+
+### ⚙️ **Storage Backends**
+
+| Installation | Storage | Use Case | Performance |
+|-------------|---------|----------|-------------|
+| `pip install chuk-ai-session-manager` | Memory | Development, testing | 1.8M ops/sec |
+| `pip install chuk-ai-session-manager[redis]` | Redis | Production, persistence | 20K ops/sec |
+
+### 🛠️ **Tool Integration**
+```python
+# Automatic tool call tracking
+await sm.tool_used(
+    tool_name="calculator",
+    arguments={"operation": "add", "a": 5, "b": 3},
+    result={"result": 8}
+)
+```
+
+## 💡 Common Use Cases
+
+### Web App Conversation Tracking
+```python
+from chuk_ai_session_manager import track_conversation
+
+# In your chat endpoint
+session_id = await track_conversation(
+    user_message=request.message,
+    ai_response=ai_response,
+    model="gpt-4",
+    provider="openai",
+    session_id=request.session_id  # Continue existing conversation
+)
+```
+
+### LLM Wrapper with Automatic Tracking
+```python
+from chuk_ai_session_manager import track_llm_call
+import openai
+
+async def my_openai_call(prompt):
+    response = await openai.chat.completions.create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": prompt}]
+    )
+    return response.choices[0].message.content
+
+# Automatically tracked
+response, session_id = await track_llm_call(
+    user_input="Explain machine learning",
+    llm_function=my_openai_call,
+    model="gpt-3.5-turbo",
+    provider="openai"
+)
+```
+
+### Long Conversations with Auto-Segmentation
+```python
+from chuk_ai_session_manager import track_infinite_conversation
+
+# Start a conversation
+session_id = await track_infinite_conversation(
+    user_message="Tell me about the history of computing",
+    ai_response="Computing history begins with ancient calculating devices...",
+    model="gpt-4",
+    token_threshold=4000  # Auto-segment after 4000 tokens
+)
+
+# Continue the conversation - will auto-segment if needed
+session_id = await track_infinite_conversation(
+    user_message="What about quantum computers?",
+    ai_response="Quantum computing represents a fundamental shift...",
+    session_id=session_id,
+    model="gpt-4"
+)
+```
+
+## 🔧 Configuration
+
+### Storage Configuration
+
+```bash
+# Memory provider (default) - fast, no persistence
+export SESSION_PROVIDER=memory
+
+# Redis provider - persistent, production-ready (requires redis extra)
+export SESSION_PROVIDER=redis
+export SESSION_REDIS_URL=redis://localhost:6379/0
+```
+
+### Installation Matrix
+
+| Command | Memory | Redis | Token Counting | Use Case |
+|---------|--------|-------|----------------|----------|
+| `pip install chuk-ai-session-manager` | ✅ | ❌ | Basic | Development |
+| `pip install chuk-ai-session-manager[redis]` | ✅ | ✅ | Basic | Production |
+| `pip install chuk-ai-session-manager[tiktoken]` | ✅ | ❌ | Enhanced | Better accuracy |
+| `pip install chuk-ai-session-manager[all]` | ✅ | ✅ | Enhanced | Full features |
+
+## 📊 Monitoring & Analytics
+
+```python
+# Get comprehensive session analytics
+stats = await sm.get_stats(include_all_segments=True)
+
+print(f"""
+🚀 Session Analytics Dashboard
+============================
+Session ID: {stats['session_id']}
+Total Messages: {stats['total_messages']}
+User Messages: {stats['user_messages']}
+AI Messages: {stats['ai_messages']}
+Tool Calls: {stats['tool_calls']}
+Total Tokens: {stats['total_tokens']}
+Total Cost: ${stats['estimated_cost']:.6f}
+Session Segments: {stats.get('session_segments', 1)}
+""")
+```
+
+## 🏗️ Why CHUK AI Session Manager?
+
+- **Zero Configuration**: Start tracking conversations in 3 lines of code
+- **Infinite Context**: Never worry about token limits again
+- **Universal**: Works with any LLM provider (OpenAI, Anthropic, etc.)
+- **Production Ready**: Built-in persistence, monitoring, and error handling
+- **Token Aware**: Automatic cost tracking across all providers
+- **Tool Friendly**: Seamless tool call logging and retry mechanisms
+
+## 🛡️ Error Handling
+
+```python
+from chuk_ai_session_manager import (
+    SessionManagerError,
+    SessionNotFound,
+    TokenLimitExceeded
+)
+
+try:
+    session_id = await track_conversation("Hello", "Hi there")
+except SessionNotFound as e:
+    print(f"Session not found: {e}")
+except TokenLimitExceeded as e:
+    print(f"Token limit exceeded: {e}")
+except SessionManagerError as e:
+    print(f"General session error: {e}")
+```
+
+## 🔄 Dependencies
+
+- **Required**: `chuk-sessions` (session storage), `pydantic` (data models), `chuk-tool-processor` (tool integration)
+- **Optional**: `redis` (Redis storage), `tiktoken` (accurate token counting)
+
+## 📄 License
+
+MIT License - build amazing AI applications with confidence!
+
+---
+
+**Ready to build better AI applications?**
+
+```bash
+pip install chuk-ai-session-manager
+```
+
+**Start tracking conversations in 30 seconds!**
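The new README documents switching the backend through `SESSION_PROVIDER` and `SESSION_REDIS_URL`. A hedged sketch of that flow, assuming the `[redis]` extra is installed, a local Redis server is reachable, and the environment variables are read when storage is configured, as the README implies:

```python
# Illustrative only: opting into the Redis backend described in the new README.
# Assumes the [redis] extra is installed and redis://localhost:6379/0 is reachable;
# the environment variables are set before the package configures storage.
import os

os.environ["SESSION_PROVIDER"] = "redis"
os.environ["SESSION_REDIS_URL"] = "redis://localhost:6379/0"

from chuk_ai_session_manager import configure_storage, get_storage_info

configure_storage(sandbox_id="my-app", default_ttl_hours=24)  # True on success
print(get_storage_info())  # "provider" should report "redis"
```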
{chuk_ai_session_manager-0.5.dist-info → chuk_ai_session_manager-0.7.dist-info}/RECORD
RENAMED

@@ -1,4 +1,4 @@
-chuk_ai_session_manager/__init__.py,sha256=
+chuk_ai_session_manager/__init__.py,sha256=r24MtKySdzvUgK8psNHYHiTRzOUAEXPOBmaNg2cjyFw,7882
 chuk_ai_session_manager/exceptions.py,sha256=WqrrUZuOAiUmz7tKnSnk0y222U_nV9a8LyaXLayn2fg,4420
 chuk_ai_session_manager/infinite_conversation.py,sha256=7j3caMnsX27M5rjj4oOkqiy_2AfcupWwsAWRflnKiSo,12092
 chuk_ai_session_manager/sample_tools.py,sha256=U-jTGveTJ95uSnA4jB30fJQJG3K-TGxN9jcOY6qVHZQ,8179

@@ -16,7 +16,7 @@ chuk_ai_session_manager/models/session_event.py,sha256=RTghC9_sDHzD8qdgEYCoclJzp
 chuk_ai_session_manager/models/session_metadata.py,sha256=KFG7lc_E0BQTP2OD9Y529elVGJXppDUMqz8vVONW0rw,1510
 chuk_ai_session_manager/models/session_run.py,sha256=uhMM4-WSrqOUsiWQPnyakInd-foZhxI-YnSHSWiZZwE,4369
 chuk_ai_session_manager/models/token_usage.py,sha256=M9Qwmeb2woILaSRwA2SIAiG-sIwC3cL_1H-y3NjW5Ik,11436
-chuk_ai_session_manager-0.
-chuk_ai_session_manager-0.
-chuk_ai_session_manager-0.
-chuk_ai_session_manager-0.
+chuk_ai_session_manager-0.7.dist-info/METADATA,sha256=dyL99WC_86DIPfkjLr-q4Dw2kKi-J7olpyaOfyFEsgM,7910
+chuk_ai_session_manager-0.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+chuk_ai_session_manager-0.7.dist-info/top_level.txt,sha256=5RinqD0v-niHuLYePUREX4gEWTlrpgtUg0RfexVRBMk,24
+chuk_ai_session_manager-0.7.dist-info/RECORD,,
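The RECORD changes above only swap in the 0.7 metadata hashes. As an illustrative check (standard library only; `importlib.metadata` is not part of this package), an upgraded environment could be inspected like this:

```python
# Illustrative only: inspect the installed distribution after upgrading to 0.7.
from importlib.metadata import requires, version

print(version("chuk-ai-session-manager"))  # expected "0.7" per the new METADATA
print([req for req in requires("chuk-ai-session-manager") or [] if "redis" in req])
```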
chuk_ai_session_manager-0.5.dist-info/METADATA
DELETED

@@ -1,896 +0,0 @@
-Metadata-Version: 2.4
-Name: chuk-ai-session-manager
-Version: 0.5
-Summary: Session manager for AI applications
-Requires-Python: >=3.11
-Description-Content-Type: text/markdown
-Requires-Dist: chuk-sessions>=0.3
-Requires-Dist: chuk-tool-processor>=0.4.1
-Requires-Dist: pydantic>=2.11.3
-Provides-Extra: tiktoken
-Requires-Dist: tiktoken>=0.9.0; extra == "tiktoken"
-Provides-Extra: redis
-Requires-Dist: redis>=4.0.0; extra == "redis"
-Provides-Extra: dev
-Requires-Dist: pytest>=7.0.0; extra == "dev"
-Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
-Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
-Requires-Dist: redis>=4.0.0; extra == "dev"
-Requires-Dist: black>=23.0.0; extra == "dev"
-Requires-Dist: isort>=5.12.0; extra == "dev"
-Requires-Dist: mypy>=1.0.0; extra == "dev"
-Provides-Extra: full
-
-# CHUK AI Session Manager Documentation
-
-A powerful session management system for AI applications that provides automatic conversation tracking, token usage monitoring, tool call logging, infinite context support, and hierarchical session relationships.
-
-## Table of Contents
-
-1. [Overview](#overview)
-2. [Import Issues & Fixes](#import-issues--fixes)
-3. [Core Architecture](#core-architecture)
-4. [Quick Start](#quick-start)
-5. [Simple API](#simple-api)
-6. [Advanced Usage](#advanced-usage)
-7. [Core Models](#core-models)
-8. [Infinite Context](#infinite-context)
-9. [Tool Integration](#tool-integration)
-10. [Session Storage](#session-storage)
-11. [Prompt Building](#prompt-building)
-12. [Configuration](#configuration)
-
-## Overview
-
-The CHUK AI Session Manager is designed to solve common challenges in AI application development:
-
-- **Conversation Tracking**: Automatically track user-AI interactions
-- **Token Management**: Monitor usage and costs across different models
-- **Infinite Context**: Handle conversations that exceed token limits through automatic summarization
-- **Tool Integration**: Log tool calls and results seamlessly
-- **Session Hierarchy**: Create parent-child relationships between conversation segments
-- **Flexible Storage**: Built on CHUK Sessions for reliable persistence
-
-### Key Features
-
-- **Zero-friction API**: Simple functions for common tasks
-- **Async-first**: Built for modern Python async/await patterns
-- **Token-aware**: Automatic token counting and cost estimation
-- **Provider-agnostic**: Works with any LLM provider (OpenAI, Anthropic, etc.)
-- **Hierarchical sessions**: Support for complex conversation structures
-- **Automatic summarization**: Maintains context across session segments
-
-## Import Structure
-
-With the clean `__init__.py`, all components should be available at the top level:
-
-```python
-from chuk_ai_session_manager import (
-    # Simple API - Primary interface for most users
-    SessionManager,
-    track_conversation,
-    track_llm_call,
-    quick_conversation,
-    track_infinite_conversation,
-    track_tool_use,
-    get_session_stats,
-    get_conversation_history,
-
-    # Core Models
-    Session,
-    SessionEvent,
-    SessionMetadata,
-    SessionRun,
-    RunStatus,
-
-    # Enums
-    EventSource,
-    EventType,
-
-    # Token Management
-    TokenUsage,
-    TokenSummary,
-
-    # Advanced Components
-    InfiniteConversationManager,
-    SummarizationStrategy,
-    SessionAwareToolProcessor,
-    build_prompt_from_session,
-    PromptStrategy,
-    truncate_prompt_to_token_limit,
-
-    # Storage
-    setup_chuk_sessions_storage,
-
-    # Exceptions
-    SessionManagerError,
-    SessionNotFound,
-    SessionAlreadyExists,
-    InvalidSessionOperation,
-    TokenLimitExceeded,
-    StorageError,
-    ToolProcessingError,
-
-    # Utilities
-    configure_storage,
-    get_version,
-    is_available
-)
-```
-
-### Verifying Installation
-
-Check that everything is working:
-
-```python
-import chuk_ai_session_manager as casm
-
-print(f"Version: {casm.get_version()}")
-print("Available components:", casm.is_available())
-
-# This should show all components as True
-# {
-#   "core_enums": True,
-#   "core_models": True,
-#   "simple_api": True,
-#   "storage": True,
-#   "infinite_context": True,
-#   "tool_processor": True,
-#   "prompt_builder": True,
-#   "token_tracking": True,
-#   "exceptions": True,
-#   "session_manager": True
-# }
-```
-
-## Core Architecture
-
-The system is built around several key components working together to provide seamless conversation management:
-
-```
-┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐
-│ Simple API │ │ SessionManager │ │ Core Models │
-│ │ │ │ │ │
-│ track_conversation() │ │ High-level API │ │ Session │
-│ track_llm_call() │ │ Infinite context │ │ SessionEvent │
-│ quick_conversation() │ │ Auto-summarization│ │ TokenUsage │
-└─────────────────┘ └──────────────────┘ └─────────────────┘
-│ │ │
-└───────────────────────┼───────────────────────┘
-│
-┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐
-│ Tool Processor │ │ Storage Backend │ │ Prompt Builder │
-│ │ │ │ │ │
-│ Session-aware │ │ CHUK Sessions │ │ Multiple │
-│ Tool execution │ │ JSON persistence │ │ strategies │
-│ Retry & caching │ │ TTL management │ │ Token limits │
-└─────────────────┘ └──────────────────┘ └─────────────────┘
-```
-
-**Key Components:**
-- **Simple API**: One-line functions for common operations
-- **SessionManager**: High-level conversation management with infinite context
-- **Core Models**: Session, SessionEvent, TokenUsage for data modeling
-- **Tool Processor**: Automatic tool call tracking with retry and caching
-- **Storage Backend**: CHUK Sessions for reliable persistence
-- **Prompt Builder**: Intelligent context building for LLM calls
-
-## Quick Start
-
-### Installation
-
-```bash
-# Install the package
-uv add chuk-ai-session-manager
-
-# Or with pip
-pip install chuk-ai-session-manager
-```
-
-### Basic Usage
-
-```python
-from chuk_ai_session_manager import track_conversation
-
-# Track a simple conversation
-session_id = await track_conversation(
-    user_message="What's the weather like?",
-    ai_response="I don't have access to real-time weather data.",
-    model="gpt-3.5-turbo",
-    provider="openai"
-)
-
-print(f"Conversation tracked in session: {session_id}")
-```
-
-### With Statistics
-
-```python
-from chuk_ai_session_manager import quick_conversation
-
-stats = await quick_conversation(
-    user_message="Explain quantum computing",
-    ai_response="Quantum computing uses quantum mechanical phenomena...",
-    model="gpt-4",
-    provider="openai"
-)
-
-print(f"Tokens used: {stats['total_tokens']}")
-print(f"Estimated cost: ${stats['estimated_cost']:.4f}")
-```
-
-### Basic Integration with Your LLM
-
-```python
-from chuk_ai_session_manager import track_llm_call
-import openai
-
-async def my_openai_call(prompt):
-    response = await openai.chat.completions.create(
-        model="gpt-3.5-turbo",
-        messages=[{"role": "user", "content": prompt}]
-    )
-    return response.choices[0].message.content
-
-# Track your LLM call automatically
-response, session_id = await track_llm_call(
-    user_input="Explain machine learning",
-    llm_function=my_openai_call,
-    model="gpt-3.5-turbo",
-    provider="openai"
-)
-
-print(f"AI Response: {response}")
-print(f"Tracked in session: {session_id}")
-```
-
-## Simple API
-
-The Simple API provides convenient functions for common tasks:
-
-```python
-from chuk_ai_session_manager import (
-    track_conversation,
-    track_llm_call,
-    quick_conversation,
-    track_infinite_conversation,
-    track_tool_use,
-    get_session_stats,
-    get_conversation_history
-)
-```
-
-### `track_conversation()`
-
-The simplest way to track a conversation turn - perfect for one-off tracking:
-
-```python
-session_id = await track_conversation(
-    user_message="Hello!",
-    ai_response="Hi there! How can I help?",
-    model="gpt-3.5-turbo",
-    provider="openai",
-    session_id=None,  # Optional: continue existing session
-    infinite_context=False,  # Enable infinite context
-    token_threshold=4000  # Token limit for segmentation
-)
-
-# Returns the session ID for continuing the conversation later
-```
-
-### `track_llm_call()`
-
-Wrap your LLM function calls for automatic tracking:
-
-```python
-async def call_openai(prompt):
-    # Your OpenAI API call here
-    response = await openai.chat.completions.create(
-        model="gpt-3.5-turbo",
-        messages=[{"role": "user", "content": prompt}]
-    )
-    return response.choices[0].message.content
-
-response, session_id = await track_llm_call(
-    user_input="Explain machine learning",
-    llm_function=call_openai,
-    model="gpt-3.5-turbo",
-    provider="openai"
-)
-```
-
-### `track_infinite_conversation()`
-
-For long conversations that might exceed token limits:
-
-```python
-# Start a conversation
-session_id = await track_infinite_conversation(
-    user_message="Tell me about the history of computing",
-    ai_response="Computing history begins with ancient calculating devices...",
-    model="gpt-4",
-    token_threshold=4000,  # Auto-segment after 4000 tokens
-    max_turns=20  # Or after 20 conversation turns
-)
-
-# Continue the conversation
-session_id = await track_infinite_conversation(
-    user_message="What about quantum computers?",
-    ai_response="Quantum computing represents a fundamental shift...",
-    session_id=session_id,  # Continue the same conversation
-    model="gpt-4"
-)
-```
-
-### `track_tool_use()`
-
-Track tool/function calls:
-
-```python
-session_id = await track_tool_use(
-    tool_name="calculator",
-    arguments={"operation": "add", "a": 5, "b": 3},
-    result={"result": 8},
-    session_id=session_id,
-    error=None  # Optional error message
-)
-```
-
-## SessionManager Class
-
-For more control and persistent conversations, use the `SessionManager` class directly:
-
-```python
-from chuk_ai_session_manager import SessionManager
-
-# Create a session manager
-sm = SessionManager(
-    system_prompt="You are a helpful assistant specialized in Python programming.",
-    infinite_context=True,
-    token_threshold=4000,
-    max_turns_per_segment=20
-)
-
-# Track conversations
-await sm.user_says("How do I create a list comprehension?")
-await sm.ai_responds(
-    "A list comprehension is a concise way to create lists in Python...",
-    model="gpt-4",
-    provider="openai"
-)
-
-# Track tool usage
-await sm.tool_used(
-    tool_name="code_executor",
-    arguments={"code": "print([x**2 for x in range(5)])"},
-    result={"output": "[0, 1, 4, 9, 16]"}
-)
-
-# Get session statistics
-stats = await sm.get_stats()
-print(f"Session {stats['session_id']}: {stats['total_messages']} messages, ${stats['estimated_cost']:.4f}")
-```
-
-### Working with System Prompts
-
-```python
-# Set initial system prompt
-sm = SessionManager(system_prompt="You are a creative writing assistant.")
-
-# Update system prompt later
-await sm.update_system_prompt("You are now a technical documentation writer.")
-
-# Get messages including system prompt for your LLM calls
-messages = await sm.get_messages_for_llm(include_system=True)
-# [{"role": "system", "content": "You are now a technical documentation writer."}, ...]
-```
-
-### SessionManager Properties
-
-```python
-sm = SessionManager()
-
-# Access session information
-print(f"Session ID: {sm.session_id}")
-print(f"System Prompt: {sm.system_prompt}")
-print(f"Infinite Context: {sm.is_infinite}")
-
-# Check if this is a new session
-print(f"Is new session: {sm._is_new}")  # Useful for initialization logic
-```
-
-### Managing Long Conversations
-
-```python
-# Enable infinite context with custom settings
-sm = SessionManager(
-    infinite_context=True,
-    token_threshold=3000,  # Segment at 3000 tokens
-    max_turns_per_segment=15  # Or 15 conversation turns
-)
-
-# The session will auto-segment when limits are reached
-# You don't need to do anything - it happens automatically!
-
-# Get full conversation across all segments
-full_conversation = await sm.get_conversation(include_all_segments=True)
-
-# Get session chain (list of session IDs in the conversation)
-session_chain = await sm.get_session_chain()
-print(f"Conversation spans {len(session_chain)} sessions: {session_chain}")
-```
-
-## Core Models
-
-### Session
-
-The main container for a conversation:
-
-```python
-from chuk_ai_session_manager import Session
-
-# Create a new session
-session = await Session.create(
-    parent_id=None,  # Optional parent session
-    metadata={"user_id": "user123", "topic": "programming"}
-)
-
-# Session properties
-print(f"Session ID: {session.id}")
-print(f"Created: {session.metadata.created_at}")
-print(f"Total tokens: {session.total_tokens}")
-print(f"Total cost: ${session.total_cost:.4f}")
-
-# Add events
-from chuk_ai_session_manager.models.session_event import SessionEvent
-from chuk_ai_session_manager.models.event_source import EventSource
-from chuk_ai_session_manager.models.event_type import EventType
-
-event = await SessionEvent.create_with_tokens(
-    message="Hello world!",
-    prompt="Hello world!",
-    model="gpt-3.5-turbo",
-    source=EventSource.USER,
-    type=EventType.MESSAGE
-)
-
-await session.add_event_and_save(event)
-```
-
-### SessionEvent
-
-Individual events within a session:
-
-```python
-from chuk_ai_session_manager import SessionEvent, EventSource, EventType
-
-# Create an event with automatic token counting
-event = await SessionEvent.create_with_tokens(
-    message="What is machine learning?",
-    prompt="What is machine learning?",
-    completion=None,  # For user messages
-    model="gpt-3.5-turbo",
-    source=EventSource.USER,
-    type=EventType.MESSAGE
-)
-
-# Event properties
-print(f"Event ID: {event.id}")
-print(f"Tokens used: {event.token_usage.total_tokens}")
-print(f"Source: {event.source.value}")
-print(f"Type: {event.type.value}")
-
-# Update metadata
-await event.set_metadata("user_id", "user123")
-await event.set_metadata("intent", "question")
-
-# Check metadata
-user_id = await event.get_metadata("user_id")
-has_intent = await event.has_metadata("intent")
-```
-
-### TokenUsage
-
-Tracks token consumption and costs:
-
-```python
-from chuk_ai_session_manager import TokenUsage
-
-# Create from text
-usage = await TokenUsage.from_text(
-    prompt="What is the capital of France?",
-    completion="The capital of France is Paris.",
-    model="gpt-3.5-turbo"
-)
-
-print(f"Prompt tokens: {usage.prompt_tokens}")
-print(f"Completion tokens: {usage.completion_tokens}")
-print(f"Total tokens: {usage.total_tokens}")
-print(f"Estimated cost: ${usage.estimated_cost_usd:.6f}")
-
-# Update token usage
-await usage.update(prompt_tokens=10, completion_tokens=5)
-
-# Count tokens for any text
-token_count = await TokenUsage.count_tokens("Hello world!", "gpt-4")
-```
-
-### Event Source and Type Enums
-
-```python
-from chuk_ai_session_manager import EventSource, EventType
-
-# Event sources
-EventSource.USER  # User input
-EventSource.LLM  # AI model response
-EventSource.SYSTEM  # System/tool events
-
-# Event types
-EventType.MESSAGE  # Conversation messages
-EventType.TOOL_CALL  # Tool/function calls
-EventType.SUMMARY  # Session summaries
-EventType.REFERENCE  # References to other content
-EventType.CONTEXT_BRIDGE  # Context bridging events
-```
-
-## Infinite Context
-
-The infinite context system automatically handles conversations that exceed token limits by creating linked sessions with summaries.
-
-### InfiniteConversationManager
-
-```python
-from chuk_ai_session_manager import (
-    InfiniteConversationManager,
-    SummarizationStrategy,
-    EventSource
-)
-
-# Create manager with custom settings
-icm = InfiniteConversationManager(
-    token_threshold=3000,
-    max_turns_per_segment=15,
-    summarization_strategy=SummarizationStrategy.KEY_POINTS
-)
-
-# Process messages (automatically segments when needed)
-async def my_llm_callback(messages):
-    # Your LLM call here
-    return "Summary of the conversation..."
-
-current_session_id = await icm.process_message(
-    session_id="session-123",
-    message="Tell me about quantum computing",
-    source=EventSource.USER,
-    llm_callback=my_llm_callback,
-    model="gpt-4"
-)
-
-# Build context for LLM calls
-context = await icm.build_context_for_llm(
-    session_id=current_session_id,
-    max_messages=10,
-    include_summaries=True
-)
-
-# Get session chain
-chain = await icm.get_session_chain(current_session_id)
-print(f"Conversation chain: {[s.id for s in chain]}")
-```
-
-### Summarization Strategies
-
-```python
-from chuk_ai_session_manager import SummarizationStrategy
-
-# Different summarization approaches
-SummarizationStrategy.BASIC  # General overview
-SummarizationStrategy.KEY_POINTS  # Focus on key information
-SummarizationStrategy.TOPIC_BASED  # Organize by topics
-SummarizationStrategy.QUERY_FOCUSED  # Focus on user questions
-```
-
-## Tool Integration
-
-### SessionAwareToolProcessor
-
-Integrates with `chuk_tool_processor` for automatic tool call tracking:
-
-```python
-from chuk_ai_session_manager import SessionAwareToolProcessor
-
-# Create processor for a session
-processor = await SessionAwareToolProcessor.create(
-    session_id="session-123",
-    enable_caching=True,
-    max_retries=2,
-    retry_delay=1.0
-)
-
-# Process LLM message with tool calls
-llm_response = {
-    "tool_calls": [
-        {
-            "function": {
-                "name": "calculator",
-                "arguments": '{"operation": "add", "a": 5, "b": 3}'
-            }
-        }
-    ]
-}
-
-results = await processor.process_llm_message(llm_response, None)
-for result in results:
-    print(f"Tool: {result.tool}, Result: {result.result}")
-```
-
-### Sample Tools
-
-```python
-# The package includes sample tools for demonstration
-from chuk_ai_session_manager.sample_tools import (
-    CalculatorTool,
-    WeatherTool,
-    SearchTool
-)
-
-# These are registered with chuk_tool_processor
-# You can see how to structure your own tools
-```
-
-## Session Storage
-
-### CHUK Sessions Backend
-
-The storage is built on CHUK Sessions:
-
-```python
-from chuk_ai_session_manager import (
-    setup_chuk_sessions_storage,
-    SessionStorage,
-    ChukSessionsStore
-)
-
-# Setup storage (usually done automatically)
-backend = setup_chuk_sessions_storage(
-    sandbox_id="my-ai-app",
-    default_ttl_hours=48
-)
-
-# Get the store
-store = ChukSessionsStore(backend)
-
-# Manual session operations
-session = await store.get("session-123")
-await store.save(session)
-await store.delete("session-123")
-session_ids = await store.list_sessions(prefix="user-")
-```
-
-### Storage Configuration
-
-```python
-# Configure storage at import time
-from chuk_ai_session_manager import configure_storage
-
-success = configure_storage(
-    sandbox_id="my-application",
-    default_ttl_hours=72  # 3 day TTL
-)
-
-if success:
-    print("Storage configured successfully")
-else:
-    print("Storage configuration failed")
-```
-
-## Prompt Building
-
-### Building Prompts from Sessions
-
-```python
-from chuk_ai_session_manager import (
-    build_prompt_from_session,
-    PromptStrategy,
-    truncate_prompt_to_token_limit
-)
-
-# Build prompts with different strategies
-prompt = await build_prompt_from_session(
-    session,
-    strategy=PromptStrategy.CONVERSATION,  # Include conversation history
-    max_tokens=3000,
-    model="gpt-4",
-    include_parent_context=True,
-    max_history=10
-)
-
-# Prompt strategies
-PromptStrategy.MINIMAL  # Just task and latest context
-PromptStrategy.TASK_FOCUSED  # Focus on the task
-PromptStrategy.TOOL_FOCUSED  # Emphasize tool usage
-PromptStrategy.CONVERSATION  # Include conversation history
-PromptStrategy.HIERARCHICAL  # Include parent session context
-```
-
-### Token Limit Management
-
-```python
-from chuk_ai_session_manager import truncate_prompt_to_token_limit
-
-# Ensure prompt fits within token limits
-truncated_prompt = await truncate_prompt_to_token_limit(
-    prompt=messages,
-    max_tokens=3000,
-    model="gpt-3.5-turbo"
-)
-```
-
-## Configuration
-
-### Package Configuration
-
-```python
-import chuk_ai_session_manager as casm
-
-# Check what's available
-print("Package version:", casm.get_version())
-availability = casm.is_available()
-print("Available components:", availability)
-
-# Configure storage
-success = casm.configure_storage(
-    sandbox_id="my-app",
-    default_ttl_hours=24
-)
-```
-
-### Environment Setup
-
-The package depends on several components:
-
-1. **Required**: `chuk_sessions` - for storage backend
-2. **Required**: `pydantic` - for data models
-3. **Optional**: `tiktoken` - for accurate token counting (falls back to approximation)
-4. **Optional**: `chuk_tool_processor` - for tool integration
-
-### Error Handling
-
-```python
-from chuk_ai_session_manager import (
-    SessionManagerError,
-    SessionNotFound,
-    TokenLimitExceeded,
-    StorageError
-)
-
-try:
-    session_id = await track_conversation("Hello", "Hi there")
-except SessionNotFound as e:
-    print(f"Session not found: {e}")
-except TokenLimitExceeded as e:
-    print(f"Token limit exceeded: {e}")
-except StorageError as e:
-    print(f"Storage error: {e}")
-except SessionManagerError as e:
-    print(f"General session error: {e}")
-```
-
-## 🌟 What Makes CHUK Special?
-
-| Feature | Other Libraries | CHUK AI Session Manager |
-|---------|----------------|------------------------|
-| **Setup Complexity** | Complex configuration | 3 lines of code |
-| **Cost Tracking** | Manual calculation | Automatic across all providers |
-| **Long Conversations** | Token limit errors | Infinite context with auto-segmentation |
-| **Multi-Provider** | Provider-specific code | Works with any LLM |
-| **Production Ready** | Requires additional work | Built for production |
-| **Learning Curve** | Steep | 5 minutes to productivity |
-| **Tool Integration** | Manual tracking | Automatic tool call logging |
-| **Session Management** | Build from scratch | Complete session hierarchy |
-
-## 🎯 Quick Decision Guide
-
-**Choose CHUK AI Session Manager if you want:**
-- ✅ Simple conversation tracking with zero configuration
-- ✅ Automatic cost monitoring across all LLM providers
-- ✅ Infinite conversation length without token limit errors
-- ✅ Production-ready session management out of the box
-- ✅ Complete conversation analytics and observability
-- ✅ Framework-agnostic solution that works with any LLM library
-- ✅ Built-in tool call tracking and retry mechanisms
-- ✅ Hierarchical session relationships for complex workflows
-
-## 📊 Monitoring & Analytics
-
-```python
-# Get comprehensive session analytics
-stats = await sm.get_stats(include_all_segments=True)
-
-print(f"""
-🚀 Session Analytics Dashboard
-============================
-Session ID: {stats['session_id']}
-Total Messages: {stats['total_messages']}
-User Messages: {stats['user_messages']}
-AI Messages: {stats['ai_messages']}
-Tool Calls: {stats['tool_calls']}
-Total Tokens: {stats['total_tokens']}
-Total Cost: ${stats['estimated_cost']:.6f}
-Session Segments: {stats.get('session_segments', 1)}
-Created: {stats['created_at']}
-Last Update: {stats['last_update']}
-Infinite Context: {stats.get('infinite_context', False)}
-""")
-
-# Get conversation history
-conversation = await sm.get_conversation(include_all_segments=True)
-for i, turn in enumerate(conversation):
-    print(f"{i+1}. {turn['role']}: {turn['content'][:50]}...")
-```
-
-## 🛡️ Error Handling
-
-The package provides specific exceptions for different error conditions:
-
-```python
-from chuk_ai_session_manager import (
-    SessionManagerError,
-    SessionNotFound,
-    TokenLimitExceeded,
-    StorageError
-)
-
-try:
-    session_id = await track_conversation("Hello", "Hi there")
-except SessionNotFound as e:
-    print(f"Session not found: {e}")
-except TokenLimitExceeded as e:
-    print(f"Token limit exceeded: {e}")
-except StorageError as e:
-    print(f"Storage error: {e}")
-except SessionManagerError as e:
-    print(f"General session error: {e}")
-```
-
-## 🔧 Environment Setup
-
-The package requires several dependencies that should be automatically installed:
-
-1. **Required**: `chuk_sessions` - for storage backend
-2. **Required**: `pydantic` - for data models
-3. **Optional**: `tiktoken` - for accurate token counting (falls back to approximation)
-4. **Optional**: `chuk_tool_processor` - for tool integration
-
-### Dependencies Check
-
-```python
-import chuk_ai_session_manager as casm
-
-# Check if all components are available
-availability = casm.is_available()
-for component, available in availability.items():
-    status = "✅" if available else "❌"
-    print(f"{status} {component}")
-```
-
-## 🤝 Community & Support
-
-- 📖 **Full Documentation**: Complete API reference and tutorials
-- 🐛 **Issues**: Report bugs and request features on GitHub
-- 💡 **Examples**: Check `/examples` directory for working code
-- 📧 **Support**: Enterprise support available
-
-## 📝 License
-
-MIT License - build amazing AI applications with confidence!
-
----
-
-**🎉 Ready to build better AI applications?**
-
-```bash
-uv add chuk-ai-session-manager
-```
-
-**Get started in 30 seconds with one line of code!**
{chuk_ai_session_manager-0.5.dist-info → chuk_ai_session_manager-0.7.dist-info}/WHEEL
RENAMED
File without changes

{chuk_ai_session_manager-0.5.dist-info → chuk_ai_session_manager-0.7.dist-info}/top_level.txt
RENAMED
File without changes