chuk-ai-session-manager 0.2.1__py3-none-any.whl → 0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chuk_ai_session_manager/__init__.py +57 -343
- chuk_ai_session_manager/api/simple_api.py +329 -198
- chuk_ai_session_manager/models/token_usage.py +13 -2
- chuk_ai_session_manager/sample_tools.py +1 -1
- chuk_ai_session_manager/session_prompt_builder.py +70 -62
- chuk_ai_session_manager-0.4.dist-info/METADATA +354 -0
- {chuk_ai_session_manager-0.2.1.dist-info → chuk_ai_session_manager-0.4.dist-info}/RECORD +9 -11
- chuk_ai_session_manager/utils/__init__.py +0 -0
- chuk_ai_session_manager/utils/status_display_utils.py +0 -474
- chuk_ai_session_manager-0.2.1.dist-info/METADATA +0 -501
- {chuk_ai_session_manager-0.2.1.dist-info → chuk_ai_session_manager-0.4.dist-info}/WHEEL +0 -0
- {chuk_ai_session_manager-0.2.1.dist-info → chuk_ai_session_manager-0.4.dist-info}/top_level.txt +0 -0
chuk_ai_session_manager/models/token_usage.py

```diff
@@ -175,7 +175,7 @@ class TokenUsage(BaseModel):
     )
 
     @staticmethod
-    def _count_tokens_sync(text: Optional[str], model: str = "gpt-3.5-turbo") -> int:
+    def _count_tokens_sync(text: Optional[Union[str, Any]], model: str = "gpt-3.5-turbo") -> int:
         """
         Synchronous implementation of count_tokens.
 
@@ -188,6 +188,17 @@ class TokenUsage(BaseModel):
         """
         if text is None:
             return 0
+
+        # Convert to string if not already a string
+        if not isinstance(text, str):
+            try:
+                text = str(text)
+            except Exception:
+                return 0
+
+        # Empty string has 0 tokens
+        if not text:
+            return 0
 
         if TIKTOKEN_AVAILABLE:
             try:
@@ -206,7 +217,7 @@ class TokenUsage(BaseModel):
         return int(len(text) / 4)
 
     @staticmethod
-    async def count_tokens(text: Optional[str], model: str = "gpt-3.5-turbo") -> int:
+    async def count_tokens(text: Optional[Union[str, Any]], model: str = "gpt-3.5-turbo") -> int:
         """
         Async version of count_tokens.
 
```
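In 0.2.1, a non-string `text` would fail token counting, since both the tiktoken path and the `len(text) / 4` fallback assume a `str`; 0.4 coerces the input first. A minimal standalone sketch of the new guard combined with the ~4-characters-per-token fallback shown in the hunk (the function name here is illustrative, not the package's API):

```python
from typing import Any, Optional

def count_tokens_fallback(text: Optional[Any]) -> int:
    """Mirrors the 0.4 guard: None -> 0, non-str coerced via str(), then ~4 chars/token."""
    if text is None:
        return 0
    if not isinstance(text, str):
        try:
            text = str(text)
        except Exception:
            return 0
    if not text:
        return 0
    return int(len(text) / 4)  # the same rough estimate the real code uses when tiktoken is absent

assert count_tokens_fallback(None) == 0
assert count_tokens_fallback("") == 0
assert count_tokens_fallback({"content": "hello"}) > 0  # dicts no longer blow up
```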
chuk_ai_session_manager/session_prompt_builder.py

```diff
@@ -37,7 +37,8 @@ async def build_prompt_from_session(
     max_tokens: Optional[int] = None,
     model: str = "gpt-3.5-turbo",
     include_parent_context: bool = False,
-    current_query: Optional[str] = None
+    current_query: Optional[str] = None,
+    max_history: int = 5  # Add this parameter for conversation strategy
 ) -> List[Dict[str, str]]:
     """
     Build a prompt for the next LLM call from a Session asynchronously.
@@ -49,6 +50,7 @@ async def build_prompt_from_session(
         model: Model to use for token counting
         include_parent_context: Whether to include context from parent sessions
         current_query: Current user query for relevance-based context selection
+        max_history: Maximum number of messages to include for conversation strategy
 
     Returns:
         A list of message dictionaries suitable for LLM API calls
@@ -72,7 +74,7 @@ async def build_prompt_from_session(
     elif strategy == PromptStrategy.TOOL_FOCUSED:
        return await _build_tool_focused_prompt(session)
    elif strategy == PromptStrategy.CONVERSATION:
-        return await _build_conversation_prompt(session, max_history
+        return await _build_conversation_prompt(session, max_history)
    elif strategy == PromptStrategy.HIERARCHICAL:
        return await _build_hierarchical_prompt(session, include_parent_context)
    else:
```
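Note that 0.2.1's dispatch already referenced `max_history` even though the signature did not define it, so the CONVERSATION strategy would presumably have raised a `NameError`; 0.4 adds the parameter with a default of 5 and forwards it. A hedged usage sketch; the `strategy` keyword is inferred from the dispatch code above, and the import path is an assumption:

```python
# Sketch only: assumes an existing Session instance and that PromptStrategy
# is importable from the same module (unverified import path).
from chuk_ai_session_manager.session_prompt_builder import (
    PromptStrategy,
    build_prompt_from_session,
)

async def make_prompt(session):
    return await build_prompt_from_session(
        session,
        strategy=PromptStrategy.CONVERSATION,
        max_history=5,  # new in 0.4: include at most the last 5 messages
    )
```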
```diff
@@ -112,7 +114,7 @@ async def _build_minimal_prompt(session: Session) -> List[Dict[str, str]]:
 
     if assistant_msg is None:
         # Only the user message exists so far
-        return [{"role": "user", "content": first_user.message}] if first_user else []
+        return [{"role": "user", "content": _extract_content(first_user.message)}] if first_user else []
 
     # Children of that assistant
     children = [
@@ -126,11 +128,7 @@ async def _build_minimal_prompt(session: Session) -> List[Dict[str, str]]:
     # Assemble prompt
     prompt: List[Dict[str, str]] = []
     if first_user:
-
-        user_content = first_user.message
-        if isinstance(user_content, dict) and "content" in user_content:
-            user_content = user_content["content"]
-        prompt.append({"role": "user", "content": user_content})
+        prompt.append({"role": "user", "content": _extract_content(first_user.message)})
 
     # ALWAYS add the assistant marker - but strip its free text
     prompt.append({"role": "assistant", "content": None})
@@ -166,6 +164,24 @@ async def _build_minimal_prompt(session: Session) -> List[Dict[str, str]]:
     return prompt
 
 
+def _extract_content(message: Any) -> str:
+    """
+    Extract content string from a message that could be a string or dict.
+
+    Args:
+        message: The message content (string, dict, or other)
+
+    Returns:
+        The extracted content as a string
+    """
+    if isinstance(message, str):
+        return message
+    elif isinstance(message, dict) and "content" in message:
+        return message["content"]
+    else:
+        return str(message)
+
+
 async def _build_task_focused_prompt(session: Session) -> List[Dict[str, str]]:
     """
     Build a task-focused prompt.
```
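`_extract_content` is the refactor that replaces the copy-pasted `isinstance` checks in the strategy builders. Given the definition in the hunk above, its behavior on the three input shapes it handles:

```python
_extract_content("hello")                            # -> "hello"
_extract_content({"role": "user", "content": "hi"})  # -> "hi"   (dicts yield their "content")
_extract_content(42)                                 # -> "42"   (anything else goes through str())
```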
```diff
@@ -201,17 +217,11 @@ async def _build_task_focused_prompt(session: Session) -> List[Dict[str, str]]:
     prompt = []
 
     # Always include the first user message (the main task)
-
-    if isinstance(first_content, dict) and "content" in first_content:
-        first_content = first_content["content"]
-    prompt.append({"role": "user", "content": first_content})
+    prompt.append({"role": "user", "content": _extract_content(first_user.message)})
 
     # Include the latest user message if different from the first
     if latest_user and latest_user.id != first_user.id:
-
-        if isinstance(latest_content, dict) and "content" in latest_content:
-            latest_content = latest_content["content"]
-        prompt.append({"role": "user", "content": latest_content})
+        prompt.append({"role": "user", "content": _extract_content(latest_user.message)})
 
     # Include assistant response placeholder
     if assistant_msg:
@@ -274,10 +284,7 @@ async def _build_tool_focused_prompt(session: Session) -> List[Dict[str, str]]:
     prompt = []
 
     # Include user message
-
-    if isinstance(user_content, dict) and "content" in user_content:
-        user_content = user_content["content"]
-    prompt.append({"role": "user", "content": user_content})
+    prompt.append({"role": "user", "content": _extract_content(latest_user.message)})
 
     # Include assistant placeholder
     if assistant_msg:
```
```diff
@@ -334,17 +341,17 @@ async def _build_conversation_prompt(
 
     # Build the conversation history
     prompt = []
-    for msg in recent_messages:
+    for i, msg in enumerate(recent_messages):
         role = "user" if msg.source == EventSource.USER else "assistant"
-        content = msg.message
+        content = _extract_content(msg.message)
 
-        # …
-        if …
-
-
-
-
-        content …
+        # For the last assistant message, set content to None and add tool calls
+        if (role == "assistant" and
+                msg == recent_messages[-1] and
+                msg.source != EventSource.USER):
+
+            # Add the message first with None content
+            prompt.append({"role": role, "content": None})
 
             # Add tool call results for this assistant message
             tool_calls = [
@@ -352,9 +359,6 @@ async def _build_conversation_prompt(
                 if e.type == EventType.TOOL_CALL and e.metadata.get("parent_event_id") == msg.id
             ]
 
-            # Add the message first, then tools
-            prompt.append({"role": role, "content": content})
-
             # Add tool results
             for tc in tool_calls:
                 if isinstance(tc.message, dict):
@@ -366,11 +370,9 @@ async def _build_conversation_prompt(
                         "name": tool_name,
                         "content": json.dumps(tool_result, default=str),
                     })
-
-        # …
-
-
-        prompt.append({"role": role, "content": content})
+        else:
+            # Regular message
+            prompt.append({"role": role, "content": content})
 
     return prompt
 
```
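The net change to the conversation strategy: 0.2.1 appended every assistant turn with its text (and, per the removed lines, appears to have appended it a second time around the tool results); 0.4 strips only the final assistant message to `content: None`, follows it with its tool results, and appends every other turn as a regular message. A sketch of the resulting list shape for a conversation ending in a tool call; the `"role": "tool"` key on the result entry is an assumption, since the hunk only shows its `name` and `content` fields:

```python
# Illustrative output shape, not captured from a real session.
prompt = [
    {"role": "user", "content": "What's the weather in Paris?"},
    {"role": "assistant", "content": "Let me check that for you."},  # earlier turn: text kept
    {"role": "user", "content": "Thanks!"},
    {"role": "assistant", "content": None},  # final assistant turn: free text stripped
    {"role": "tool", "name": "weather", "content": '{"temp_c": 21}'},  # "role" assumed; name/content per diff
]
```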
```diff
@@ -391,32 +393,38 @@ async def _build_hierarchical_prompt(
 
     # If parent context is enabled and session has a parent
     if include_parent_context and session.parent_id:
-
-        …
-        …
-        …
-        …
-        if parent:
-            # Find the most recent summary in parent
-            summary_event = next(
-                (e for e in reversed(parent.events)
-                 if e.type == EventType.SUMMARY),
-                None
-            )
+        try:
+            # Get the storage backend and create store
+            backend = get_backend()
+            store = ChukSessionsStore(backend)
+            parent = await store.get(session.parent_id)
 
-        if …
-        # …
-
-
-
-
-
-
-
-
-
-
+            if parent:
+                # Find the most recent summary in parent
+                summary_event = next(
+                    (e for e in reversed(parent.events)
+                     if e.type == EventType.SUMMARY),
+                    None
+                )
+
+                if summary_event:
+                    # Extract summary content
+                    summary_content = summary_event.message
+                    if isinstance(summary_content, dict) and "note" in summary_content:
+                        summary_content = summary_content["note"]
+                    elif isinstance(summary_content, dict) and "content" in summary_content:
+                        summary_content = summary_content["content"]
+                    else:
+                        summary_content = str(summary_content)
+
+                    # Add parent context at the beginning
+                    prompt.insert(0, {
+                        "role": "system",
+                        "content": f"Context from previous conversation: {summary_content}"
+                    })
+        except Exception as e:
+            # If we can't load parent context, just continue with minimal prompt
+            logger.warning(f"Could not load parent context for session {session.parent_id}: {e}")
 
     return prompt
 
```
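Beyond wrapping the lookup in try/except, parent sessions are now fetched through the chuk-sessions storage layer (`get_backend()` / `ChukSessionsStore`), and the summary text is extracted with a fixed precedence: a `note` key wins over `content`, with `str()` as the last resort. A standalone sketch of just that precedence (the helper name is illustrative):

```python
def summary_text(message: object) -> str:
    # Mirrors the 0.4 extraction order: "note" first, then "content", then str().
    if isinstance(message, dict) and "note" in message:
        return message["note"]
    if isinstance(message, dict) and "content" in message:
        return message["content"]
    return str(message)

summary_text({"note": "User asked about dragons", "content": "full transcript..."})
# -> "User asked about dragons"
```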
chuk_ai_session_manager-0.4.dist-info/METADATA (new file, +354 lines)

```
Metadata-Version: 2.4
Name: chuk-ai-session-manager
Version: 0.4
Summary: Session manager for AI applications
Requires-Python: >=3.11
Description-Content-Type: text/markdown
Requires-Dist: chuk-sessions>=0.3
Requires-Dist: chuk-tool-processor>=0.4.1
Requires-Dist: pydantic>=2.11.3
Provides-Extra: tiktoken
Requires-Dist: tiktoken>=0.9.0; extra == "tiktoken"
Provides-Extra: redis
Requires-Dist: redis>=4.0.0; extra == "redis"
Provides-Extra: dev
Requires-Dist: pytest>=7.0.0; extra == "dev"
Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
Requires-Dist: redis>=4.0.0; extra == "dev"
Requires-Dist: black>=23.0.0; extra == "dev"
Requires-Dist: isort>=5.12.0; extra == "dev"
Requires-Dist: mypy>=1.0.0; extra == "dev"
Provides-Extra: full
```

# chuk-ai-session-manager

[Python 3.11+](https://www.python.org/downloads/)
[MIT License](https://opensource.org/licenses/MIT)

**The easiest way to add conversation tracking to any AI application.**

Track conversations, monitor costs, and manage infinite context with just 3 lines of code. Built for production, designed for simplicity.

## 🚀 30-Second Start

```bash
uv add chuk-ai-session-manager
```

```python
from chuk_ai_session_manager import track_conversation

# Track any AI conversation in one line
await track_conversation("Hello!", "Hi there! How can I help?")
```

That's it! 🎉 Your conversation is now tracked with full observability.

## ✨ Why Choose CHUK?

- **🔥 Stupidly Simple**: 3 lines to track any conversation
- **💰 Cost Smart**: Automatic token counting and cost tracking
- **♾️ Infinite Context**: No more "conversation too long" errors
- **🔧 Any LLM**: Works with OpenAI, Anthropic, local models, anything
- **📊 Full Observability**: See exactly what's happening in your AI app
- **🚀 Production Ready**: Used in real applications, not just demos

## 🎯 Perfect For

- **Building chatbots** that remember conversations
- **Tracking LLM costs** across your entire application
- **Managing long conversations** without hitting token limits
- **Debugging AI applications** with complete audit trails
- **Production AI systems** that need reliable session management

## 📱 Quick Examples

### Track Any Conversation
```python
from chuk_ai_session_manager import track_conversation

# Works with any LLM response
session_id = await track_conversation(
    user_message="What's the weather like?",
    ai_response="It's sunny and 75°F in your area.",
    model="gpt-4",
    provider="openai"
)
```

### Persistent Conversations
```python
from chuk_ai_session_manager import SessionManager

# Create a conversation that remembers context
sm = SessionManager()

await sm.user_says("My name is Alice")
await sm.ai_responds("Nice to meet you, Alice!")

await sm.user_says("What's my name?")
await sm.ai_responds("Your name is Alice!")

# Get conversation stats
stats = await sm.get_stats()
print(f"Cost: ${stats['estimated_cost']:.6f}")
print(f"Tokens: {stats['total_tokens']}")
```

### Infinite Context (Never Run Out of Space)
```python
# Automatically handles conversations of any length
sm = SessionManager(
    infinite_context=True,   # 🔥 Magic happens here
    token_threshold=4000     # When to create new segment
)

# Keep chatting forever - context is preserved automatically
for i in range(100):  # This would normally hit token limits
    await sm.user_says(f"Question {i}: Tell me about AI")
    await sm.ai_responds("AI is fascinating...")

# Still works! Automatic summarization keeps context alive
conversation = await sm.get_conversation()
print(f"Full conversation: {len(conversation)} exchanges")
```

### Cost Tracking (Know What You're Spending)
```python
# Automatic cost monitoring across all interactions
sm = SessionManager()

await sm.user_says("Write a long story about dragons")
await sm.ai_responds("Once upon a time..." * 500)  # Long response

stats = await sm.get_stats()
print(f"💰 That story cost: ${stats['estimated_cost']:.6f}")
print(f"📊 Used {stats['total_tokens']} tokens")
print(f"📈 {stats['user_messages']} user messages, {stats['ai_messages']} AI responses")
```

### Multi-Provider Support
```python
# Works with any LLM provider
import openai
import anthropic

sm = SessionManager()

# OpenAI
await sm.user_says("Hello!")
openai_response = await openai.chat.completions.create(...)
await sm.ai_responds(openai_response.choices[0].message.content, model="gpt-4", provider="openai")

# Anthropic
await sm.user_says("How are you?")
anthropic_response = await anthropic.messages.create(...)
await sm.ai_responds(anthropic_response.content[0].text, model="claude-3", provider="anthropic")

# See costs across all providers
stats = await sm.get_stats()
print(f"Total cost across all providers: ${stats['estimated_cost']:.6f}")
```

## 🛠️ Advanced Features

### Conversation Analytics
```python
# Get detailed insights into your conversations
conversation = await sm.get_conversation()
stats = await sm.get_stats()

print(f"📊 Conversation Analytics:")
print(f"   Messages: {stats['user_messages']} user, {stats['ai_messages']} AI")
print(f"   Average response length: {stats['avg_response_length']}")
print(f"   Most expensive response: ${stats['max_response_cost']:.6f}")
print(f"   Session duration: {stats['duration_minutes']:.1f} minutes")
```

### Tool Integration
```python
# Track tool usage alongside conversations
await sm.tool_used(
    tool_name="web_search",
    arguments={"query": "latest AI news"},
    result={"articles": ["AI breakthrough...", "New model released..."]},
    cost=0.001
)

stats = await sm.get_stats()
print(f"Tool calls: {stats['tool_calls']}")
```

### Session Export/Import
```python
# Export conversations for analysis
conversation_data = await sm.export_conversation()
with open('conversation.json', 'w') as f:
    json.dump(conversation_data, f)

# Import previous conversations
sm = SessionManager()
await sm.import_conversation('conversation.json')
```

## 🎨 Real-World Examples

### Customer Support Bot
```python
async def handle_support_ticket(user_message: str, ticket_id: str):
    # Each ticket gets its own session
    sm = SessionManager(session_id=ticket_id)

    await sm.user_says(user_message)

    # Your AI logic here
    ai_response = await your_ai_model(user_message)
    await sm.ai_responds(ai_response, model="gpt-4", provider="openai")

    # Automatic cost tracking per ticket
    stats = await sm.get_stats()
    print(f"Ticket {ticket_id} cost: ${stats['estimated_cost']:.6f}")

    return ai_response
```

### AI Assistant with Memory
```python
async def ai_assistant():
    sm = SessionManager(infinite_context=True)

    while True:
        user_input = input("You: ")
        if user_input.lower() == 'quit':
            break

        await sm.user_says(user_input)

        # Get conversation context for AI
        conversation = await sm.get_conversation()
        context = "\n".join([f"{turn['role']}: {turn['content']}" for turn in conversation[-5:]])

        # Your AI call with context
        ai_response = await your_ai_model(f"Context:\n{context}\n\nUser: {user_input}")
        await sm.ai_responds(ai_response)

        print(f"AI: {ai_response}")

    # Show final stats
    stats = await sm.get_stats()
    print(f"\n💰 Total conversation cost: ${stats['estimated_cost']:.6f}")
```

### Multi-User Chat Application
```python
class ChatApplication:
    def __init__(self):
        self.user_sessions = {}

    async def handle_message(self, user_id: str, message: str):
        # Each user gets their own session
        if user_id not in self.user_sessions:
            self.user_sessions[user_id] = SessionManager(infinite_context=True)

        sm = self.user_sessions[user_id]
        await sm.user_says(message)

        # AI processes with user's personal context
        ai_response = await self.generate_response(sm, message)
        await sm.ai_responds(ai_response)

        return ai_response

    async def get_user_stats(self, user_id: str):
        if user_id in self.user_sessions:
            return await self.user_sessions[user_id].get_stats()
        return None
```

## 📊 Monitoring Dashboard

```python
# Get comprehensive analytics across all sessions
from chuk_ai_session_manager import get_global_stats

stats = await get_global_stats()
print(f"""
🚀 AI Application Dashboard
==========================
Total Sessions: {stats['total_sessions']}
Total Messages: {stats['total_messages']}
Total Cost: ${stats['total_cost']:.2f}
Average Session Length: {stats['avg_session_length']:.1f} messages
Most Active Hour: {stats['peak_hour']}
Top Models Used: {', '.join(stats['top_models'])}
""")
```

## 🔧 Installation Options

```bash
# Basic installation
uv add chuk-ai-session-manager

# With Redis support (for production)
uv add chuk-ai-session-manager[redis]

# Full installation (all features)
uv add chuk-ai-session-manager[full]

# Or with pip
pip install chuk-ai-session-manager
```

## 🌟 What Makes CHUK Special?

| Feature | Other Libraries | CHUK AI Session Manager |
|---------|----------------|------------------------|
| **Setup Complexity** | Complex configuration | 3 lines of code |
| **Cost Tracking** | Manual calculation | Automatic across all providers |
| **Long Conversations** | Token limit errors | Infinite context with auto-segmentation |
| **Multi-Provider** | Provider-specific code | Works with any LLM |
| **Production Ready** | Requires additional work | Built for production |
| **Learning Curve** | Steep | 5 minutes to productivity |

## 📖 More Examples

Check out the `/examples` directory for complete working examples:

- `simple_tracking.py` - Basic conversation tracking
- `openai_integration.py` - OpenAI API integration
- `infinite_context.py` - Handling long conversations
- `cost_monitoring.py` - Cost tracking and analytics
- `multi_provider.py` - Using multiple LLM providers
- `production_app.py` - Production-ready application

## 🎯 Quick Decision Guide

**Choose CHUK AI Session Manager if you want:**
- ✅ Simple conversation tracking with zero configuration
- ✅ Automatic cost monitoring across all LLM providers
- ✅ Infinite conversation length without token limit errors
- ✅ Production-ready session management out of the box
- ✅ Complete conversation analytics and observability
- ✅ Framework-agnostic solution that works with any LLM library

## 🤝 Community & Support

- 📖 **Documentation**: [Full docs with tutorials](link-to-docs)
- 🐛 **Issues**: Report bugs on GitHub
- 💡 **Feature Requests**: Suggest new features
- 📧 **Support**: enterprise@chuk.dev for production support

## 📝 License

MIT License - build amazing AI applications with confidence!

---

**🎉 Ready to build better AI applications?**

```bash
uv add chuk-ai-session-manager
```

**Get started in 30 seconds with one line of code!**
{chuk_ai_session_manager-0.2.1.dist-info → chuk_ai_session_manager-0.4.dist-info}/RECORD

```diff
@@ -1,12 +1,12 @@
-chuk_ai_session_manager/__init__.py,sha256=…
+chuk_ai_session_manager/__init__.py,sha256=gWpNM9IKH9iOrzwum-4smLmkkSF78t0aPGa583u889M,2208
 chuk_ai_session_manager/exceptions.py,sha256=WqrrUZuOAiUmz7tKnSnk0y222U_nV9a8LyaXLayn2fg,4420
 chuk_ai_session_manager/infinite_conversation.py,sha256=7j3caMnsX27M5rjj4oOkqiy_2AfcupWwsAWRflnKiSo,12092
-chuk_ai_session_manager/sample_tools.py,sha256=…
+chuk_ai_session_manager/sample_tools.py,sha256=U-jTGveTJ95uSnA4jB30fJQJG3K-TGxN9jcOY6qVHZQ,8179
 chuk_ai_session_manager/session_aware_tool_processor.py,sha256=iVe3d-qfp5QGkdNrgfZeRYoOjd8nLZ0g6K7HW1thFE8,7274
-chuk_ai_session_manager/session_prompt_builder.py,sha256=…
+chuk_ai_session_manager/session_prompt_builder.py,sha256=Jeg_MWses_hFtHtDL7ZQl6EdSNVmVIIrLDrWEoPumfM,17613
 chuk_ai_session_manager/session_storage.py,sha256=HqzYDtwx4zN5an1zJmSZc56BpyD3KjT3IWonIpmnVXQ,5790
 chuk_ai_session_manager/api/__init__.py,sha256=Lo_BoDW2rSn0Zw-CbjahOxc6ykjjTpucxHZo5FA2Gnc,41
-chuk_ai_session_manager/api/simple_api.py,sha256=…
+chuk_ai_session_manager/api/simple_api.py,sha256=RbHA2IAPUzIFZFvT6KpbgouAuonF-Q6GopKOeKej0rk,17795
 chuk_ai_session_manager/models/__init__.py,sha256=H1rRuDQDRf821JPUWUn5Zgwvc5BAqcEGekkHEmX-IgE,1167
 chuk_ai_session_manager/models/event_source.py,sha256=mn_D16sXMa6nAX-5BzssygJPz6VF24GRe-3IaH7bTnI,196
 chuk_ai_session_manager/models/event_type.py,sha256=TPPvAz-PlXVtrwXDNVFVnhdt1yEfgDGmKDGt8ArYcTk,275
@@ -14,10 +14,8 @@ chuk_ai_session_manager/models/session.py,sha256=Txnmqd5SmiMz6acur_zL5MiFHJjKqU2
 chuk_ai_session_manager/models/session_event.py,sha256=YPDbymduF42LLHtAv_k_kqlWF68vnth5J_HM4q-bOyI,5896
 chuk_ai_session_manager/models/session_metadata.py,sha256=KFG7lc_E0BQTP2OD9Y529elVGJXppDUMqz8vVONW0rw,1510
 chuk_ai_session_manager/models/session_run.py,sha256=uhMM4-WSrqOUsiWQPnyakInd-foZhxI-YnSHSWiZZwE,4369
-chuk_ai_session_manager/models/token_usage.py,sha256=…
-chuk_ai_session_manager/…
-chuk_ai_session_manager/…
-chuk_ai_session_manager-0.…
-chuk_ai_session_manager-0.…
-chuk_ai_session_manager-0.2.1.dist-info/top_level.txt,sha256=5RinqD0v-niHuLYePUREX4gEWTlrpgtUg0RfexVRBMk,24
-chuk_ai_session_manager-0.2.1.dist-info/RECORD,,
+chuk_ai_session_manager/models/token_usage.py,sha256=M9Qwmeb2woILaSRwA2SIAiG-sIwC3cL_1H-y3NjW5Ik,11436
+chuk_ai_session_manager-0.4.dist-info/METADATA,sha256=QjyLv5-42g82rYOJhYT6_tDomd0wD1L39xibv1OXi-4,11080
+chuk_ai_session_manager-0.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+chuk_ai_session_manager-0.4.dist-info/top_level.txt,sha256=5RinqD0v-niHuLYePUREX4gEWTlrpgtUg0RfexVRBMk,24
+chuk_ai_session_manager-0.4.dist-info/RECORD,,
```

WHEEL and top_level.txt: files without changes.