chuk-ai-session-manager 0.2.1__tar.gz → 0.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49) hide show
  1. chuk_ai_session_manager-0.4/PKG-INFO +354 -0
  2. chuk_ai_session_manager-0.4/README.md +332 -0
  3. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/pyproject.toml +22 -5
  4. chuk_ai_session_manager-0.4/src/chuk_ai_session_manager/__init__.py +73 -0
  5. chuk_ai_session_manager-0.4/src/chuk_ai_session_manager/api/simple_api.py +507 -0
  6. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/models/token_usage.py +13 -2
  7. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/sample_tools.py +1 -1
  8. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/session_prompt_builder.py +70 -62
  9. chuk_ai_session_manager-0.4/src/chuk_ai_session_manager.egg-info/PKG-INFO +354 -0
  10. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager.egg-info/SOURCES.txt +7 -7
  11. chuk_ai_session_manager-0.4/tests/test_basic_functionality.py +341 -0
  12. chuk_ai_session_manager-0.4/tests/test_exceptions.py +436 -0
  13. chuk_ai_session_manager-0.4/tests/test_infinite_conversation.py +446 -0
  14. chuk_ai_session_manager-0.4/tests/test_models.py +457 -0
  15. chuk_ai_session_manager-0.4/tests/test_prompt_builder.py +668 -0
  16. chuk_ai_session_manager-0.4/tests/test_simple.py +569 -0
  17. chuk_ai_session_manager-0.4/tests/test_simple_api.py +862 -0
  18. chuk_ai_session_manager-0.4/tests/test_storage.py +445 -0
  19. chuk_ai_session_manager-0.4/tests/test_tools.py +548 -0
  20. chuk_ai_session_manager-0.2.1/PKG-INFO +0 -501
  21. chuk_ai_session_manager-0.2.1/README.md +0 -479
  22. chuk_ai_session_manager-0.2.1/src/chuk_ai_session_manager/__init__.py +0 -359
  23. chuk_ai_session_manager-0.2.1/src/chuk_ai_session_manager/api/simple_api.py +0 -376
  24. chuk_ai_session_manager-0.2.1/src/chuk_ai_session_manager/utils/__init__.py +0 -0
  25. chuk_ai_session_manager-0.2.1/src/chuk_ai_session_manager/utils/status_display_utils.py +0 -474
  26. chuk_ai_session_manager-0.2.1/src/chuk_ai_session_manager.egg-info/PKG-INFO +0 -501
  27. chuk_ai_session_manager-0.2.1/tests/test_basic_functionality.py +0 -126
  28. chuk_ai_session_manager-0.2.1/tests/test_chuk_session_storage.py +0 -249
  29. chuk_ai_session_manager-0.2.1/tests/test_infinite_conversation.py +0 -143
  30. chuk_ai_session_manager-0.2.1/tests/test_infinite_conversation_advanced.py +0 -308
  31. chuk_ai_session_manager-0.2.1/tests/test_session.py +0 -285
  32. chuk_ai_session_manager-0.2.1/tests/test_session_aware_tool_processor.py +0 -117
  33. chuk_ai_session_manager-0.2.1/tests/test_session_prompt_builder.py +0 -57
  34. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/setup.cfg +0 -0
  35. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/api/__init__.py +0 -0
  36. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/exceptions.py +0 -0
  37. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/infinite_conversation.py +0 -0
  38. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/models/__init__.py +0 -0
  39. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/models/event_source.py +0 -0
  40. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/models/event_type.py +0 -0
  41. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/models/session.py +0 -0
  42. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/models/session_event.py +0 -0
  43. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/models/session_metadata.py +0 -0
  44. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/models/session_run.py +0 -0
  45. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/session_aware_tool_processor.py +0 -0
  46. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager/session_storage.py +0 -0
  47. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager.egg-info/dependency_links.txt +0 -0
  48. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager.egg-info/requires.txt +0 -0
  49. {chuk_ai_session_manager-0.2.1 → chuk_ai_session_manager-0.4}/src/chuk_ai_session_manager.egg-info/top_level.txt +0 -0
@@ -0,0 +1,354 @@
1
+ Metadata-Version: 2.4
2
+ Name: chuk-ai-session-manager
3
+ Version: 0.4
4
+ Summary: Session manager for AI applications
5
+ Requires-Python: >=3.11
6
+ Description-Content-Type: text/markdown
7
+ Requires-Dist: chuk-sessions>=0.3
8
+ Requires-Dist: chuk-tool-processor>=0.4.1
9
+ Requires-Dist: pydantic>=2.11.3
10
+ Provides-Extra: tiktoken
11
+ Requires-Dist: tiktoken>=0.9.0; extra == "tiktoken"
12
+ Provides-Extra: redis
13
+ Requires-Dist: redis>=4.0.0; extra == "redis"
14
+ Provides-Extra: dev
15
+ Requires-Dist: pytest>=7.0.0; extra == "dev"
16
+ Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
17
+ Requires-Dist: redis>=4.0.0; extra == "dev"
18
+ Requires-Dist: black>=23.0.0; extra == "dev"
19
+ Requires-Dist: isort>=5.12.0; extra == "dev"
20
+ Requires-Dist: mypy>=1.0.0; extra == "dev"
21
+ Provides-Extra: full
22
+
23
+ # chuk-ai-session-manager
24
+
25
+ [![Python 3.11+](https://img.shields.io/badge/python-3.11+-blue.svg)](https://www.python.org/downloads/)
26
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
27
+
28
+ **The easiest way to add conversation tracking to any AI application.**
29
+
30
+ Track conversations, monitor costs, and manage infinite context with just 3 lines of code. Built for production, designed for simplicity.
31
+
32
+ ## 🚀 30-Second Start
33
+
34
+ ```bash
35
+ uv add chuk-ai-session-manager
36
+ ```
37
+
38
+ ```python
39
+ from chuk_ai_session_manager import track_conversation
40
+
41
+ # Track any AI conversation in one line
42
+ await track_conversation("Hello!", "Hi there! How can I help?")
43
+ ```
44
+
45
+ That's it! 🎉 Your conversation is now tracked with full observability.
46
+
47
+ ## ✨ Why Choose CHUK?
48
+
49
+ - **🔥 Stupidly Simple**: 3 lines to track any conversation
50
+ - **💰 Cost Smart**: Automatic token counting and cost tracking
51
+ - **♾️ Infinite Context**: No more "conversation too long" errors
52
+ - **🔧 Any LLM**: Works with OpenAI, Anthropic, local models, anything
53
+ - **📊 Full Observability**: See exactly what's happening in your AI app
54
+ - **🚀 Production Ready**: Used in real applications, not just demos
55
+
56
+ ## 🎯 Perfect For
57
+
58
+ - **Building chatbots** that remember conversations
59
+ - **Tracking LLM costs** across your entire application
60
+ - **Managing long conversations** without hitting token limits
61
+ - **Debugging AI applications** with complete audit trails
62
+ - **Production AI systems** that need reliable session management
63
+
64
+ ## 📱 Quick Examples
65
+
66
+ ### Track Any Conversation
67
+ ```python
68
+ from chuk_ai_session_manager import track_conversation
69
+
70
+ # Works with any LLM response
71
+ session_id = await track_conversation(
72
+ user_message="What's the weather like?",
73
+ ai_response="It's sunny and 75°F in your area.",
74
+ model="gpt-4",
75
+ provider="openai"
76
+ )
77
+ ```
78
+
79
+ ### Persistent Conversations
80
+ ```python
81
+ from chuk_ai_session_manager import SessionManager
82
+
83
+ # Create a conversation that remembers context
84
+ sm = SessionManager()
85
+
86
+ await sm.user_says("My name is Alice")
87
+ await sm.ai_responds("Nice to meet you, Alice!")
88
+
89
+ await sm.user_says("What's my name?")
90
+ await sm.ai_responds("Your name is Alice!")
91
+
92
+ # Get conversation stats
93
+ stats = await sm.get_stats()
94
+ print(f"Cost: ${stats['estimated_cost']:.6f}")
95
+ print(f"Tokens: {stats['total_tokens']}")
96
+ ```
97
+
98
+ ### Infinite Context (Never Run Out of Space)
99
+ ```python
100
+ # Automatically handles conversations of any length
101
+ sm = SessionManager(
102
+ infinite_context=True, # 🔥 Magic happens here
103
+ token_threshold=4000 # When to create new segment
104
+ )
105
+
106
+ # Keep chatting forever - context is preserved automatically
107
+ for i in range(100): # This would normally hit token limits
108
+ await sm.user_says(f"Question {i}: Tell me about AI")
109
+ await sm.ai_responds("AI is fascinating...")
110
+
111
+ # Still works! Automatic summarization keeps context alive
112
+ conversation = await sm.get_conversation()
113
+ print(f"Full conversation: {len(conversation)} exchanges")
114
+ ```
115
+
116
+ ### Cost Tracking (Know What You're Spending)
117
+ ```python
118
+ # Automatic cost monitoring across all interactions
119
+ sm = SessionManager()
120
+
121
+ await sm.user_says("Write a long story about dragons")
122
+ await sm.ai_responds("Once upon a time..." * 500) # Long response
123
+
124
+ stats = await sm.get_stats()
125
+ print(f"💰 That story cost: ${stats['estimated_cost']:.6f}")
126
+ print(f"📊 Used {stats['total_tokens']} tokens")
127
+ print(f"📈 {stats['user_messages']} user messages, {stats['ai_messages']} AI responses")
128
+ ```
129
+
130
+ ### Multi-Provider Support
131
+ ```python
132
+ # Works with any LLM provider
133
+ import openai
134
+ import anthropic
135
+
136
+ sm = SessionManager()
137
+
138
+ # OpenAI
139
+ await sm.user_says("Hello!")
140
+ openai_response = await openai.chat.completions.create(...)
141
+ await sm.ai_responds(openai_response.choices[0].message.content, model="gpt-4", provider="openai")
142
+
143
+ # Anthropic
144
+ await sm.user_says("How are you?")
145
+ anthropic_response = await anthropic.messages.create(...)
146
+ await sm.ai_responds(anthropic_response.content[0].text, model="claude-3", provider="anthropic")
147
+
148
+ # See costs across all providers
149
+ stats = await sm.get_stats()
150
+ print(f"Total cost across all providers: ${stats['estimated_cost']:.6f}")
151
+ ```
152
+
153
+ ## 🛠️ Advanced Features
154
+
155
+ ### Conversation Analytics
156
+ ```python
157
+ # Get detailed insights into your conversations
158
+ conversation = await sm.get_conversation()
159
+ stats = await sm.get_stats()
160
+
161
+ print(f"📊 Conversation Analytics:")
162
+ print(f" Messages: {stats['user_messages']} user, {stats['ai_messages']} AI")
163
+ print(f" Average response length: {stats['avg_response_length']}")
164
+ print(f" Most expensive response: ${stats['max_response_cost']:.6f}")
165
+ print(f" Session duration: {stats['duration_minutes']:.1f} minutes")
166
+ ```
167
+
168
+ ### Tool Integration
169
+ ```python
170
+ # Track tool usage alongside conversations
171
+ await sm.tool_used(
172
+ tool_name="web_search",
173
+ arguments={"query": "latest AI news"},
174
+ result={"articles": ["AI breakthrough...", "New model released..."]},
175
+ cost=0.001
176
+ )
177
+
178
+ stats = await sm.get_stats()
179
+ print(f"Tool calls: {stats['tool_calls']}")
180
+ ```
181
+
182
+ ### Session Export/Import
183
+ ```python
184
+ # Export conversations for analysis
185
+ conversation_data = await sm.export_conversation()
186
+ with open('conversation.json', 'w') as f:
187
+ json.dump(conversation_data, f)
188
+
189
+ # Import previous conversations
190
+ sm = SessionManager()
191
+ await sm.import_conversation('conversation.json')
192
+ ```
193
+
194
+ ## 🎨 Real-World Examples
195
+
196
+ ### Customer Support Bot
197
+ ```python
198
+ async def handle_support_ticket(user_message: str, ticket_id: str):
199
+ # Each ticket gets its own session
200
+ sm = SessionManager(session_id=ticket_id)
201
+
202
+ await sm.user_says(user_message)
203
+
204
+ # Your AI logic here
205
+ ai_response = await your_ai_model(user_message)
206
+ await sm.ai_responds(ai_response, model="gpt-4", provider="openai")
207
+
208
+ # Automatic cost tracking per ticket
209
+ stats = await sm.get_stats()
210
+ print(f"Ticket {ticket_id} cost: ${stats['estimated_cost']:.6f}")
211
+
212
+ return ai_response
213
+ ```
214
+
215
+ ### AI Assistant with Memory
216
+ ```python
217
+ async def ai_assistant():
218
+ sm = SessionManager(infinite_context=True)
219
+
220
+ while True:
221
+ user_input = input("You: ")
222
+ if user_input.lower() == 'quit':
223
+ break
224
+
225
+ await sm.user_says(user_input)
226
+
227
+ # Get conversation context for AI
228
+ conversation = await sm.get_conversation()
229
+ context = "\n".join([f"{turn['role']}: {turn['content']}" for turn in conversation[-5:]])
230
+
231
+ # Your AI call with context
232
+ ai_response = await your_ai_model(f"Context:\n{context}\n\nUser: {user_input}")
233
+ await sm.ai_responds(ai_response)
234
+
235
+ print(f"AI: {ai_response}")
236
+
237
+ # Show final stats
238
+ stats = await sm.get_stats()
239
+ print(f"\n💰 Total conversation cost: ${stats['estimated_cost']:.6f}")
240
+ ```
241
+
242
+ ### Multi-User Chat Application
243
+ ```python
244
+ class ChatApplication:
245
+ def __init__(self):
246
+ self.user_sessions = {}
247
+
248
+ async def handle_message(self, user_id: str, message: str):
249
+ # Each user gets their own session
250
+ if user_id not in self.user_sessions:
251
+ self.user_sessions[user_id] = SessionManager(infinite_context=True)
252
+
253
+ sm = self.user_sessions[user_id]
254
+ await sm.user_says(message)
255
+
256
+ # AI processes with user's personal context
257
+ ai_response = await self.generate_response(sm, message)
258
+ await sm.ai_responds(ai_response)
259
+
260
+ return ai_response
261
+
262
+ async def get_user_stats(self, user_id: str):
263
+ if user_id in self.user_sessions:
264
+ return await self.user_sessions[user_id].get_stats()
265
+ return None
266
+ ```
267
+
268
+ ## 📊 Monitoring Dashboard
269
+
270
+ ```python
271
+ # Get comprehensive analytics across all sessions
272
+ from chuk_ai_session_manager import get_global_stats
273
+
274
+ stats = await get_global_stats()
275
+ print(f"""
276
+ 🚀 AI Application Dashboard
277
+ ==========================
278
+ Total Sessions: {stats['total_sessions']}
279
+ Total Messages: {stats['total_messages']}
280
+ Total Cost: ${stats['total_cost']:.2f}
281
+ Average Session Length: {stats['avg_session_length']:.1f} messages
282
+ Most Active Hour: {stats['peak_hour']}
283
+ Top Models Used: {', '.join(stats['top_models'])}
284
+ """)
285
+ ```
286
+
287
+ ## 🔧 Installation Options
288
+
289
+ ```bash
290
+ # Basic installation
291
+ uv add chuk-ai-session-manager
292
+
293
+ # With Redis support (for production)
294
+ uv add "chuk-ai-session-manager[redis]"
295
+
296
+ # Full installation (all features)
297
+ uv add chuk-ai-session-manager[full]
298
+
299
+ # Or with pip
300
+ pip install chuk-ai-session-manager
301
+ ```
302
+
303
+ ## 🌟 What Makes CHUK Special?
304
+
305
+ | Feature | Other Libraries | CHUK AI Session Manager |
306
+ |---------|----------------|------------------------|
307
+ | **Setup Complexity** | Complex configuration | 3 lines of code |
308
+ | **Cost Tracking** | Manual calculation | Automatic across all providers |
309
+ | **Long Conversations** | Token limit errors | Infinite context with auto-segmentation |
310
+ | **Multi-Provider** | Provider-specific code | Works with any LLM |
311
+ | **Production Ready** | Requires additional work | Built for production |
312
+ | **Learning Curve** | Steep | 5 minutes to productivity |
313
+
314
+ ## 📖 More Examples
315
+
316
+ Check out the `/examples` directory for complete working examples:
317
+
318
+ - `simple_tracking.py` - Basic conversation tracking
319
+ - `openai_integration.py` - OpenAI API integration
320
+ - `infinite_context.py` - Handling long conversations
321
+ - `cost_monitoring.py` - Cost tracking and analytics
322
+ - `multi_provider.py` - Using multiple LLM providers
323
+ - `production_app.py` - Production-ready application
324
+
325
+ ## 🎯 Quick Decision Guide
326
+
327
+ **Choose CHUK AI Session Manager if you want:**
328
+ - ✅ Simple conversation tracking with zero configuration
329
+ - ✅ Automatic cost monitoring across all LLM providers
330
+ - ✅ Infinite conversation length without token limit errors
331
+ - ✅ Production-ready session management out of the box
332
+ - ✅ Complete conversation analytics and observability
333
+ - ✅ Framework-agnostic solution that works with any LLM library
334
+
335
+ ## 🤝 Community & Support
336
+
337
+ - 📖 **Documentation**: Full docs with tutorials (see the project repository)
338
+ - 🐛 **Issues**: Report bugs on GitHub
339
+ - 💡 **Feature Requests**: Suggest new features
340
+ - 📧 **Support**: enterprise@chuk.dev for production support
341
+
342
+ ## 📝 License
343
+
344
+ MIT License - build amazing AI applications with confidence!
345
+
346
+ ---
347
+
348
+ **🎉 Ready to build better AI applications?**
349
+
350
+ ```bash
351
+ uv add chuk-ai-session-manager
352
+ ```
353
+
354
+ **Get started in 30 seconds with one line of code!**
@@ -0,0 +1,332 @@
1
+ # chuk-ai-session-manager
2
+
3
+ [![Python 3.11+](https://img.shields.io/badge/python-3.11+-blue.svg)](https://www.python.org/downloads/)
4
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
5
+
6
+ **The easiest way to add conversation tracking to any AI application.**
7
+
8
+ Track conversations, monitor costs, and manage infinite context with just 3 lines of code. Built for production, designed for simplicity.
9
+
10
+ ## 🚀 30-Second Start
11
+
12
+ ```bash
13
+ uv add chuk-ai-session-manager
14
+ ```
15
+
16
+ ```python
17
+ from chuk_ai_session_manager import track_conversation
18
+
19
+ # Track any AI conversation in one line
20
+ await track_conversation("Hello!", "Hi there! How can I help?")
21
+ ```
22
+
23
+ That's it! 🎉 Your conversation is now tracked with full observability.
24
+
25
+ ## ✨ Why Choose CHUK?
26
+
27
+ - **🔥 Stupidly Simple**: 3 lines to track any conversation
28
+ - **💰 Cost Smart**: Automatic token counting and cost tracking
29
+ - **♾️ Infinite Context**: No more "conversation too long" errors
30
+ - **🔧 Any LLM**: Works with OpenAI, Anthropic, local models, anything
31
+ - **📊 Full Observability**: See exactly what's happening in your AI app
32
+ - **🚀 Production Ready**: Used in real applications, not just demos
33
+
34
+ ## 🎯 Perfect For
35
+
36
+ - **Building chatbots** that remember conversations
37
+ - **Tracking LLM costs** across your entire application
38
+ - **Managing long conversations** without hitting token limits
39
+ - **Debugging AI applications** with complete audit trails
40
+ - **Production AI systems** that need reliable session management
41
+
42
+ ## 📱 Quick Examples
43
+
44
+ ### Track Any Conversation
45
+ ```python
46
+ from chuk_ai_session_manager import track_conversation
47
+
48
+ # Works with any LLM response
49
+ session_id = await track_conversation(
50
+ user_message="What's the weather like?",
51
+ ai_response="It's sunny and 75°F in your area.",
52
+ model="gpt-4",
53
+ provider="openai"
54
+ )
55
+ ```
56
+
57
+ ### Persistent Conversations
58
+ ```python
59
+ from chuk_ai_session_manager import SessionManager
60
+
61
+ # Create a conversation that remembers context
62
+ sm = SessionManager()
63
+
64
+ await sm.user_says("My name is Alice")
65
+ await sm.ai_responds("Nice to meet you, Alice!")
66
+
67
+ await sm.user_says("What's my name?")
68
+ await sm.ai_responds("Your name is Alice!")
69
+
70
+ # Get conversation stats
71
+ stats = await sm.get_stats()
72
+ print(f"Cost: ${stats['estimated_cost']:.6f}")
73
+ print(f"Tokens: {stats['total_tokens']}")
74
+ ```
75
+
76
+ ### Infinite Context (Never Run Out of Space)
77
+ ```python
78
+ # Automatically handles conversations of any length
79
+ sm = SessionManager(
80
+ infinite_context=True, # 🔥 Magic happens here
81
+ token_threshold=4000 # When to create new segment
82
+ )
83
+
84
+ # Keep chatting forever - context is preserved automatically
85
+ for i in range(100): # This would normally hit token limits
86
+ await sm.user_says(f"Question {i}: Tell me about AI")
87
+ await sm.ai_responds("AI is fascinating...")
88
+
89
+ # Still works! Automatic summarization keeps context alive
90
+ conversation = await sm.get_conversation()
91
+ print(f"Full conversation: {len(conversation)} exchanges")
92
+ ```
93
+
94
+ ### Cost Tracking (Know What You're Spending)
95
+ ```python
96
+ # Automatic cost monitoring across all interactions
97
+ sm = SessionManager()
98
+
99
+ await sm.user_says("Write a long story about dragons")
100
+ await sm.ai_responds("Once upon a time..." * 500) # Long response
101
+
102
+ stats = await sm.get_stats()
103
+ print(f"💰 That story cost: ${stats['estimated_cost']:.6f}")
104
+ print(f"📊 Used {stats['total_tokens']} tokens")
105
+ print(f"📈 {stats['user_messages']} user messages, {stats['ai_messages']} AI responses")
106
+ ```
107
+
108
+ ### Multi-Provider Support
109
+ ```python
110
+ # Works with any LLM provider
111
+ import openai
112
+ import anthropic
113
+
114
+ sm = SessionManager()
115
+
116
+ # OpenAI
117
+ await sm.user_says("Hello!")
118
+ openai_response = await openai.chat.completions.create(...)
119
+ await sm.ai_responds(openai_response.choices[0].message.content, model="gpt-4", provider="openai")
120
+
121
+ # Anthropic
122
+ await sm.user_says("How are you?")
123
+ anthropic_response = await anthropic.messages.create(...)
124
+ await sm.ai_responds(anthropic_response.content[0].text, model="claude-3", provider="anthropic")
125
+
126
+ # See costs across all providers
127
+ stats = await sm.get_stats()
128
+ print(f"Total cost across all providers: ${stats['estimated_cost']:.6f}")
129
+ ```
130
+
131
+ ## 🛠️ Advanced Features
132
+
133
+ ### Conversation Analytics
134
+ ```python
135
+ # Get detailed insights into your conversations
136
+ conversation = await sm.get_conversation()
137
+ stats = await sm.get_stats()
138
+
139
+ print(f"📊 Conversation Analytics:")
140
+ print(f" Messages: {stats['user_messages']} user, {stats['ai_messages']} AI")
141
+ print(f" Average response length: {stats['avg_response_length']}")
142
+ print(f" Most expensive response: ${stats['max_response_cost']:.6f}")
143
+ print(f" Session duration: {stats['duration_minutes']:.1f} minutes")
144
+ ```
145
+
146
+ ### Tool Integration
147
+ ```python
148
+ # Track tool usage alongside conversations
149
+ await sm.tool_used(
150
+ tool_name="web_search",
151
+ arguments={"query": "latest AI news"},
152
+ result={"articles": ["AI breakthrough...", "New model released..."]},
153
+ cost=0.001
154
+ )
155
+
156
+ stats = await sm.get_stats()
157
+ print(f"Tool calls: {stats['tool_calls']}")
158
+ ```
159
+
160
+ ### Session Export/Import
161
+ ```python
162
+ # Export conversations for analysis
163
+ conversation_data = await sm.export_conversation()
164
+ with open('conversation.json', 'w') as f:
165
+ json.dump(conversation_data, f)
166
+
167
+ # Import previous conversations
168
+ sm = SessionManager()
169
+ await sm.import_conversation('conversation.json')
170
+ ```
171
+
172
+ ## 🎨 Real-World Examples
173
+
174
+ ### Customer Support Bot
175
+ ```python
176
+ async def handle_support_ticket(user_message: str, ticket_id: str):
177
+ # Each ticket gets its own session
178
+ sm = SessionManager(session_id=ticket_id)
179
+
180
+ await sm.user_says(user_message)
181
+
182
+ # Your AI logic here
183
+ ai_response = await your_ai_model(user_message)
184
+ await sm.ai_responds(ai_response, model="gpt-4", provider="openai")
185
+
186
+ # Automatic cost tracking per ticket
187
+ stats = await sm.get_stats()
188
+ print(f"Ticket {ticket_id} cost: ${stats['estimated_cost']:.6f}")
189
+
190
+ return ai_response
191
+ ```
192
+
193
+ ### AI Assistant with Memory
194
+ ```python
195
+ async def ai_assistant():
196
+ sm = SessionManager(infinite_context=True)
197
+
198
+ while True:
199
+ user_input = input("You: ")
200
+ if user_input.lower() == 'quit':
201
+ break
202
+
203
+ await sm.user_says(user_input)
204
+
205
+ # Get conversation context for AI
206
+ conversation = await sm.get_conversation()
207
+ context = "\n".join([f"{turn['role']}: {turn['content']}" for turn in conversation[-5:]])
208
+
209
+ # Your AI call with context
210
+ ai_response = await your_ai_model(f"Context:\n{context}\n\nUser: {user_input}")
211
+ await sm.ai_responds(ai_response)
212
+
213
+ print(f"AI: {ai_response}")
214
+
215
+ # Show final stats
216
+ stats = await sm.get_stats()
217
+ print(f"\n💰 Total conversation cost: ${stats['estimated_cost']:.6f}")
218
+ ```
219
+
220
+ ### Multi-User Chat Application
221
+ ```python
222
+ class ChatApplication:
223
+ def __init__(self):
224
+ self.user_sessions = {}
225
+
226
+ async def handle_message(self, user_id: str, message: str):
227
+ # Each user gets their own session
228
+ if user_id not in self.user_sessions:
229
+ self.user_sessions[user_id] = SessionManager(infinite_context=True)
230
+
231
+ sm = self.user_sessions[user_id]
232
+ await sm.user_says(message)
233
+
234
+ # AI processes with user's personal context
235
+ ai_response = await self.generate_response(sm, message)
236
+ await sm.ai_responds(ai_response)
237
+
238
+ return ai_response
239
+
240
+ async def get_user_stats(self, user_id: str):
241
+ if user_id in self.user_sessions:
242
+ return await self.user_sessions[user_id].get_stats()
243
+ return None
244
+ ```
245
+
246
+ ## 📊 Monitoring Dashboard
247
+
248
+ ```python
249
+ # Get comprehensive analytics across all sessions
250
+ from chuk_ai_session_manager import get_global_stats
251
+
252
+ stats = await get_global_stats()
253
+ print(f"""
254
+ 🚀 AI Application Dashboard
255
+ ==========================
256
+ Total Sessions: {stats['total_sessions']}
257
+ Total Messages: {stats['total_messages']}
258
+ Total Cost: ${stats['total_cost']:.2f}
259
+ Average Session Length: {stats['avg_session_length']:.1f} messages
260
+ Most Active Hour: {stats['peak_hour']}
261
+ Top Models Used: {', '.join(stats['top_models'])}
262
+ """)
263
+ ```
264
+
265
+ ## 🔧 Installation Options
266
+
267
+ ```bash
268
+ # Basic installation
269
+ uv add chuk-ai-session-manager
270
+
271
+ # With Redis support (for production)
272
+ uv add "chuk-ai-session-manager[redis]"
273
+
274
+ # Full installation (all features)
275
+ uv add chuk-ai-session-manager[full]
276
+
277
+ # Or with pip
278
+ pip install chuk-ai-session-manager
279
+ ```
280
+
281
+ ## 🌟 What Makes CHUK Special?
282
+
283
+ | Feature | Other Libraries | CHUK AI Session Manager |
284
+ |---------|----------------|------------------------|
285
+ | **Setup Complexity** | Complex configuration | 3 lines of code |
286
+ | **Cost Tracking** | Manual calculation | Automatic across all providers |
287
+ | **Long Conversations** | Token limit errors | Infinite context with auto-segmentation |
288
+ | **Multi-Provider** | Provider-specific code | Works with any LLM |
289
+ | **Production Ready** | Requires additional work | Built for production |
290
+ | **Learning Curve** | Steep | 5 minutes to productivity |
291
+
292
+ ## 📖 More Examples
293
+
294
+ Check out the `/examples` directory for complete working examples:
295
+
296
+ - `simple_tracking.py` - Basic conversation tracking
297
+ - `openai_integration.py` - OpenAI API integration
298
+ - `infinite_context.py` - Handling long conversations
299
+ - `cost_monitoring.py` - Cost tracking and analytics
300
+ - `multi_provider.py` - Using multiple LLM providers
301
+ - `production_app.py` - Production-ready application
302
+
303
+ ## 🎯 Quick Decision Guide
304
+
305
+ **Choose CHUK AI Session Manager if you want:**
306
+ - ✅ Simple conversation tracking with zero configuration
307
+ - ✅ Automatic cost monitoring across all LLM providers
308
+ - ✅ Infinite conversation length without token limit errors
309
+ - ✅ Production-ready session management out of the box
310
+ - ✅ Complete conversation analytics and observability
311
+ - ✅ Framework-agnostic solution that works with any LLM library
312
+
313
+ ## 🤝 Community & Support
314
+
315
+ - 📖 **Documentation**: Full docs with tutorials (see the project repository)
316
+ - 🐛 **Issues**: Report bugs on GitHub
317
+ - 💡 **Feature Requests**: Suggest new features
318
+ - 📧 **Support**: enterprise@chuk.dev for production support
319
+
320
+ ## 📝 License
321
+
322
+ MIT License - build amazing AI applications with confidence!
323
+
324
+ ---
325
+
326
+ **🎉 Ready to build better AI applications?**
327
+
328
+ ```bash
329
+ uv add chuk-ai-session-manager
330
+ ```
331
+
332
+ **Get started in 30 seconds with one line of code!**