chuk-ai-session-manager 0.7.1__py3-none-any.whl → 0.8.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chuk_ai_session_manager/__init__.py +84 -40
- chuk_ai_session_manager/api/__init__.py +1 -1
- chuk_ai_session_manager/api/simple_api.py +53 -59
- chuk_ai_session_manager/exceptions.py +31 -17
- chuk_ai_session_manager/guards/__init__.py +118 -0
- chuk_ai_session_manager/guards/bindings.py +217 -0
- chuk_ai_session_manager/guards/cache.py +163 -0
- chuk_ai_session_manager/guards/manager.py +819 -0
- chuk_ai_session_manager/guards/models.py +498 -0
- chuk_ai_session_manager/guards/ungrounded.py +159 -0
- chuk_ai_session_manager/infinite_conversation.py +86 -79
- chuk_ai_session_manager/memory/__init__.py +247 -0
- chuk_ai_session_manager/memory/artifacts_bridge.py +469 -0
- chuk_ai_session_manager/memory/context_packer.py +347 -0
- chuk_ai_session_manager/memory/fault_handler.py +507 -0
- chuk_ai_session_manager/memory/manifest.py +307 -0
- chuk_ai_session_manager/memory/models.py +1084 -0
- chuk_ai_session_manager/memory/mutation_log.py +186 -0
- chuk_ai_session_manager/memory/pack_cache.py +206 -0
- chuk_ai_session_manager/memory/page_table.py +275 -0
- chuk_ai_session_manager/memory/prefetcher.py +192 -0
- chuk_ai_session_manager/memory/tlb.py +247 -0
- chuk_ai_session_manager/memory/vm_prompts.py +238 -0
- chuk_ai_session_manager/memory/working_set.py +574 -0
- chuk_ai_session_manager/models/__init__.py +21 -9
- chuk_ai_session_manager/models/event_source.py +3 -1
- chuk_ai_session_manager/models/event_type.py +10 -1
- chuk_ai_session_manager/models/session.py +103 -68
- chuk_ai_session_manager/models/session_event.py +69 -68
- chuk_ai_session_manager/models/session_metadata.py +9 -10
- chuk_ai_session_manager/models/session_run.py +21 -22
- chuk_ai_session_manager/models/token_usage.py +76 -76
- chuk_ai_session_manager/procedural_memory/__init__.py +70 -0
- chuk_ai_session_manager/procedural_memory/formatter.py +407 -0
- chuk_ai_session_manager/procedural_memory/manager.py +523 -0
- chuk_ai_session_manager/procedural_memory/models.py +371 -0
- chuk_ai_session_manager/sample_tools.py +79 -46
- chuk_ai_session_manager/session_aware_tool_processor.py +27 -16
- chuk_ai_session_manager/session_manager.py +259 -232
- chuk_ai_session_manager/session_prompt_builder.py +163 -111
- chuk_ai_session_manager/session_storage.py +45 -52
- {chuk_ai_session_manager-0.7.1.dist-info → chuk_ai_session_manager-0.8.1.dist-info}/METADATA +80 -4
- chuk_ai_session_manager-0.8.1.dist-info/RECORD +45 -0
- {chuk_ai_session_manager-0.7.1.dist-info → chuk_ai_session_manager-0.8.1.dist-info}/WHEEL +1 -1
- chuk_ai_session_manager-0.7.1.dist-info/RECORD +0 -22
- {chuk_ai_session_manager-0.7.1.dist-info → chuk_ai_session_manager-0.8.1.dist-info}/top_level.txt +0 -0
````diff
--- a/chuk_ai_session_manager/__init__.py
+++ b/chuk_ai_session_manager/__init__.py
@@ -4,7 +4,7 @@ CHUK AI Session Manager - Simple Developer API
 
 A powerful session management system for AI applications that provides:
 - Automatic conversation tracking
-- Token usage monitoring
+- Token usage monitoring
 - Tool call logging
 - Infinite context support with automatic summarization
 - Hierarchical session relationships
@@ -32,12 +32,12 @@ Infinite Context Example:
 Storage Configuration:
     # Default: Memory storage (no Redis required)
     pip install chuk-ai-session-manager
-
+
     # Redis: For production persistence
     pip install chuk-ai-session-manager[redis]
     export SESSION_PROVIDER=redis
    export SESSION_REDIS_URL=redis://localhost:6379/0
-
+
     # Environment variables:
     SESSION_PROVIDER=memory (default - fast, no persistence)
     SESSION_PROVIDER=redis (persistent - requires [redis] extra)
````
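The docstring above says the backend is selected from environment variables, and a later hunk shows storage being auto-configured on import. A minimal sketch of Redis-backed setup, assuming the variables are set before the package is imported (the variable names come from the docstring; everything else is illustrative):

```python
import os

# SESSION_PROVIDER / SESSION_REDIS_URL are the variables named in the docstring above.
# Setting them before import is an assumption about when they are read.
os.environ["SESSION_PROVIDER"] = "redis"
os.environ["SESSION_REDIS_URL"] = "redis://localhost:6379/0"

import chuk_ai_session_manager as casm  # storage is auto-configured on import per the diff below

print(casm.get_storage_info())  # provider/backend details, see get_storage_info() further down
```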
````diff
@@ -46,7 +46,7 @@ Storage Configuration:
 import logging
 
 # Package version
-__version__ = "0.7.1"
+__version__ = "0.8.1"
 
 # Set up package-level logger
 logger = logging.getLogger(__name__)
@@ -63,7 +63,7 @@ from chuk_ai_session_manager.exceptions import (
     InvalidSessionOperation,
     TokenLimitExceeded,
     StorageError,
-    ToolProcessingError
+    ToolProcessingError,
 )
 
 # Core models
@@ -87,36 +87,66 @@ from chuk_ai_session_manager.api.simple_api import (
     track_infinite_conversation,
     track_tool_use,
     get_session_stats,
-    get_conversation_history
+    get_conversation_history,
 )
 
 # Advanced components
 from chuk_ai_session_manager.infinite_conversation import (
     InfiniteConversationManager,
-    SummarizationStrategy
+    SummarizationStrategy,
 )
 
-from chuk_ai_session_manager.session_aware_tool_processor import SessionAwareToolProcessor
+from chuk_ai_session_manager.session_aware_tool_processor import (
+    SessionAwareToolProcessor,
+)
 
 from chuk_ai_session_manager.session_prompt_builder import (
     build_prompt_from_session,
     PromptStrategy,
-    truncate_prompt_to_token_limit
+    truncate_prompt_to_token_limit,
 )
 
+# Procedural memory
+from chuk_ai_session_manager.procedural_memory import (
+    ToolMemoryManager,
+    ToolOutcome,
+    ToolLogEntry,
+    ToolPattern,
+    ProceduralMemory,
+    ProceduralContextFormatter,
+    FormatterConfig,
+)
+
+# Guards and state management
+from chuk_ai_session_manager.guards import (
+    ToolStateManager,
+    get_tool_state,
+    reset_tool_state,
+    BindingManager,
+    ResultCache,
+    UngroundedGuard,
+    UngroundedGuardConfig,
+    RuntimeLimits,
+    RuntimeMode,
+    ValueBinding,
+    ToolClassification,
+)
+
+
 # Configuration functions
-def configure_storage(sandbox_id: str = "chuk-ai-session-manager",
-                      default_ttl_hours: int = 24) -> bool:
+def configure_storage(
+    sandbox_id: str = "chuk-ai-session-manager", default_ttl_hours: int = 24
+) -> bool:
     """
     Configure the storage backend.
-
+
     Args:
         sandbox_id: CHUK Sessions sandbox ID to use
         default_ttl_hours: Default TTL for sessions
-
+
     Returns:
         True if configuration was successful, False otherwise
-
+
     Note:
         Storage provider is controlled by SESSION_PROVIDER environment variable:
         - memory (default): Fast, no persistence, no extra dependencies
````
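Based on the reformatted configure_storage signature above, a minimal usage sketch (the argument values are illustrative, not defaults from the package):

```python
from chuk_ai_session_manager import configure_storage, get_storage_info

# configure_storage returns True on success, False otherwise (per the docstring above).
ok = configure_storage(sandbox_id="my-app-sessions", default_ttl_hours=48)
if ok:
    # get_storage_info() reports provider, backend, sandbox_id, redis_url and stats
    # (keys shown in the get_storage_info hunk below).
    print(get_storage_info())
```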
````diff
@@ -124,8 +154,7 @@ def configure_storage(sandbox_id: str = "chuk-ai-session-manager",
     """
     try:
         setup_chuk_sessions_storage(
-            sandbox_id=sandbox_id,
-            default_ttl_hours=default_ttl_hours
+            sandbox_id=sandbox_id, default_ttl_hours=default_ttl_hours
         )
         logger.info(f"Storage configured with sandbox_id='{sandbox_id}'")
         return True
@@ -142,26 +171,28 @@ def get_version() -> str:
 def is_available() -> dict:
     """
     Check which components are available.
-
+
     Returns:
         Dictionary showing availability of each component
     """
     # Check if Redis is available
     redis_available = False
     try:
-        import redis
+        import redis  # noqa: F401
+
         redis_available = True
     except ImportError:
         pass
-
+
     # Check if tiktoken is available for enhanced token counting
     tiktoken_available = False
     try:
-        import tiktoken
+        import tiktoken  # noqa: F401
+
         tiktoken_available = True
     except ImportError:
         pass
-
+
     return {
         "core_enums": True,
         "core_models": True,
@@ -181,53 +212,47 @@ def is_available() -> dict:
 def get_storage_info() -> dict:
     """
     Get information about the current storage configuration.
-
+
     Returns:
         Dictionary with storage configuration details
     """
     import os
     from chuk_ai_session_manager.session_storage import get_backend
-
+
     try:
         backend = get_backend()
         stats = backend.get_stats()
-
+
         return {
             "provider": os.getenv("SESSION_PROVIDER", "memory"),
             "backend": stats.get("backend", "unknown"),
             "sandbox_id": stats.get("sandbox_id", "unknown"),
             "redis_url": os.getenv("SESSION_REDIS_URL", "not_set"),
-            "stats": stats
+            "stats": stats,
         }
     except Exception as e:
-        return {
-            "provider": os.getenv("SESSION_PROVIDER", "memory"),
-            "error": str(e)
-        }
+        return {"provider": os.getenv("SESSION_PROVIDER", "memory"), "error": str(e)}
 
 
 # Main exports - everything should be available
 __all__ = [
     # Version and utilities
     "__version__",
-    "get_version",
+    "get_version",
     "is_available",
     "configure_storage",
     "get_storage_info",
-
     # Core enums
     "EventSource",
     "EventType",
-
     # Exception classes
     "SessionManagerError",
-    "SessionNotFound",
+    "SessionNotFound",
     "SessionAlreadyExists",
     "InvalidSessionOperation",
     "TokenLimitExceeded",
     "StorageError",
     "ToolProcessingError",
-
     # Core models
     "Session",
     "SessionEvent",
````
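A small sketch of how the availability check above might be used to branch on optional dependencies. Only "core_enums" and "core_models" are visible as literal keys in this hunk; the "redis" key used below is an assumption:

```python
from chuk_ai_session_manager import is_available

info = is_available()
print(info["core_enums"], info["core_models"])  # both True per the hunk above

# The dict also reports optional extras such as redis/tiktoken support;
# the exact key name "redis" is assumed here, not shown in the diff.
if not info.get("redis", False):
    print("Install with: pip install chuk-ai-session-manager[redis]")
```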
````diff
@@ -236,20 +261,17 @@ __all__ = [
     "RunStatus",
     "TokenUsage",
     "TokenSummary",
-
     # Storage
     "setup_chuk_sessions_storage",
-
     # Primary interfaces - what most users will use
     "SessionManager",
-    "track_conversation",
+    "track_conversation",
     "track_llm_call",
     "quick_conversation",
     "track_infinite_conversation",
     "track_tool_use",
     "get_session_stats",
     "get_conversation_history",
-
     # Advanced components
     "InfiniteConversationManager",
     "SummarizationStrategy",
@@ -257,6 +279,26 @@ __all__ = [
     "build_prompt_from_session",
     "PromptStrategy",
     "truncate_prompt_to_token_limit",
+    # Procedural memory
+    "ToolMemoryManager",
+    "ToolOutcome",
+    "ToolLogEntry",
+    "ToolPattern",
+    "ProceduralMemory",
+    "ProceduralContextFormatter",
+    "FormatterConfig",
+    # Guards and state management
+    "ToolStateManager",
+    "get_tool_state",
+    "reset_tool_state",
+    "BindingManager",
+    "ResultCache",
+    "UngroundedGuard",
+    "UngroundedGuardConfig",
+    "RuntimeLimits",
+    "RuntimeMode",
+    "ValueBinding",
+    "ToolClassification",
 ]
 
 # Auto-setup storage on import
````
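The additions to __all__ above mean the new guard and procedural-memory types are importable from the package root in 0.8.1. A sketch of the import surface only; constructor arguments and behavior are not shown in this diff, so none are assumed beyond the hedged call at the end:

```python
# All of these names are re-exported from the package root per the __all__ above.
from chuk_ai_session_manager import (
    ProceduralContextFormatter,
    ProceduralMemory,
    RuntimeLimits,
    ToolMemoryManager,
    ToolStateManager,
    UngroundedGuard,
    get_tool_state,
)

# Assumed to take no arguments (paired with reset_tool_state); the signature
# is not visible in this diff.
state = get_tool_state()
print(type(state).__name__)
```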
````diff
@@ -270,6 +312,8 @@ except Exception as e:
 try:
     storage_info = get_storage_info()
     provider = storage_info.get("provider", "unknown")
-    logger.debug(f"CHUK AI Session Manager v{__version__} imported successfully (storage: {provider})")
+    logger.debug(
+        f"CHUK AI Session Manager v{__version__} imported successfully (storage: {provider})"
+    )
 except Exception:
-    logger.debug(f"CHUK AI Session Manager v{__version__} imported successfully")
+    logger.debug(f"CHUK AI Session Manager v{__version__} imported successfully")
--- a/chuk_ai_session_manager/api/__init__.py
+++ b/chuk_ai_session_manager/api/__init__.py
@@ -1 +1 @@
-# chuk_ai_session_manager/api/__init__.py
+# chuk_ai_session_manager/api/__init__.py
````
````diff
--- a/chuk_ai_session_manager/api/simple_api.py
+++ b/chuk_ai_session_manager/api/simple_api.py
@@ -7,10 +7,10 @@ building on top of the SessionManager class.
 
 Usage:
     from chuk_ai_session_manager import track_conversation
-
+
     # Quick tracking
     await track_conversation("Hello!", "Hi there!")
-
+
     # Track with model info
     await track_conversation(
         "What's the weather?",
@@ -18,7 +18,7 @@ Usage:
         model="gpt-4",
         provider="openai"
     )
-
+
     # Infinite context
     await track_infinite_conversation(
         "Tell me a long story",
@@ -43,14 +43,14 @@ async def track_conversation(
     provider: str = "unknown",
     session_id: Optional[str] = None,
     infinite_context: bool = False,
-    token_threshold: int = 4000
+    token_threshold: int = 4000,
 ) -> str:
     """
     Quick way to track a single conversation turn.
-
+
     This is the simplest way to track a conversation exchange between
     a user and an AI assistant.
-
+
     Args:
         user_message: What the user said.
         ai_response: What the AI responded.
@@ -59,10 +59,10 @@ async def track_conversation(
         session_id: Optional existing session ID to continue.
         infinite_context: Enable infinite context support.
         token_threshold: Token limit for infinite context segmentation.
-
+
     Returns:
         The session ID (useful for continuing the conversation).
-
+
     Example:
         ```python
         session_id = await track_conversation(
@@ -76,7 +76,7 @@ async def track_conversation(
     sm = SessionManager(
         session_id=session_id,
         infinite_context=infinite_context,
-        token_threshold=token_threshold
+        token_threshold=token_threshold,
     )
     await sm.user_says(user_message)
     session_id = await sm.ai_responds(ai_response, model=model, provider=provider)
@@ -91,14 +91,14 @@ async def track_llm_call(
     session_manager: Optional[SessionManager] = None,
     session_id: Optional[str] = None,
     infinite_context: bool = False,
-    token_threshold: int = 4000
+    token_threshold: int = 4000,
 ) -> tuple[str, str]:
     """
     Track an LLM call automatically.
-
+
     This function wraps your LLM call and automatically tracks both the
     input and output in a session.
-
+
     Args:
         user_input: The user's input to the LLM.
         llm_function: Function that calls the LLM (sync or async).
@@ -108,16 +108,16 @@ async def track_llm_call(
         session_id: Optional session ID if not using session_manager.
         infinite_context: Enable infinite context support.
         token_threshold: Token limit for infinite context.
-
+
     Returns:
         Tuple of (response_text, session_id).
-
+
     Example:
         ```python
         async def call_openai(prompt):
             # Your OpenAI call here
             return response.choices[0].message.content
-
+
         response, session_id = await track_llm_call(
             "Explain quantum computing",
             call_openai,
@@ -130,17 +130,17 @@ async def track_llm_call(
     session_manager = SessionManager(
         session_id=session_id,
         infinite_context=infinite_context,
-        token_threshold=token_threshold
+        token_threshold=token_threshold,
     )
-
+
     await session_manager.user_says(user_input)
-
+
     # Call the LLM function
     if asyncio.iscoroutinefunction(llm_function):
         ai_response = await llm_function(user_input)
     else:
         ai_response = llm_function(user_input)
-
+
     # Handle different response formats
     if isinstance(ai_response, dict) and "choices" in ai_response:
         # OpenAI format
@@ -151,11 +151,11 @@ async def track_llm_call(
     else:
         # Plain string or other
         response_text = str(ai_response)
-
+
     session_id = await session_manager.ai_responds(
         response_text, model=model, provider=provider
     )
-
+
     return response_text, session_id
 
 
@@ -164,24 +164,24 @@ async def quick_conversation(
     ai_response: str,
     model: str = "unknown",
     provider: str = "unknown",
-    infinite_context: bool = False
+    infinite_context: bool = False,
 ) -> Dict[str, Any]:
     """
     Quickest way to track a conversation and get basic stats.
-
+
     This is perfect for one-off tracking where you want immediate
     statistics about the conversation.
-
+
     Args:
         user_message: What the user said.
         ai_response: What the AI responded.
         model: The model used.
         provider: The provider used.
         infinite_context: Enable infinite context support.
-
+
     Returns:
         Dictionary with conversation statistics.
-
+
     Example:
         ```python
         stats = await quick_conversation(
@@ -195,11 +195,11 @@ async def quick_conversation(
     """
     # Create a new session manager
     sm = SessionManager(infinite_context=infinite_context)
-
+
     # Track the conversation
     await sm.user_says(user_message)
     await sm.ai_responds(ai_response, model=model, provider=provider)
-
+
     # Return stats directly
     return await sm.get_stats()
 
@@ -211,15 +211,15 @@ async def track_infinite_conversation(
     provider: str = "unknown",
     session_id: Optional[str] = None,
     token_threshold: int = 4000,
-    max_turns: int = 20
+    max_turns: int = 20,
 ) -> str:
     """
     Track a conversation with infinite context support.
-
+
     This automatically handles long conversations by creating new
     session segments when limits are reached, maintaining context
     through summaries.
-
+
     Args:
         user_message: What the user said.
         ai_response: What the AI responded.
@@ -228,10 +228,10 @@ async def track_infinite_conversation(
         session_id: Optional existing session ID to continue.
         token_threshold: Create new segment after this many tokens.
         max_turns: Create new segment after this many turns.
-
+
     Returns:
         The current session ID (may be different if segmented).
-
+
     Example:
         ```python
         # First message
@@ -240,7 +240,7 @@ async def track_infinite_conversation(
             "Computing history begins with...",
             model="gpt-4"
         )
-
+
         # Continue the conversation
         session_id = await track_infinite_conversation(
             "What about quantum computers?",
@@ -251,13 +251,13 @@ async def track_infinite_conversation(
         ```
     """
     return await track_conversation(
-        user_message,
-        ai_response,
-        model=model,
+        user_message,
+        ai_response,
+        model=model,
         provider=provider,
         session_id=session_id,
-        infinite_context=True,
-        token_threshold=token_threshold
+        infinite_context=True,
+        token_threshold=token_threshold,
     )
 
 
@@ -267,11 +267,11 @@ async def track_tool_use(
     result: Any,
     session_id: Optional[str] = None,
     error: Optional[str] = None,
-    **metadata
+    **metadata,
 ) -> str:
     """
     Track a tool/function call in a session.
-
+
     Args:
         tool_name: Name of the tool that was called.
         arguments: Arguments passed to the tool.
@@ -279,10 +279,10 @@ async def track_tool_use(
         session_id: Optional existing session ID.
         error: Optional error if the tool failed.
         **metadata: Additional metadata to store.
-
+
     Returns:
         The session ID.
-
+
     Example:
         ```python
         session_id = await track_tool_use(
@@ -295,28 +295,23 @@ async def track_tool_use(
     """
     sm = SessionManager(session_id=session_id)
     return await sm.tool_used(
-        tool_name=tool_name,
-        arguments=arguments,
-        result=result,
-        error=error,
-        **metadata
+        tool_name=tool_name, arguments=arguments, result=result, error=error, **metadata
     )
 
 
 async def get_session_stats(
-    session_id: str,
-    include_all_segments: bool = False
+    session_id: str, include_all_segments: bool = False
 ) -> Dict[str, Any]:
     """
     Get statistics for an existing session.
-
+
     Args:
         session_id: The session ID to get stats for.
         include_all_segments: For infinite context sessions, include all segments.
-
+
     Returns:
         Dictionary with session statistics.
-
+
     Example:
         ```python
         stats = await get_session_stats("session-123")
@@ -329,19 +324,18 @@ async def get_session_stats(
 
 
 async def get_conversation_history(
-    session_id: str,
-    include_all_segments: bool = False
+    session_id: str, include_all_segments: bool = False
 ) -> List[Dict[str, Any]]:
     """
     Get the conversation history for a session.
-
+
     Args:
         session_id: The session ID to get history for.
        include_all_segments: For infinite context sessions, include all segments.
-
+
     Returns:
         List of conversation turns.
-
+
     Example:
         ```python
         history = await get_conversation_history("session-123")
@@ -355,4 +349,4 @@ async def get_conversation_history(
 
 # Backwards compatibility aliases
 track_llm_interaction = track_llm_call
-quick_stats = quick_conversation
+quick_stats = quick_conversation
````