chuk-ai-session-manager 0.7-py3-none-any.whl → 0.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chuk_ai_session_manager/__init__.py +84 -40
- chuk_ai_session_manager/api/__init__.py +1 -1
- chuk_ai_session_manager/api/simple_api.py +53 -59
- chuk_ai_session_manager/exceptions.py +31 -17
- chuk_ai_session_manager/guards/__init__.py +118 -0
- chuk_ai_session_manager/guards/bindings.py +217 -0
- chuk_ai_session_manager/guards/cache.py +163 -0
- chuk_ai_session_manager/guards/manager.py +819 -0
- chuk_ai_session_manager/guards/models.py +498 -0
- chuk_ai_session_manager/guards/ungrounded.py +159 -0
- chuk_ai_session_manager/infinite_conversation.py +86 -79
- chuk_ai_session_manager/memory/__init__.py +247 -0
- chuk_ai_session_manager/memory/artifacts_bridge.py +469 -0
- chuk_ai_session_manager/memory/context_packer.py +347 -0
- chuk_ai_session_manager/memory/fault_handler.py +507 -0
- chuk_ai_session_manager/memory/manifest.py +307 -0
- chuk_ai_session_manager/memory/models.py +1084 -0
- chuk_ai_session_manager/memory/mutation_log.py +186 -0
- chuk_ai_session_manager/memory/pack_cache.py +206 -0
- chuk_ai_session_manager/memory/page_table.py +275 -0
- chuk_ai_session_manager/memory/prefetcher.py +192 -0
- chuk_ai_session_manager/memory/tlb.py +247 -0
- chuk_ai_session_manager/memory/vm_prompts.py +238 -0
- chuk_ai_session_manager/memory/working_set.py +574 -0
- chuk_ai_session_manager/models/__init__.py +21 -9
- chuk_ai_session_manager/models/event_source.py +3 -1
- chuk_ai_session_manager/models/event_type.py +10 -1
- chuk_ai_session_manager/models/session.py +103 -68
- chuk_ai_session_manager/models/session_event.py +69 -68
- chuk_ai_session_manager/models/session_metadata.py +9 -10
- chuk_ai_session_manager/models/session_run.py +21 -22
- chuk_ai_session_manager/models/token_usage.py +76 -76
- chuk_ai_session_manager/procedural_memory/__init__.py +70 -0
- chuk_ai_session_manager/procedural_memory/formatter.py +407 -0
- chuk_ai_session_manager/procedural_memory/manager.py +523 -0
- chuk_ai_session_manager/procedural_memory/models.py +371 -0
- chuk_ai_session_manager/sample_tools.py +79 -46
- chuk_ai_session_manager/session_aware_tool_processor.py +27 -16
- chuk_ai_session_manager/session_manager.py +238 -197
- chuk_ai_session_manager/session_prompt_builder.py +163 -111
- chuk_ai_session_manager/session_storage.py +45 -52
- {chuk_ai_session_manager-0.7.dist-info → chuk_ai_session_manager-0.8.dist-info}/METADATA +78 -2
- chuk_ai_session_manager-0.8.dist-info/RECORD +45 -0
- {chuk_ai_session_manager-0.7.dist-info → chuk_ai_session_manager-0.8.dist-info}/WHEEL +1 -1
- chuk_ai_session_manager-0.7.dist-info/RECORD +0 -22
- {chuk_ai_session_manager-0.7.dist-info → chuk_ai_session_manager-0.8.dist-info}/top_level.txt +0 -0
chuk_ai_session_manager/models/token_usage.py

@@ -5,16 +5,16 @@ Token usage tracking models for the chuk session manager.
 This module provides models for tracking token usage in LLM interactions
 with proper async support.
 """
+
 from __future__ import annotations
-from
-from typing import Dict, Optional, Union, List, Any
-from uuid import uuid4
+from typing import Dict, Optional, Union, Any
 from pydantic import BaseModel, Field, ConfigDict
 import asyncio
 
 # Try to import tiktoken, but make it optional
 try:
     import tiktoken
+
     TIKTOKEN_AVAILABLE = True
 except ImportError:
     TIKTOKEN_AVAILABLE = False
@@ -23,7 +23,7 @@ except ImportError:
 class TokenUsage(BaseModel):
     """
     Tracks token usage for LLM interactions.
-
+
     Attributes:
         prompt_tokens: Number of tokens in the prompt/input
         completion_tokens: Number of tokens in the completion/output
@@ -31,28 +31,31 @@ class TokenUsage(BaseModel):
         model: The model used for the interaction (helps with pricing calculations)
         estimated_cost_usd: Estimated cost in USD (if pricing info is available)
     """
+
     model_config = ConfigDict(arbitrary_types_allowed=True)
-
+
     prompt_tokens: int = 0
     completion_tokens: int = 0
     total_tokens: int = Field(default=0)
     model: str = ""
     estimated_cost_usd: Optional[float] = None
-
+
     def __init__(self, **data):
         super().__init__(**data)
         # Auto-calculate total tokens if not explicitly provided
-        if self.total_tokens == 0 and (self.prompt_tokens > 0 or self.completion_tokens > 0):
+        if self.total_tokens == 0 and (
+            self.prompt_tokens > 0 or self.completion_tokens > 0
+        ):
             self.total_tokens = self.prompt_tokens + self.completion_tokens
-
+
         # Auto-calculate estimated cost if model is provided
         if self.model and self.estimated_cost_usd is None:
            self.estimated_cost_usd = self._calculate_cost_sync()
-
+
     def _calculate_cost_sync(self) -> float:
         """
         Synchronous implementation of calculate_cost.
-
+
         Returns:
             Estimated cost in USD
         """
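
A note on the hunk above: the wrapped condition is formatting-only, and __init__ still derives totals and, when a model is named, cost at construction time. A minimal sketch of that behavior, using only names from this file (the expected cost follows from the pricing table in the next hunk):

from chuk_ai_session_manager.models.token_usage import TokenUsage

usage = TokenUsage(prompt_tokens=100, completion_tokens=50, model="gpt-4")
assert usage.total_tokens == 150  # auto-filled: 100 + 50
# (100 / 1000) * 0.03 + (50 / 1000) * 0.06 = 0.003 + 0.003 = 0.006
assert usage.estimated_cost_usd == 0.006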
@@ -62,40 +65,38 @@ class TokenUsage(BaseModel):
             "gpt-4": {"input": 0.03, "output": 0.06},
             "gpt-4-turbo": {"input": 0.01, "output": 0.03},
             "gpt-3.5-turbo": {"input": 0.0005, "output": 0.0015},
-
             # Claude models
             "claude-3-opus": {"input": 0.015, "output": 0.075},
             "claude-3-sonnet": {"input": 0.003, "output": 0.015},
             "claude-3-haiku": {"input": 0.00025, "output": 0.00125},
-
             # Fallback for unknown models
-            "default": {"input": 0.001, "output": 0.002}
+            "default": {"input": 0.001, "output": 0.002},
         }
-
+
         # Get pricing for this model or use default
         model_pricing = pricing.get(self.model.lower(), pricing["default"])
-
+
         # Calculate cost
         input_cost = (self.prompt_tokens / 1000) * model_pricing["input"]
         output_cost = (self.completion_tokens / 1000) * model_pricing["output"]
-
+
         return round(input_cost + output_cost, 6)
-
+
     async def calculate_cost(self) -> float:
         """
         Async version of calculate_cost.
-
+
         Returns:
             Estimated cost in USD
         """
         # Token calculation is CPU-bound, so run in executor
         loop = asyncio.get_running_loop()
         return await loop.run_in_executor(None, self._calculate_cost_sync)
-
+
     def _update_sync(self, prompt_tokens: int = 0, completion_tokens: int = 0) -> None:
         """
         Synchronous implementation of update.
-
+
         Args:
             prompt_tokens: Additional prompt tokens to add
             completion_tokens: Additional completion tokens to add
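
The rates above are per 1K tokens, so _calculate_cost_sync stays easy to check by hand; unknown model names fall back to the "default" row via pricing.get(self.model.lower(), ...). A worked example for gpt-3.5-turbo:

from chuk_ai_session_manager.models.token_usage import TokenUsage

usage = TokenUsage(prompt_tokens=2000, completion_tokens=500, model="gpt-3.5-turbo")
# input:  (2000 / 1000) * 0.0005 = 0.00100
# output: (500 / 1000)  * 0.0015 = 0.00075
assert usage.estimated_cost_usd == 0.00175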
@@ -103,14 +104,14 @@ class TokenUsage(BaseModel):
         self.prompt_tokens += prompt_tokens
         self.completion_tokens += completion_tokens
         self.total_tokens = self.prompt_tokens + self.completion_tokens
-
+
         if self.model:
             self.estimated_cost_usd = self._calculate_cost_sync()
-
+
     async def update(self, prompt_tokens: int = 0, completion_tokens: int = 0) -> None:
         """
         Async version of update.
-
+
         Args:
             prompt_tokens: Additional prompt tokens to add
             completion_tokens: Additional completion tokens to add
@@ -118,88 +119,85 @@ class TokenUsage(BaseModel):
         self.prompt_tokens += prompt_tokens
         self.completion_tokens += completion_tokens
         self.total_tokens = self.prompt_tokens + self.completion_tokens
-
+
         if self.model:
             self.estimated_cost_usd = await self.calculate_cost()
-
+
     @classmethod
     def _from_text_sync(
-        cls,
-        prompt: str,
-        completion: Optional[str] = None,
-        model: str = "gpt-3.5-turbo"
+        cls, prompt: str, completion: Optional[str] = None, model: str = "gpt-3.5-turbo"
     ) -> TokenUsage:
         """
         Synchronous implementation of from_text.
-
+
         Args:
             prompt: The prompt/input text
             completion: The completion/output text (optional)
             model: The model name to use for counting and pricing
-
+
         Returns:
             A TokenUsage instance with token counts
         """
         prompt_tokens = cls._count_tokens_sync(prompt, model)
-        completion_tokens = cls._count_tokens_sync(completion, model) if completion else 0
-
+        completion_tokens = (
+            cls._count_tokens_sync(completion, model) if completion else 0
+        )
+
         return cls(
             prompt_tokens=prompt_tokens,
             completion_tokens=completion_tokens,
-            model=model
+            model=model,
         )
-
+
     @classmethod
     async def from_text(
-        cls,
-        prompt: str,
-        completion: Optional[str] = None,
-        model: str = "gpt-3.5-turbo"
+        cls, prompt: str, completion: Optional[str] = None, model: str = "gpt-3.5-turbo"
     ) -> TokenUsage:
         """
         Async version of from_text.
-
+
         Args:
             prompt: The prompt/input text
             completion: The completion/output text (optional)
             model: The model name to use for counting and pricing
-
+
         Returns:
             A TokenUsage instance with token counts
         """
         # Run token counting in executor since it's CPU-bound
         loop = asyncio.get_running_loop()
         return await loop.run_in_executor(
-            None,
-            lambda: cls._from_text_sync(prompt, completion, model)
+            None, lambda: cls._from_text_sync(prompt, completion, model)
         )
-
+
     @staticmethod
-    def _count_tokens_sync(text: Optional[Union[str, Any]], model: str = "gpt-3.5-turbo") -> int:
+    def _count_tokens_sync(
+        text: Optional[Union[str, Any]], model: str = "gpt-3.5-turbo"
+    ) -> int:
         """
         Synchronous implementation of count_tokens.
-
+
         Args:
             text: The text to count tokens for
             model: The model name to use for counting
-
+
         Returns:
             The number of tokens
         """
         if text is None:
             return 0
-
+
         # Convert to string if not already a string
         if not isinstance(text, str):
             try:
                 text = str(text)
             except Exception:
                 return 0
-
+
         # Empty string has 0 tokens
         if not text:
             return 0
-
+
         if TIKTOKEN_AVAILABLE:
             try:
                 encoding = tiktoken.encoding_for_model(model)
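
The signature changes in this hunk collapse argument lists only; call sites are unchanged. A short sketch of the async path, which counts with tiktoken when installed and falls back to the ~4 chars/token approximation otherwise (names taken from this file):

import asyncio
from chuk_ai_session_manager.models.token_usage import TokenUsage

async def main() -> None:
    # Token counting runs in a thread-pool executor, off the event loop.
    usage = await TokenUsage.from_text(
        prompt="What is the capital of France?",
        completion="Paris.",
        model="gpt-3.5-turbo",
    )
    print(usage.total_tokens, usage.estimated_cost_usd)

asyncio.run(main())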
@@ -212,53 +210,54 @@ class TokenUsage(BaseModel):
             except Exception:
                 # If all else fails, use the approximation
                 pass
-
+
         # Simple approximation: ~4 chars per token for English text
         return int(len(text) / 4)
-
+
     @staticmethod
-    async def count_tokens(text: Optional[Union[str, Any]], model: str = "gpt-3.5-turbo") -> int:
+    async def count_tokens(
+        text: Optional[Union[str, Any]], model: str = "gpt-3.5-turbo"
+    ) -> int:
         """
         Async version of count_tokens.
-
+
         Args:
             text: The text to count tokens for
             model: The model name to use for counting
-
+
         Returns:
             The number of tokens
         """
         # Run in executor since token counting is CPU-bound
         loop = asyncio.get_running_loop()
         return await loop.run_in_executor(
-            None,
-            lambda: TokenUsage._count_tokens_sync(text, model)
+            None, lambda: TokenUsage._count_tokens_sync(text, model)
         )
-
+
     def __add__(self, other: TokenUsage) -> TokenUsage:
         """
         Add two TokenUsage instances together.
-
+
         Args:
             other: Another TokenUsage instance
-
+
         Returns:
             A new TokenUsage instance with combined counts
         """
         # Use the model from self if it exists, otherwise use the other's model
         model = self.model if self.model else other.model
-
+
         return TokenUsage(
             prompt_tokens=self.prompt_tokens + other.prompt_tokens,
             completion_tokens=self.completion_tokens + other.completion_tokens,
-            model=model
+            model=model,
         )
 
 
 class TokenSummary(BaseModel):
     """
     Summarizes token usage across multiple interactions.
-
+
     Attributes:
         total_prompt_tokens: Total tokens used in prompts
         total_completion_tokens: Total tokens used in completions
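
__add__ folds per-call usages together; as the hunk shows, other.model is used only when self.model is empty. A sketch:

from chuk_ai_session_manager.models.token_usage import TokenUsage

a = TokenUsage(prompt_tokens=100, completion_tokens=40, model="gpt-4")
b = TokenUsage(prompt_tokens=30, completion_tokens=10)  # model unset
combined = a + b
assert combined.prompt_tokens == 130 and combined.completion_tokens == 50
assert combined.model == "gpt-4"  # kept from the left operand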
@@ -266,62 +265,63 @@ class TokenSummary(BaseModel):
         usage_by_model: Breakdown of usage by model
         total_estimated_cost_usd: Total estimated cost across all models
     """
+
     total_prompt_tokens: int = 0
     total_completion_tokens: int = 0
     total_tokens: int = 0
     usage_by_model: Dict[str, TokenUsage] = Field(default_factory=dict)
     total_estimated_cost_usd: float = 0.0
-
+
     def _add_usage_sync(self, usage: TokenUsage) -> None:
         """
         Synchronous implementation of add_usage.
-
+
         Args:
             usage: The TokenUsage to add
         """
         self.total_prompt_tokens += usage.prompt_tokens
         self.total_completion_tokens += usage.completion_tokens
         self.total_tokens += usage.total_tokens
-
+
         if usage.estimated_cost_usd is not None:
             self.total_estimated_cost_usd += usage.estimated_cost_usd
-
+
         if usage.model:
             if usage.model in self.usage_by_model:
                 self.usage_by_model[usage.model]._update_sync(
                     prompt_tokens=usage.prompt_tokens,
-                    completion_tokens=usage.completion_tokens
+                    completion_tokens=usage.completion_tokens,
                 )
             else:
                 self.usage_by_model[usage.model] = TokenUsage(
                     prompt_tokens=usage.prompt_tokens,
                     completion_tokens=usage.completion_tokens,
-                    model=usage.model
+                    model=usage.model,
                 )
-
+
     async def add_usage(self, usage: TokenUsage) -> None:
         """
         Async version of add_usage.
-
+
         Args:
             usage: The TokenUsage to add
         """
         self.total_prompt_tokens += usage.prompt_tokens
         self.total_completion_tokens += usage.completion_tokens
         self.total_tokens += usage.total_tokens
-
+
         if usage.estimated_cost_usd is not None:
             self.total_estimated_cost_usd += usage.estimated_cost_usd
-
+
         if usage.model:
             if usage.model in self.usage_by_model:
                 await self.usage_by_model[usage.model].update(
                     prompt_tokens=usage.prompt_tokens,
-                    completion_tokens=usage.completion_tokens
+                    completion_tokens=usage.completion_tokens,
                 )
             else:
                 self.usage_by_model[usage.model] = TokenUsage(
                     prompt_tokens=usage.prompt_tokens,
                     completion_tokens=usage.completion_tokens,
-                    model=usage.model
-                )
+                    model=usage.model,
+                )
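
TokenSummary keeps the same paired sync/async surface; add_usage accumulates the totals and maintains the per-model breakdown shown above. A sketch using only names from this file:

import asyncio
from chuk_ai_session_manager.models.token_usage import TokenSummary, TokenUsage

async def main() -> None:
    summary = TokenSummary()
    await summary.add_usage(TokenUsage(prompt_tokens=100, completion_tokens=50, model="gpt-4"))
    await summary.add_usage(TokenUsage(prompt_tokens=200, completion_tokens=80, model="gpt-4"))
    assert summary.total_tokens == 430  # 150 + 280
    assert summary.usage_by_model["gpt-4"].total_tokens == 430

asyncio.run(main())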
chuk_ai_session_manager/procedural_memory/__init__.py

@@ -0,0 +1,70 @@
+# chuk_ai_session_manager/procedural_memory/__init__.py
+"""
+Procedural Memory System for Tool Usage Learning.
+
+This module provides:
+- Tool invocation tracing (what was called, when, with what args)
+- Outcome tracking (success/failure + error types)
+- Fix detection (linking failures to subsequent successful fixes)
+- Pattern aggregation (learned "recipes" that work)
+- Session persistence (via chuk-ai-session-manager)
+
+Memory Hierarchy:
+- L1: Hot cache (in ConversationProcessor) - not this module
+- L2: ToolLog (session tool traces) - this module
+- L3: ToolPatterns (aggregated knowledge) - this module
+
+Usage:
+    from chuk_ai_session_manager.procedural_memory import (
+        ToolMemoryManager,
+        ToolOutcome,
+    )
+
+    # Create manager (optionally bound to a session)
+    manager = ToolMemoryManager.create(session_id="abc")
+
+    # Record a tool call
+    await manager.record_call(
+        tool_name="solver_solve_plan",
+        arguments={"tasks": 10},
+        result={"status": "sat"},
+        outcome=ToolOutcome.SUCCESS,
+        context_goal="schedule F1 testing"
+    )
+
+    # Get context for paging into model
+    from chuk_ai_session_manager.procedural_memory import ProceduralContextFormatter
+    formatter = ProceduralContextFormatter()
+    context = formatter.format_for_tools(manager, ["solver_solve_plan"])
+"""
+
+from chuk_ai_session_manager.procedural_memory.models import (
+    ToolOutcome,
+    ToolLogEntry,
+    ToolPattern,
+    ToolFixRelation,
+    ProceduralMemory,
+    ErrorPattern,
+    SuccessPattern,
+)
+from chuk_ai_session_manager.procedural_memory.manager import ToolMemoryManager
+from chuk_ai_session_manager.procedural_memory.formatter import (
+    ProceduralContextFormatter,
+    FormatterConfig,
+)
+
+__all__ = [
+    # Models
+    "ToolOutcome",
+    "ToolLogEntry",
+    "ToolPattern",
+    "ToolFixRelation",
+    "ProceduralMemory",
+    "ErrorPattern",
+    "SuccessPattern",
+    # Manager
+    "ToolMemoryManager",
+    # Formatter
+    "ProceduralContextFormatter",
+    "FormatterConfig",
+]
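
The new module's docstring doubles as its quickstart; stitched into one sketch below. Only the calls shown in that docstring are used (ToolMemoryManager.create, record_call, format_for_tools), so treat the exact signatures as illustrative rather than exhaustive:

import asyncio
from chuk_ai_session_manager.procedural_memory import (
    ProceduralContextFormatter,
    ToolMemoryManager,
    ToolOutcome,
)

async def main() -> None:
    # L2: trace one tool invocation and its outcome.
    manager = ToolMemoryManager.create(session_id="abc")
    await manager.record_call(
        tool_name="solver_solve_plan",
        arguments={"tasks": 10},
        result={"status": "sat"},
        outcome=ToolOutcome.SUCCESS,
        context_goal="schedule F1 testing",
    )
    # L3: format aggregated recipes for paging back into the model context.
    formatter = ProceduralContextFormatter()
    print(formatter.format_for_tools(manager, ["solver_solve_plan"]))

asyncio.run(main())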