supaclaw 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,143 @@
1
+ /**
2
+ * Context Window Management
3
+ * Implements token budgeting, smart context selection, and lost-in-middle mitigation
4
+ */
5
+ import { Message, Memory, Learning, Entity } from './index';
6
+ export interface ContextBudget {
7
+ total: number;
8
+ systemPrompt: number;
9
+ recentMessages: number;
10
+ memories: number;
11
+ learnings: number;
12
+ entities: number;
13
+ reserve: number;
14
+ }
15
+ export interface ContextItem {
16
+ type: 'message' | 'memory' | 'learning' | 'entity';
17
+ content: string;
18
+ importance: number;
19
+ timestamp: string;
20
+ tokenCount: number;
21
+ metadata?: Record<string, unknown>;
22
+ }
23
+ export interface ContextWindow {
24
+ items: ContextItem[];
25
+ totalTokens: number;
26
+ budget: ContextBudget;
27
+ truncated: boolean;
28
+ }
29
+ /**
30
+ * Estimates token count using rough heuristic
31
+ * 1 token ≈ 4 characters for English text
32
+ */
33
+ export declare function estimateTokens(text: string): number;
34
+ /**
35
+ * Estimates token count more accurately using word count
36
+ * Better approximation: 1 token ≈ 0.75 words
37
+ */
38
+ export declare function estimateTokensAccurate(text: string): number;
39
+ /**
40
+ * Create a context budget for different model context windows
41
+ */
42
+ export declare function createContextBudget(opts: {
43
+ modelContextSize?: number;
44
+ systemPromptSize?: number;
45
+ reserveSize?: number;
46
+ recentMessagesPct?: number;
47
+ memoriesPct?: number;
48
+ learningsPct?: number;
49
+ entitiesPct?: number;
50
+ }): ContextBudget;
51
+ /**
52
+ * Convert messages to context items
53
+ */
54
+ export declare function messagesToContextItems(messages: Message[]): ContextItem[];
55
+ /**
56
+ * Convert memories to context items
57
+ */
58
+ export declare function memoriesToContextItems(memories: Memory[]): ContextItem[];
59
+ /**
60
+ * Convert learnings to context items
61
+ */
62
+ export declare function learningsToContextItems(learnings: Learning[]): ContextItem[];
63
+ /**
64
+ * Convert entities to context items
65
+ */
66
+ export declare function entitiesToContextItems(entities: Entity[]): ContextItem[];
67
+ /**
68
+ * Select items within budget using smart prioritization
69
+ * Implements lost-in-middle mitigation by placing high-importance items at edges
70
+ */
71
+ export declare function selectContextItems(items: ContextItem[], budget: number, opts?: {
72
+ recencyWeight?: number;
73
+ importanceWeight?: number;
74
+ }): ContextItem[];
75
+ /**
76
+ * Arrange items to mitigate "lost in the middle" effect
77
+ * Places highest-importance items at the beginning and end
78
+ * Medium-importance items go in the middle
79
+ *
80
+ * Research shows LLMs pay more attention to the beginning and end of context
81
+ */
82
+ export declare function arrangeForLostInMiddle(items: ContextItem[]): ContextItem[];
83
+ /**
84
+ * Build a complete context window with budget management
85
+ */
86
+ export declare function buildContextWindow(opts: {
87
+ messages: Message[];
88
+ memories: Memory[];
89
+ learnings: Learning[];
90
+ entities: Entity[];
91
+ budget: ContextBudget;
92
+ useLostInMiddleFix?: boolean;
93
+ recencyWeight?: number;
94
+ importanceWeight?: number;
95
+ }): ContextWindow;
96
+ /**
97
+ * Format context window as string for injection into prompt
98
+ */
99
+ export declare function formatContextWindow(window: ContextWindow, opts?: {
100
+ includeMetadata?: boolean;
101
+ groupByType?: boolean;
102
+ }): string;
103
+ /**
104
+ * Get context window stats
105
+ */
106
+ export declare function getContextStats(window: ContextWindow): {
107
+ totalItems: number;
108
+ totalTokens: number;
109
+ budgetUsed: number;
110
+ budgetRemaining: number;
111
+ itemsByType: Record<string, number>;
112
+ truncated: boolean;
113
+ };
114
+ /**
115
+ * Adaptive context budgeting
116
+ * Adjusts budget allocation based on available content
117
+ */
118
+ export declare function createAdaptiveBudget(opts: {
119
+ modelContextSize?: number;
120
+ messageCount: number;
121
+ memoryCount: number;
122
+ learningCount: number;
123
+ entityCount: number;
124
+ }): ContextBudget;
125
+ /**
126
+ * Model-specific context budgets
127
+ */
128
+ export declare const MODEL_BUDGETS: {
129
+ 'claude-3-opus': ContextBudget;
130
+ 'claude-3-sonnet': ContextBudget;
131
+ 'claude-3-haiku': ContextBudget;
132
+ 'claude-3.5-sonnet': ContextBudget;
133
+ 'gpt-4-turbo': ContextBudget;
134
+ 'gpt-4': ContextBudget;
135
+ 'gpt-3.5-turbo': ContextBudget;
136
+ 'gemini-pro': ContextBudget;
137
+ 'llama-3-70b': ContextBudget;
138
+ default: ContextBudget;
139
+ };
140
+ /**
141
+ * Get budget for a specific model
142
+ */
143
+ export declare function getBudgetForModel(model: string): ContextBudget;
@@ -0,0 +1,360 @@
1
+ "use strict";
2
+ /**
3
+ * Context Window Management
4
+ * Implements token budgeting, smart context selection, and lost-in-middle mitigation
5
+ */
6
+ Object.defineProperty(exports, "__esModule", { value: true });
7
+ exports.MODEL_BUDGETS = void 0;
8
+ exports.estimateTokens = estimateTokens;
9
+ exports.estimateTokensAccurate = estimateTokensAccurate;
10
+ exports.createContextBudget = createContextBudget;
11
+ exports.messagesToContextItems = messagesToContextItems;
12
+ exports.memoriesToContextItems = memoriesToContextItems;
13
+ exports.learningsToContextItems = learningsToContextItems;
14
+ exports.entitiesToContextItems = entitiesToContextItems;
15
+ exports.selectContextItems = selectContextItems;
16
+ exports.arrangeForLostInMiddle = arrangeForLostInMiddle;
17
+ exports.buildContextWindow = buildContextWindow;
18
+ exports.formatContextWindow = formatContextWindow;
19
+ exports.getContextStats = getContextStats;
20
+ exports.createAdaptiveBudget = createAdaptiveBudget;
21
+ exports.getBudgetForModel = getBudgetForModel;
22
/**
 * Rough token estimate for a text string.
 *
 * Uses the common heuristic of ~4 characters per token for English text.
 * @param text - Input string.
 * @returns Estimated token count, rounded up.
 */
function estimateTokens(text) {
    const CHARS_PER_TOKEN = 4;
    return Math.ceil(text.length / CHARS_PER_TOKEN);
}
29
/**
 * Word-based token estimate (~1 token per 0.75 words).
 *
 * Fix: an empty or whitespace-only string previously counted as one word,
 * because `"".split(/\s+/)` yields `[""]`, so it reported 2 tokens.
 * Such input now returns 0.
 * @param text - Input string.
 * @returns Estimated token count, rounded up.
 */
function estimateTokensAccurate(text) {
    const trimmed = text.trim();
    if (trimmed === '') {
        return 0; // no words, no tokens
    }
    const words = trimmed.split(/\s+/).length;
    return Math.ceil(words / 0.75);
}
37
/**
 * Create a context budget for a given model context window.
 *
 * The budget subtracts the system prompt and reserve from the total, then
 * splits the remainder across sections by percentage (defaults: 40%
 * messages, 30% memories, 20% learnings, 10% entities).
 *
 * Fix: defaults are now applied with `??` instead of `||`, so explicit
 * zeros (e.g. `{ reserveSize: 0 }` or `{ entitiesPct: 0 }`) are honored
 * rather than silently replaced by the default.
 * @param opts - Optional sizes (tokens) and per-section percentages.
 * @returns A fully populated ContextBudget.
 */
function createContextBudget(opts) {
    const total = opts.modelContextSize ?? 128000;
    const systemPrompt = opts.systemPromptSize ?? 2000;
    const reserve = opts.reserveSize ?? 4000;
    // Tokens left to distribute after fixed overheads.
    const available = total - systemPrompt - reserve;
    const recentMessagesPct = opts.recentMessagesPct ?? 0.4;
    const memoriesPct = opts.memoriesPct ?? 0.3;
    const learningsPct = opts.learningsPct ?? 0.2;
    const entitiesPct = opts.entitiesPct ?? 0.1;
    return {
        total,
        systemPrompt,
        reserve,
        recentMessages: Math.floor(available * recentMessagesPct),
        memories: Math.floor(available * memoriesPct),
        learnings: Math.floor(available * learningsPct),
        entities: Math.floor(available * entitiesPct)
    };
}
59
/**
 * Map raw chat messages onto the generic ContextItem shape.
 *
 * User messages receive a slightly higher importance (0.8) than other
 * roles (0.6). A stored `token_count` is used when present (and non-zero);
 * otherwise the count is estimated from the content.
 * @param messages - Messages to convert.
 * @returns One ContextItem per message.
 */
function messagesToContextItems(messages) {
    const USER_IMPORTANCE = 0.8;
    const OTHER_IMPORTANCE = 0.6;
    return messages.map((message) => {
        const importance = message.role === 'user' ? USER_IMPORTANCE : OTHER_IMPORTANCE;
        return {
            type: 'message',
            content: `${message.role}: ${message.content}`,
            importance,
            timestamp: message.created_at,
            tokenCount: message.token_count || estimateTokens(message.content),
            metadata: { id: message.id, session_id: message.session_id, ...message.metadata }
        };
    });
}
72
/**
 * Map stored memories onto the generic ContextItem shape.
 *
 * Content is prefixed with its category ("general" when none is set);
 * importance is carried over from the memory record and the token count
 * is estimated from the memory content.
 * @param memories - Memories to convert.
 * @returns One ContextItem per memory.
 */
function memoriesToContextItems(memories) {
    return memories.map((memory) => {
        const category = memory.category || 'general';
        return {
            type: 'memory',
            content: `[Memory: ${category}] ${memory.content}`,
            importance: memory.importance,
            timestamp: memory.created_at,
            tokenCount: estimateTokens(memory.content),
            metadata: { id: memory.id, category: memory.category, ...memory.metadata }
        };
    });
}
85
/**
 * Map stored learnings onto the generic ContextItem shape.
 *
 * Importance is tiered by severity: critical → 0.9, warning → 0.7,
 * anything else → 0.5. When the learning has an action it is appended on
 * a new line after the lesson.
 * @param learnings - Learnings to convert.
 * @returns One ContextItem per learning.
 */
function learningsToContextItems(learnings) {
    // Severity maps to a fixed importance tier; unknown severities fall back to 0.5.
    const SEVERITY_IMPORTANCE = { critical: 0.9, warning: 0.7 };
    return learnings.map((learning) => {
        const actionSuffix = learning.action ? '\nAction: ' + learning.action : '';
        return {
            type: 'learning',
            content: `[Learning: ${learning.category}] ${learning.lesson}${actionSuffix}`,
            importance: SEVERITY_IMPORTANCE[learning.severity] ?? 0.5,
            timestamp: learning.created_at,
            tokenCount: estimateTokens(learning.lesson + (learning.action || '')),
            metadata: { id: learning.id, severity: learning.severity, applied: learning.applied_count, ...learning.metadata }
        };
    });
}
98
/**
 * Map tracked entities onto the generic ContextItem shape.
 *
 * Importance scales linearly with mention count, saturating at 1 once an
 * entity has been mentioned 20 times. A description, when present, is
 * appended after the name.
 * @param entities - Entities to convert.
 * @returns One ContextItem per entity.
 */
function entitiesToContextItems(entities) {
    const MENTIONS_FOR_MAX_IMPORTANCE = 20;
    return entities.map((entity) => {
        const descriptionSuffix = entity.description ? ': ' + entity.description : '';
        return {
            type: 'entity',
            content: `[Entity: ${entity.entity_type}] ${entity.name}${descriptionSuffix}`,
            // More mentions = more important, capped at 1.
            importance: Math.min(entity.mention_count / MENTIONS_FOR_MAX_IMPORTANCE, 1),
            timestamp: entity.last_seen_at,
            tokenCount: estimateTokens(entity.name + (entity.description || '')),
            metadata: { id: entity.id, type: entity.entity_type, mentions: entity.mention_count }
        };
    });
}
111
/**
 * Select items within a token budget using smart prioritization.
 *
 * Each item gets a composite score: a weighted blend of its stored
 * importance and a recency factor that decays exponentially over 30 days.
 * Items are then taken greedily in score order while they fit the budget.
 *
 * Fix: an unparseable timestamp previously produced a NaN score (NaN age
 * → NaN recency → NaN composite), which made the sort order undefined.
 * Such items now simply receive no recency bonus.
 * @param items - Candidate context items.
 * @param budget - Maximum total tokens to select.
 * @param opts - Optional scoring weights (defaults: recency 0.3, importance 0.7).
 * @returns The selected items, in descending score order.
 */
function selectContextItems(items, budget, opts = {}) {
    const recencyWeight = opts.recencyWeight ?? 0.3;
    const importanceWeight = opts.importanceWeight ?? 0.7;
    const now = Date.now();
    const scored = items.map(item => {
        const createdAt = new Date(item.timestamp).getTime();
        // Guard against invalid timestamps: treat them as "no recency bonus"
        // rather than letting NaN poison the composite score.
        let recencyScore = 0;
        if (Number.isFinite(createdAt)) {
            const daysSinceCreated = (now - createdAt) / (1000 * 60 * 60 * 24);
            // Exponential decay over 30 days.
            recencyScore = Math.exp(-daysSinceCreated / 30);
        }
        const score = (importanceWeight * item.importance) + (recencyWeight * recencyScore);
        return { item, score };
    });
    // Highest score first.
    scored.sort((a, b) => b.score - a.score);
    // Greedy selection within the token budget.
    const selected = [];
    let totalTokens = 0;
    for (const { item } of scored) {
        if (totalTokens + item.tokenCount <= budget) {
            selected.push(item);
            totalTokens += item.tokenCount;
        }
    }
    return selected;
}
142
/**
 * Reorder items to mitigate the "lost in the middle" effect.
 *
 * LLMs attend more to the beginning and end of their context, so the
 * top-ranked half of the items (by importance) is dealt alternately to
 * the front and back of the result, while the lower-ranked half sits in
 * the middle. Lists of three or fewer items are returned unchanged.
 * @param items - Items to rearrange (not mutated).
 * @returns A new array with high-importance items at the edges.
 */
function arrangeForLostInMiddle(items) {
    if (items.length <= 3) {
        return items; // nothing meaningful to rearrange
    }
    // Rank by importance, highest first, on a copy.
    const ranked = [...items].sort((a, b) => b.importance - a.importance);
    const splitAt = Math.ceil(ranked.length / 2);
    const topHalf = ranked.slice(0, splitAt);
    // Alternate the top half: even ranks open the context, odd ranks close it.
    const leading = topHalf.filter((_, rank) => rank % 2 === 0);
    const trailing = topHalf.filter((_, rank) => rank % 2 === 1);
    return [...leading, ...ranked.slice(splitAt), ...trailing];
}
174
/**
 * Assemble a complete context window with budget management.
 *
 * Each content type is converted to ContextItems and selected against its
 * own slice of the budget; the survivors are merged and either reordered
 * for lost-in-the-middle mitigation (default) or sorted oldest-first.
 * @param opts - Raw content, the budget to apply, scoring weights, and
 *   whether to apply the lost-in-the-middle arrangement.
 * @returns The selected items plus token totals and a truncation flag.
 */
function buildContextWindow(opts) {
    const { messages, memories, learnings, entities, budget, useLostInMiddleFix = true, recencyWeight, importanceWeight } = opts;
    // Shared scoring options for every per-type selection pass.
    const weights = { recencyWeight, importanceWeight };
    // Normalize each source into ContextItems.
    const candidates = {
        messages: messagesToContextItems(messages),
        memories: memoriesToContextItems(memories),
        learnings: learningsToContextItems(learnings),
        entities: entitiesToContextItems(entities)
    };
    // Apply the per-category token budgets.
    const kept = {
        messages: selectContextItems(candidates.messages, budget.recentMessages, weights),
        memories: selectContextItems(candidates.memories, budget.memories, weights),
        learnings: selectContextItems(candidates.learnings, budget.learnings, weights),
        entities: selectContextItems(candidates.entities, budget.entities, weights)
    };
    let combined = [
        ...kept.messages,
        ...kept.memories,
        ...kept.learnings,
        ...kept.entities
    ];
    if (useLostInMiddleFix) {
        combined = arrangeForLostInMiddle(combined);
    }
    else {
        // Without the mitigation, fall back to chronological (oldest-first) order.
        combined.sort((a, b) => new Date(a.timestamp).getTime() - new Date(b.timestamp).getTime());
    }
    const totalTokens = combined.reduce((sum, item) => sum + item.tokenCount, 0);
    // Truncated if any category had to drop at least one candidate.
    const truncated = ['messages', 'memories', 'learnings', 'entities']
        .some((key) => candidates[key].length > kept[key].length);
    return {
        items: combined,
        totalTokens,
        budget,
        truncated
    };
}
228
/**
 * Render a context window as a string for injection into a prompt.
 *
 * In grouped mode the items are emitted in fixed sections (memories,
 * learnings, entities, then the conversation); otherwise they are emitted
 * in their current order, one line each.
 * @param window - The built context window.
 * @param opts - `groupByType` to emit headed sections; `includeMetadata`
 *   to append importance annotations.
 * @returns The formatted, newline-joined text.
 */
function formatContextWindow(window, opts = {}) {
    const out = [];
    if (opts.groupByType) {
        const ofType = (kind) => window.items.filter((item) => item.type === kind);
        // Headed section followed by a blank separator line.
        const section = (title, entries) => {
            if (entries.length > 0) {
                out.push(title, '');
                entries.forEach((entry) => out.push(entry.content));
                out.push('');
            }
        };
        section('# Relevant Memories', ofType('memory'));
        section('# Relevant Learnings', ofType('learning'));
        section('# Known Entities', ofType('entity'));
        const conversation = ofType('message');
        if (conversation.length > 0) {
            out.push('# Recent Conversation', '');
            conversation.forEach((entry) => {
                // NOTE(review): in grouped mode only messages carry the importance
                // annotation, and only when metadata is present — preserved as-is
                // from the original implementation.
                if (opts.includeMetadata && entry.metadata) {
                    out.push(`${entry.content} (importance: ${entry.importance.toFixed(2)})`);
                }
                else {
                    out.push(entry.content);
                }
            });
        }
    }
    else {
        // Flat mode: one line per item, in the window's current order.
        for (const item of window.items) {
            out.push(opts.includeMetadata
                ? `${item.content} [${item.type}, importance: ${item.importance.toFixed(2)}]`
                : item.content);
        }
    }
    return out.join('\n');
}
285
/**
 * Compute summary statistics for a built context window.
 *
 * `budgetUsed` is the fraction of the combined per-section budget consumed
 * (system prompt and reserve are excluded from the denominator).
 *
 * Fix: when the combined budget is 0 the original computed NaN (0/0) or
 * Infinity for `budgetUsed`; it now reports 0 instead.
 * @param window - The built context window.
 * @returns Counts, token totals, usage ratio, and a per-type breakdown.
 */
function getContextStats(window) {
    const itemsByType = {};
    for (const item of window.items) {
        itemsByType[item.type] = (itemsByType[item.type] || 0) + 1;
    }
    const totalBudget = window.budget.recentMessages +
        window.budget.memories +
        window.budget.learnings +
        window.budget.entities;
    // Guard against a zero budget (division by zero).
    const budgetUsed = totalBudget > 0 ? window.totalTokens / totalBudget : 0;
    return {
        totalItems: window.items.length,
        totalTokens: window.totalTokens,
        budgetUsed,
        budgetRemaining: totalBudget - window.totalTokens,
        itemsByType,
        truncated: window.truncated
    };
}
306
/**
 * Adaptive context budgeting.
 *
 * Splits the available tokens across sections in proportion to how many
 * items of each type actually exist, instead of using the fixed default
 * percentages. With no content at all, falls back to the static default
 * budget for the given model size.
 * @param opts - Model size (tokens) plus per-type item counts.
 * @returns A ContextBudget whose section split mirrors the content mix.
 */
function createAdaptiveBudget(opts) {
    const total = opts.modelContextSize || 128000;
    const SYSTEM_PROMPT = 2000;
    const RESERVE = 4000;
    const available = total - SYSTEM_PROMPT - RESERVE;
    const { messageCount, memoryCount, learningCount, entityCount } = opts;
    const totalItems = messageCount + memoryCount + learningCount + entityCount;
    if (totalItems === 0) {
        // Nothing to allocate toward — use the static default split.
        return createContextBudget({ modelContextSize: total });
    }
    // Each section's share is its fraction of the total item count.
    const shares = [messageCount, memoryCount, learningCount, entityCount]
        .map((count) => count / totalItems);
    // Normalize so the shares sum to exactly 1.
    const sum = shares.reduce((acc, share) => acc + share, 0);
    const [messageShare, memoryShare, learningShare, entityShare] = shares;
    return {
        total,
        systemPrompt: SYSTEM_PROMPT,
        reserve: RESERVE,
        recentMessages: Math.floor(available * (messageShare / sum)),
        memories: Math.floor(available * (memoryShare / sum)),
        learnings: Math.floor(available * (learningShare / sum)),
        entities: Math.floor(available * (entityShare / sum))
    };
}
336
+ /**
337
+ * Model-specific context budgets
338
+ */
339
+ exports.MODEL_BUDGETS = {
340
+ // Claude models
341
+ 'claude-3-opus': createContextBudget({ modelContextSize: 200000 }),
342
+ 'claude-3-sonnet': createContextBudget({ modelContextSize: 200000 }),
343
+ 'claude-3-haiku': createContextBudget({ modelContextSize: 200000 }),
344
+ 'claude-3.5-sonnet': createContextBudget({ modelContextSize: 200000 }),
345
+ // GPT models
346
+ 'gpt-4-turbo': createContextBudget({ modelContextSize: 128000 }),
347
+ 'gpt-4': createContextBudget({ modelContextSize: 8192 }),
348
+ 'gpt-3.5-turbo': createContextBudget({ modelContextSize: 16384 }),
349
+ // Other models
350
+ 'gemini-pro': createContextBudget({ modelContextSize: 32000 }),
351
+ 'llama-3-70b': createContextBudget({ modelContextSize: 8192 }),
352
+ // Default
353
+ 'default': createContextBudget({ modelContextSize: 128000 })
354
+ };
355
/**
 * Look up the precomputed budget for a model name.
 *
 * Fix: the original `MODEL_BUDGETS[model] || default` lookup walked the
 * prototype chain, so inherited keys such as "constructor" or "toString"
 * returned truthy Object.prototype members instead of a budget. Only own
 * properties are consulted now.
 * @param model - Model identifier, e.g. 'gpt-4-turbo'.
 * @returns The model's ContextBudget, or the default budget when unknown.
 */
function getBudgetForModel(model) {
    const budgets = exports.MODEL_BUDGETS;
    if (Object.prototype.hasOwnProperty.call(budgets, model)) {
        return budgets[model];
    }
    return budgets.default;
}
@@ -0,0 +1,100 @@
1
+ /**
2
+ * Error handling and retry logic for OpenClaw Memory
3
+ *
4
+ * Provides:
5
+ * - Custom error types
6
+ * - Retry logic with exponential backoff
7
+ * - Circuit breaker pattern
8
+ * - Error recovery strategies
9
+ */
10
+ export declare class OpenClawError extends Error {
11
+ code: string;
12
+ details?: unknown | undefined;
13
+ constructor(message: string, code: string, details?: unknown | undefined);
14
+ }
15
+ export declare class DatabaseError extends OpenClawError {
16
+ constructor(message: string, details?: unknown);
17
+ }
18
+ export declare class EmbeddingError extends OpenClawError {
19
+ constructor(message: string, details?: unknown);
20
+ }
21
+ export declare class ValidationError extends OpenClawError {
22
+ constructor(message: string, details?: unknown);
23
+ }
24
+ export declare class RateLimitError extends OpenClawError {
25
+ constructor(message: string, details?: unknown);
26
+ }
27
+ export interface RetryOptions {
28
+ maxAttempts?: number;
29
+ initialDelayMs?: number;
30
+ maxDelayMs?: number;
31
+ backoffMultiplier?: number;
32
+ shouldRetry?: (error: Error) => boolean;
33
+ onRetry?: (attempt: number, error: Error) => void;
34
+ }
35
+ /**
36
+ * Retry a function with exponential backoff
37
+ */
38
+ export declare function retry<T>(fn: () => Promise<T>, options?: RetryOptions): Promise<T>;
39
+ /**
40
+ * Circuit breaker pattern
41
+ * Prevents cascading failures by failing fast when error rate is high
42
+ */
43
+ export declare class CircuitBreaker {
44
+ private failureThreshold;
45
+ private recoveryTimeMs;
46
+ private successThreshold;
47
+ private failures;
48
+ private successes;
49
+ private lastFailureTime;
50
+ private state;
51
+ constructor(failureThreshold?: number, recoveryTimeMs?: number, // 1 minute
52
+ successThreshold?: number);
53
+ execute<T>(fn: () => Promise<T>): Promise<T>;
54
+ private onSuccess;
55
+ private onFailure;
56
+ reset(): void;
57
+ getState(): {
58
+ state: "closed" | "open" | "half-open";
59
+ failures: number;
60
+ successes: number;
61
+ lastFailureTime: number;
62
+ };
63
+ }
64
+ /**
65
+ * Wrap database operations with error handling
66
+ */
67
+ export declare function wrapDatabaseOperation<T>(operation: () => Promise<T>, errorContext: string): Promise<T>;
68
+ /**
69
+ * Wrap embedding operations with error handling
70
+ */
71
+ export declare function wrapEmbeddingOperation<T>(operation: () => Promise<T>, errorContext: string): Promise<T>;
72
+ /**
73
+ * Validate inputs
74
+ */
75
+ export declare function validateInput(condition: boolean, message: string, details?: unknown): void;
76
+ /**
77
+ * Safe JSON parse
78
+ */
79
+ export declare function safeJsonParse<T>(json: string, fallback: T): T;
80
+ /**
81
+ * Safe async operation with timeout
82
+ */
83
+ export declare function withTimeout<T>(promise: Promise<T>, timeoutMs: number, errorMessage?: string): Promise<T>;
84
+ /**
85
+ * Graceful degradation helper
86
+ */
87
+ export declare function gracefulFallback<T>(primary: () => Promise<T>, fallback: () => T | Promise<T>, errorContext: string): Promise<T>;
88
+ /**
89
+ * Batch operation with error handling
90
+ * Continues processing even if some items fail
91
+ */
92
+ export declare function batchWithErrorHandling<T, R>(items: T[], operation: (item: T) => Promise<R>, options?: {
93
+ continueOnError?: boolean;
94
+ onError?: (item: T, error: Error) => void;
95
+ }): Promise<Array<{
96
+ success: boolean;
97
+ result?: R;
98
+ error?: Error;
99
+ item: T;
100
+ }>>;