@riotprompt/riotprompt 0.0.8 → 0.0.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/.kodrdriv-test-cache.json +6 -0
  2. package/BUG-ANALYSIS.md +523 -0
  3. package/CODE-REVIEW-SUMMARY.md +330 -0
  4. package/FIXES-APPLIED.md +437 -0
  5. package/README.md +2 -2
  6. package/dist/builder.js +3 -0
  7. package/dist/builder.js.map +1 -1
  8. package/dist/chat.d.ts +1 -1
  9. package/dist/chat.js +2 -5
  10. package/dist/chat.js.map +1 -1
  11. package/dist/constants.js +1 -2
  12. package/dist/constants.js.map +1 -1
  13. package/dist/context-manager.d.ts +136 -0
  14. package/dist/context-manager.js +243 -0
  15. package/dist/context-manager.js.map +1 -0
  16. package/dist/conversation-logger.d.ts +285 -0
  17. package/dist/conversation-logger.js +491 -0
  18. package/dist/conversation-logger.js.map +1 -0
  19. package/dist/conversation.d.ts +277 -0
  20. package/dist/conversation.js +649 -0
  21. package/dist/conversation.js.map +1 -0
  22. package/dist/formatter.js.map +1 -1
  23. package/dist/items/section.js +3 -3
  24. package/dist/items/section.js.map +1 -1
  25. package/dist/iteration-strategy.d.ts +233 -0
  26. package/dist/iteration-strategy.js +520 -0
  27. package/dist/iteration-strategy.js.map +1 -0
  28. package/dist/loader.js +21 -3
  29. package/dist/loader.js.map +1 -1
  30. package/dist/message-builder.d.ts +156 -0
  31. package/dist/message-builder.js +256 -0
  32. package/dist/message-builder.js.map +1 -0
  33. package/dist/model-config.d.ts +115 -0
  34. package/dist/model-config.js +205 -0
  35. package/dist/model-config.js.map +1 -0
  36. package/dist/override.js +8 -1
  37. package/dist/override.js.map +1 -1
  38. package/dist/parser.js +3 -3
  39. package/dist/parser.js.map +1 -1
  40. package/dist/recipes.d.ts +42 -0
  41. package/dist/recipes.js +189 -4
  42. package/dist/recipes.js.map +1 -1
  43. package/dist/reflection.d.ts +250 -0
  44. package/dist/reflection.js +419 -0
  45. package/dist/reflection.js.map +1 -0
  46. package/dist/riotprompt.cjs +3854 -178
  47. package/dist/riotprompt.cjs.map +1 -1
  48. package/dist/riotprompt.d.ts +20 -2
  49. package/dist/riotprompt.js +10 -1
  50. package/dist/riotprompt.js.map +1 -1
  51. package/dist/token-budget.d.ts +177 -0
  52. package/dist/token-budget.js +401 -0
  53. package/dist/token-budget.js.map +1 -0
  54. package/dist/tools.d.ts +239 -0
  55. package/dist/tools.js +324 -0
  56. package/dist/tools.js.map +1 -0
  57. package/dist/util/general.js +1 -1
  58. package/dist/util/general.js.map +1 -1
  59. package/package.json +23 -20
@@ -13,7 +13,16 @@ export * as Loader from './loader';
13
13
  export * as Override from './override';
14
14
  export * as Builder from './builder';
15
15
  export * as Recipes from './recipes';
16
- export { cook, recipe, registerTemplates, getTemplates, clearTemplates } from './recipes';
16
+ export { cook, recipe, registerTemplates, getTemplates, clearTemplates, generateToolGuidance } from './recipes';
17
+ export { ConversationBuilder } from './conversation';
18
+ export { ContextManager } from './context-manager';
19
+ export { TokenCounter, TokenBudgetManager } from './token-budget';
20
+ export { MessageBuilder, MessageTemplates } from './message-builder';
21
+ export { ConversationLogger, ConversationReplayer } from './conversation-logger';
22
+ export { ToolRegistry } from './tools';
23
+ export { StrategyExecutor, IterationStrategyFactory } from './iteration-strategy';
24
+ export { MetricsCollector, ReflectionReportGenerator } from './reflection';
25
+ export { ModelRegistry, getModelRegistry, resetModelRegistry, getPersonaRole, getEncoding, supportsToolCalls, getModelFamily, configureModel } from './model-config';
17
26
  export type { Content } from './items/content';
18
27
  export type { Context } from './items/context';
19
28
  export type { Instruction } from './items/instruction';
@@ -25,4 +34,13 @@ export type { Prompt } from './prompt';
25
34
  export type { FormatOptions, SectionSeparator, SectionTitleProperty } from './formatter';
26
35
  export type { Model, Request } from './chat';
27
36
  export type { Logger } from './logger';
28
- export type { RecipeConfig, ContentItem, TemplateConfig } from './recipes';
37
+ export type { RecipeConfig, ContentItem, TemplateConfig, ToolGuidanceConfig } from './recipes';
38
+ export type { ConversationMessage, ConversationBuilderConfig, ConversationMetadata, ConversationState, InjectOptions, ToolCall } from './conversation';
39
+ export type { DynamicContentItem, TrackedContextItem, ContextStats } from './context-manager';
40
+ export type { TokenUsage, TokenBudgetConfig, CompressionStats, CompressionStrategy } from './token-budget';
41
+ export type { SemanticRole, MessageMetadata } from './message-builder';
42
+ export type { IterationStrategy, StrategyPhase, StrategyState, StrategyResult, StrategyContext, PhaseResult, ToolResult, LLMClient, ToolUsagePolicy, Insight } from './iteration-strategy';
43
+ export type { ReflectionReport, ReflectionConfig, AgenticExecutionMetrics, ToolExecutionMetric, ToolStats, Recommendation, ToolEffectivenessAnalysis, PerformanceInsights, QualityAssessment } from './reflection';
44
+ export type { LogConfig, LogFormat, LoggedConversation, ConversationLogMetadata, LoggedMessage, ToolCallLog, ConversationSummary, ReplayOptions, ReplayResult } from './conversation-logger';
45
+ export type { Tool, ToolParameter, ToolContext, ToolExample, ToolCost, OpenAITool, AnthropicTool, ToolDefinition, ToolUsageStats } from './tools';
46
+ export type { ModelConfig, PersonaRole, TokenizerEncoding } from './model-config';
@@ -20,5 +20,14 @@ import * as builder from './builder.js';
20
20
  export { builder as Builder };
21
21
  import * as recipes from './recipes.js';
22
22
  export { recipes as Recipes };
23
- export { clearTemplates, cook, getTemplates, recipe, registerTemplates } from './recipes.js';
23
+ export { clearTemplates, cook, generateToolGuidance, getTemplates, recipe, registerTemplates } from './recipes.js';
24
+ export { ConversationBuilder } from './conversation.js';
25
+ export { ContextManager } from './context-manager.js';
26
+ export { TokenBudgetManager, TokenCounter } from './token-budget.js';
27
+ export { MessageBuilder, MessageTemplates } from './message-builder.js';
28
+ export { ConversationLogger, ConversationReplayer } from './conversation-logger.js';
29
+ export { ToolRegistry } from './tools.js';
30
+ export { IterationStrategyFactory, StrategyExecutor } from './iteration-strategy.js';
31
+ export { MetricsCollector, ReflectionReportGenerator } from './reflection.js';
32
+ export { ModelRegistry, configureModel, getEncoding, getModelFamily, getModelRegistry, getPersonaRole, resetModelRegistry, supportsToolCalls } from './model-config.js';
24
33
  //# sourceMappingURL=riotprompt.js.map
@@ -1 +1 @@
1
- {"version":3,"file":"riotprompt.js","sources":[],"sourcesContent":[],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;"}
1
+ {"version":3,"file":"riotprompt.js","sources":[],"sourcesContent":[],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}
@@ -0,0 +1,177 @@
1
+ import { ConversationMessage } from './conversation';
2
+ import { Model } from './chat';
3
/**
 * Token usage information, as reported by
 * TokenBudgetManager.getCurrentUsage().
 */
export interface TokenUsage {
    /** Tokens consumed by the counted messages. */
    used: number;
    /** Configured maximum token budget. */
    max: number;
    /** Tokens still available after subtracting `used` and the response reserve (never negative). */
    remaining: number;
    /** `used / max` expressed as a percentage (0-100). */
    percentage: number;
}
/**
 * Statistics describing a single compression pass; passed to the
 * onCompression callback.
 */
export interface CompressionStats {
    /** Message count before compression. */
    messagesBefore: number;
    /** Message count after compression. */
    messagesAfter: number;
    /** Token count before compression. */
    tokensBefore: number;
    /** Token count after compression. */
    tokensAfter: number;
    /** tokensBefore - tokensAfter. */
    tokensSaved: number;
    /** Strategy used for this pass. */
    strategy: CompressionStrategy;
}
/**
 * Compression strategy:
 * - 'priority-based': keep the highest-priority messages that fit the budget
 * - 'fifo': drop the oldest messages first
 * - 'summarize': currently falls back to FIFO (no LLM-based summarization yet)
 * - 'adaptive': pick FIFO or priority-based depending on conversation length
 */
export type CompressionStrategy = 'priority-based' | 'fifo' | 'summarize' | 'adaptive';
/**
 * Token budget configuration for TokenBudgetManager.
 */
export interface TokenBudgetConfig {
    /** Maximum tokens allowed for the whole exchange. */
    max: number;
    /** Tokens held back for the model's response. */
    reserveForResponse: number;
    /** Fraction (0-1) of `max` at which onWarning fires; defaults to 0.8. */
    warningThreshold?: number;
    /** Compression strategy applied by compress(). */
    strategy: CompressionStrategy;
    /** Policy when the budget would be exceeded. */
    onBudgetExceeded: 'compress' | 'error' | 'warn' | 'truncate';
    /** Number of most-recent messages FIFO compression keeps; defaults to 3. */
    preserveRecent?: number;
    /** Keep system messages during compression; defaults to true. */
    preserveSystem?: boolean;
    /** Keep high-priority messages during compression; defaults to true. */
    preserveHighPriority?: boolean;
    /** Invoked when usage crosses the warning threshold. */
    onWarning?: (usage: TokenUsage) => void;
    /** Invoked after each compression pass. */
    onCompression?: (stats: CompressionStats) => void;
}
42
/**
 * TokenCounter counts tokens using tiktoken for accurate model-specific counting.
 *
 * Features:
 * - Model-specific token counting
 * - Message overhead calculation
 * - Tool call token estimation
 * - Response token estimation
 *
 * @example
 * ```typescript
 * const counter = new TokenCounter('gpt-4o');
 *
 * const tokens = counter.count('Hello, world!');
 * console.log(`Text uses ${tokens} tokens`);
 *
 * const messageTokens = counter.countMessage({
 *   role: 'user',
 *   content: 'What is the weather?'
 * });
 * ```
 */
export declare class TokenCounter {
    private encoder;
    private model;
    private logger;
    /**
     * @param model - RiotPrompt model whose encoding is used for counting
     * @param logger - optional logger; a library default is used when omitted
     */
    constructor(model: Model, logger?: any);
    /**
     * Count tokens in text. Empty or missing text counts as 0.
     */
    count(text: string): number;
    /**
     * Count tokens in a single message, including the fixed per-message
     * overhead, the role field, and any tool-call payloads.
     */
    countMessage(message: ConversationMessage): number;
    /**
     * Count tokens in entire conversation (includes conversation-start overhead).
     */
    countConversation(messages: ConversationMessage[]): number;
    /**
     * Count with additional overhead estimation: adds a flat tool-definition
     * estimate when `includeToolOverhead` is true and tool calls are present.
     */
    countWithOverhead(messages: ConversationMessage[], includeToolOverhead?: boolean): number;
    /**
     * Estimate tokens needed for response (heuristic: ~20% of input, minimum 500).
     */
    estimateResponseTokens(messages: ConversationMessage[]): number;
    /**
     * Map RiotPrompt model to Tiktoken model using model registry
     */
    private mapToTiktokenModel;
    /**
     * Free encoder resources; call when the counter is no longer needed.
     */
    dispose(): void;
}
98
/**
 * TokenBudgetManager manages token budgets and compression strategies.
 *
 * Features:
 * - Monitor token usage
 * - Automatic compression when budget exceeded
 * - Multiple compression strategies
 * - Priority-based message retention
 * - Usage statistics and callbacks
 *
 * @example
 * ```typescript
 * const manager = new TokenBudgetManager({
 *   max: 8000,
 *   reserveForResponse: 1000,
 *   strategy: 'priority-based',
 *   onBudgetExceeded: 'compress'
 * }, 'gpt-4o');
 *
 * // Check if message can be added
 * if (manager.canAddMessage(message, messages)) {
 *   messages.push(message);
 * } else {
 *   // Compress conversation
 *   messages = manager.compress(messages);
 *   messages.push(message);
 * }
 * ```
 */
export declare class TokenBudgetManager {
    private config;
    private counter;
    private logger;
    /**
     * @param config - budget limits, strategy, and optional callbacks
     * @param model - model used for token counting
     * @param logger - optional logger; a library default is used when omitted
     */
    constructor(config: TokenBudgetConfig, model: Model, logger?: any);
    /**
     * Get current token usage (used / max / remaining / percentage).
     */
    getCurrentUsage(messages: ConversationMessage[]): TokenUsage;
    /**
     * Get remaining tokens available after the response reserve.
     */
    getRemainingTokens(messages: ConversationMessage[]): number;
    /**
     * Check if near token limit; fires the onWarning callback when near.
     * `threshold` is a fraction (0-1) overriding the configured warningThreshold.
     */
    isNearLimit(messages: ConversationMessage[], threshold?: number): boolean;
    /**
     * Check if a message can be added without exceeding budget
     * (accounts for the configured response reserve).
     */
    canAddMessage(message: ConversationMessage, currentMessages: ConversationMessage[]): boolean;
    /**
     * Compress messages according to the configured strategy; returns the
     * input unchanged when it already fits the budget.
     */
    compress(messages: ConversationMessage[]): ConversationMessage[];
    /**
     * Compress by priority (keep high-priority messages)
     */
    private compressByPriority;
    /**
     * Compress using FIFO (remove oldest first) - optimized with Set
     */
    private compressFIFO;
    /**
     * Adaptive compression based on conversation phase
     */
    private compressAdaptive;
    /**
     * Calculate message priority for compression
     */
    private calculatePriority;
    /**
     * Truncate to at most `maxMessages`, keeping system messages plus the
     * most recent non-system messages.
     */
    truncate(messages: ConversationMessage[], maxMessages: number): ConversationMessage[];
    /**
     * Dispose resources
     */
    dispose(): void;
}
export default TokenBudgetManager;
@@ -0,0 +1,401 @@
1
+ import { encoding_for_model } from 'tiktoken';
2
+ import { wrapLogger, DEFAULT_LOGGER } from './logger.js';
3
+ import { getEncoding } from './model-config.js';
4
+
5
/**
 * Compiler helper (SWC) that mirrors class-field assignment semantics:
 * assign `value` to `obj[key]` as an own, enumerable, writable, configurable
 * data property, shadowing any inherited property of the same name.
 *
 * @param {object} obj - target object (mutated)
 * @param {string|symbol} key - property key
 * @param {*} value - value to assign
 * @returns {object} the same `obj`, for chaining
 */
function _define_property(obj, key, value) {
    // Fresh key: a plain assignment already yields the desired descriptor.
    if (!(key in obj)) {
        obj[key] = value;
        return obj;
    }
    // Key exists (possibly inherited): force an own data property.
    Object.defineProperty(obj, key, {
        value,
        enumerable: true,
        configurable: true,
        writable: true,
    });
    return obj;
}
18
// ===== TOKEN COUNTER =====
/**
 * TokenCounter provides model-accurate token counts via tiktoken.
 *
 * Responsibilities:
 * - Count tokens in raw text using the model's encoding
 * - Count per-message and whole-conversation totals, including fixed
 *   per-message and conversation-start overheads
 * - Estimate tool-call payload and response token costs
 *
 * @example
 * ```typescript
 * const counter = new TokenCounter('gpt-4o');
 *
 * const tokens = counter.count('Hello, world!');
 * console.log(`Text uses ${tokens} tokens`);
 *
 * const messageTokens = counter.countMessage({
 *     role: 'user',
 *     content: 'What is the weather?'
 * });
 * ```
 */ class TokenCounter {
    /**
     * @param {Model} model - RiotPrompt model name; mapped to a tiktoken encoding
     * @param {Logger} [logger] - optional logger; DEFAULT_LOGGER when omitted
     */
    constructor(model, logger) {
        this.model = model;
        this.logger = wrapLogger(logger || DEFAULT_LOGGER, 'TokenCounter');
        // Resolve the native tiktoken encoder once up front; dispose() frees it.
        this.encoder = encoding_for_model(this.mapToTiktokenModel(model));
        this.logger.debug('Created TokenCounter', {
            model
        });
    }
    /**
     * Count tokens in a piece of text. Empty or missing text costs zero.
     */
    count(text) {
        return text ? this.encoder.encode(text).length : 0;
    }
    /**
     * Count tokens in a single message, including the fixed per-message
     * framing, the role field, and any tool-call payloads.
     */
    countMessage(message) {
        // 4 tokens of per-message framing + 1 for the role field
        let tokens = 5;
        if (message.content) {
            tokens += this.count(message.content);
        }
        if (message.tool_calls) {
            for (const toolCall of message.tool_calls) {
                // Serialized call payload + 3 tokens of tool-call framing
                tokens += this.count(JSON.stringify(toolCall)) + 3;
            }
        }
        if (message.tool_call_id) {
            // Tool-result reference + 2 tokens of framing
            tokens += this.count(message.tool_call_id) + 2;
        }
        return tokens;
    }
    /**
     * Count tokens across an entire conversation, starting from the
     * 3-token conversation-start overhead.
     */
    countConversation(messages) {
        return messages.reduce((total, message) => total + this.countMessage(message), 3);
    }
    /**
     * Conversation count plus a flat estimate for tool-definition overhead
     * when requested and any message carries tool calls.
     */
    countWithOverhead(messages, includeToolOverhead = false) {
        let total = this.countConversation(messages);
        if (includeToolOverhead) {
            const hasTools = messages.some((m) => m.tool_calls && m.tool_calls.length > 0);
            if (hasTools) {
                total += 100; // Estimated tool definition overhead
            }
        }
        return total;
    }
    /**
     * Heuristic response-size estimate: ~20% of the input, floor of 500.
     */
    estimateResponseTokens(messages) {
        const inputTokens = this.countConversation(messages);
        return Math.max(500, Math.floor(inputTokens * 0.2));
    }
    /**
     * Map a RiotPrompt model to a tiktoken model via the model registry.
     * Unrecognized encodings default to the gpt-4o encoding.
     */
    mapToTiktokenModel(model) {
        const encoding = getEncoding(model);
        if (encoding === 'cl100k_base') {
            return 'gpt-3.5-turbo';
        }
        // 'gpt-4o', 'o200k_base', and anything else
        return 'gpt-4o';
    }
    /**
     * Free the native encoder's resources. Call when finished counting.
     */
    dispose() {
        this.encoder.free();
    }
}
134
// ===== TOKEN BUDGET MANAGER =====
/**
 * TokenBudgetManager manages token budgets and compression strategies.
 *
 * Features:
 * - Monitor token usage
 * - Automatic compression when budget exceeded
 * - Multiple compression strategies
 * - Priority-based message retention
 * - Usage statistics and callbacks
 *
 * @example
 * ```typescript
 * const manager = new TokenBudgetManager({
 *     max: 8000,
 *     reserveForResponse: 1000,
 *     strategy: 'priority-based',
 *     onBudgetExceeded: 'compress'
 * }, 'gpt-4o');
 *
 * // Check if message can be added
 * if (manager.canAddMessage(message, messages)) {
 *     messages.push(message);
 * } else {
 *     // Compress conversation
 *     messages = manager.compress(messages);
 *     messages.push(message);
 * }
 * ```
 */ class TokenBudgetManager {
    /**
     * @param {TokenBudgetConfig} config - budget limits, strategy, callbacks;
     *     missing optional fields receive defaults (warningThreshold 0.8,
     *     preserveRecent 3, preserveSystem/preserveHighPriority true)
     * @param {Model} model - model whose encoding is used for token counting
     * @param {Logger} [logger] - optional logger; DEFAULT_LOGGER when omitted
     */
    constructor(config, model, logger) {
        this.config = {
            warningThreshold: 0.8,
            preserveRecent: 3,
            preserveSystem: true,
            preserveHighPriority: true,
            onWarning: () => {},
            onCompression: () => {},
            ...config
        };
        this.counter = new TokenCounter(model, logger);
        this.logger = wrapLogger(logger || DEFAULT_LOGGER, 'TokenBudgetManager');
        this.logger.debug('Created TokenBudgetManager', {
            max: this.config.max,
            strategy: this.config.strategy
        });
    }
    /**
     * Get current token usage.
     * @returns {TokenUsage} used / max / remaining (never negative, net of the
     *     response reserve) / percentage of `max` consumed
     */
    getCurrentUsage(messages) {
        const used = this.counter.countConversation(messages);
        const max = this.config.max;
        const remaining = Math.max(0, max - used - this.config.reserveForResponse);
        const percentage = used / max * 100;
        return {
            used,
            max,
            remaining,
            percentage
        };
    }
    /**
     * Get remaining tokens available after the response reserve.
     */
    getRemainingTokens(messages) {
        return this.getCurrentUsage(messages).remaining;
    }
    /**
     * Check if near the token limit; fires the onWarning callback when near.
     * @param {number} [threshold] - fraction (0-1) overriding config.warningThreshold
     */
    isNearLimit(messages, threshold) {
        const usage = this.getCurrentUsage(messages);
        const checkThreshold = threshold != null ? threshold : this.config.warningThreshold;
        const isNear = usage.percentage >= checkThreshold * 100;
        if (isNear && this.config.onWarning != null) {
            this.config.onWarning(usage);
        }
        return isNear;
    }
    /**
     * Check if a message can be added without exceeding the budget,
     * accounting for the configured response reserve.
     */
    canAddMessage(message, currentMessages) {
        const currentTokens = this.counter.countConversation(currentMessages);
        const messageTokens = this.counter.countMessage(message);
        const total = currentTokens + messageTokens + this.config.reserveForResponse;
        return total <= this.config.max;
    }
    /**
     * Compress messages according to the configured strategy. Returns the
     * input array untouched when it already fits the target budget; otherwise
     * invokes the onCompression callback with before/after statistics.
     */
    compress(messages) {
        const before = messages.length;
        const tokensBefore = this.counter.countConversation(messages);
        const targetTokens = this.config.max - this.config.reserveForResponse;
        this.logger.debug('Compressing messages', {
            before,
            tokensBefore,
            targetTokens,
            strategy: this.config.strategy
        });
        // No compression needed
        if (tokensBefore <= targetTokens) {
            return messages;
        }
        let compressed;
        switch (this.config.strategy) {
            case 'priority-based':
                compressed = this.compressByPriority(messages, targetTokens);
                break;
            case 'adaptive':
                compressed = this.compressAdaptive(messages, targetTokens);
                break;
            case 'fifo':
            case 'summarize': // summarization would require an LLM call; fall back to FIFO
            default:
                compressed = this.compressFIFO(messages, targetTokens);
        }
        const tokensAfter = this.counter.countConversation(compressed);
        const stats = {
            messagesBefore: before,
            messagesAfter: compressed.length,
            tokensBefore,
            tokensAfter,
            tokensSaved: tokensBefore - tokensAfter,
            strategy: this.config.strategy
        };
        if (this.config.onCompression != null) {
            this.config.onCompression(stats);
        }
        this.logger.info('Compressed conversation', stats);
        return compressed;
    }
    /**
     * Compress by priority: greedily keep the highest-priority messages that
     * fit the target budget, then restore the survivors' original order.
     */
    compressByPriority(messages, targetTokens) {
        const withPriority = messages.map((msg, idx) => ({
            message: msg,
            priority: this.calculatePriority(msg, idx, messages.length),
            tokens: this.counter.countMessage(msg),
            index: idx
        }));
        // Admit messages in descending priority while they fit
        withPriority.sort((a, b) => b.priority - a.priority);
        const kept = [];
        let totalTokens = 0;
        for (const item of withPriority) {
            if (totalTokens + item.tokens <= targetTokens) {
                kept.push(item);
                totalTokens += item.tokens;
            }
        }
        // Restore conversation order
        kept.sort((a, b) => a.index - b.index);
        return kept.map((item) => item.message);
    }
    /**
     * Compress using FIFO: unconditionally preserve system messages (if
     * configured), then the most recent `preserveRecent` messages, then
     * back-fill older messages newest-first while the budget allows.
     * Uses a Set for O(1) membership checks.
     */
    compressFIFO(messages, targetTokens) {
        const preservedSet = new Set();
        let totalTokens = 0;
        // Always preserve system messages if configured (not budget-checked)
        if (this.config.preserveSystem) {
            for (const msg of messages) {
                if (msg.role === 'system') {
                    preservedSet.add(msg);
                    totalTokens += this.counter.countMessage(msg);
                }
            }
        }
        const recentCount = this.config.preserveRecent != null ? this.config.preserveRecent : 3;
        // BUG FIX: slice(-0) returns the whole array; treat preserveRecent: 0 as "none"
        const recentMessages = recentCount > 0
            ? messages.slice(-recentCount).filter((m) => m.role !== 'system')
            : [];
        for (const msg of recentMessages) {
            if (!preservedSet.has(msg)) {
                const tokens = this.counter.countMessage(msg);
                if (totalTokens + tokens <= targetTokens) {
                    preservedSet.add(msg);
                    totalTokens += tokens;
                }
            }
        }
        // Back-fill older messages (newest first) while space remains
        const otherMessages = messages.filter((m) => !preservedSet.has(m) && m.role !== 'system');
        for (let i = otherMessages.length - 1; i >= 0; i--) {
            const msg = otherMessages[i];
            const tokens = this.counter.countMessage(msg);
            if (totalTokens + tokens > targetTokens) {
                break;
            }
            preservedSet.add(msg);
            totalTokens += tokens;
        }
        // Filter against the original array to maintain conversation order
        return messages.filter((m) => preservedSet.has(m));
    }
    /**
     * Adaptive compression based on conversation phase:
     * <=5 messages: light FIFO; <=15: FIFO keeping 5 recent; else priority-based.
     */
    compressAdaptive(messages, targetTokens) {
        const messageCount = messages.length;
        // Early phase: minimal compression (keep most messages)
        if (messageCount <= 5) {
            return this.compressFIFO(messages, targetTokens);
        }
        // Mid phase: moderate compression
        if (messageCount <= 15) {
            const originalPreserveRecent = this.config.preserveRecent;
            this.config.preserveRecent = 5;
            // BUG FIX: restore in finally so a throwing compression pass
            // cannot leave the config permanently mutated
            try {
                return this.compressFIFO(messages, targetTokens);
            } finally {
                this.config.preserveRecent = originalPreserveRecent;
            }
        }
        // Late phase: aggressive compression (priority-based)
        return this.compressByPriority(messages, targetTokens);
    }
    /**
     * Calculate message priority for compression: system messages rank
     * highest; recency, tool results, and tool calls add smaller bonuses.
     */
    calculatePriority(message, index, total) {
        let priority = 1.0;
        // System messages: highest priority
        if (message.role === 'system') {
            priority = 10.0;
        }
        // Recent messages: higher priority (up to +2 for the newest)
        priority += index / total * 2;
        // Tool results: moderate priority
        if (message.role === 'tool') {
            priority += 0.5;
        }
        // Messages with tool calls: keep for context
        if (message.tool_calls && message.tool_calls.length > 0) {
            priority += 0.8;
        }
        return priority;
    }
    /**
     * Truncate to at most `maxMessages`, keeping all system messages plus the
     * most recent non-system messages in the remaining slots.
     */
    truncate(messages, maxMessages) {
        if (messages.length <= maxMessages) {
            return messages;
        }
        const systemMessages = messages.filter((m) => m.role === 'system');
        const otherMessages = messages.filter((m) => m.role !== 'system');
        // BUG FIX: when system messages alone meet/exceed maxMessages, the old
        // slice(-(maxMessages - systemMessages.length)) became slice(-0) or a
        // positive index and returned far MORE than maxMessages. Clamp the
        // non-system slot count to zero instead.
        const slots = Math.max(0, maxMessages - systemMessages.length);
        const recentOther = slots > 0 ? otherMessages.slice(-slots) : [];
        return [
            ...systemMessages,
            ...recentOther
        ];
    }
    /**
     * Dispose resources (frees the underlying tiktoken encoder).
     */
    dispose() {
        this.counter.dispose();
    }
}
399
+
400
+ export { TokenBudgetManager, TokenCounter, TokenBudgetManager as default };
401
+ //# sourceMappingURL=token-budget.js.map