@claudetools/tools 0.8.2 → 0.8.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/dist/cli.js +41 -0
  2. package/dist/context/deduplication.d.ts +72 -0
  3. package/dist/context/deduplication.js +77 -0
  4. package/dist/context/deduplication.test.d.ts +6 -0
  5. package/dist/context/deduplication.test.js +84 -0
  6. package/dist/context/emergency-eviction.d.ts +73 -0
  7. package/dist/context/emergency-eviction.example.d.ts +13 -0
  8. package/dist/context/emergency-eviction.example.js +94 -0
  9. package/dist/context/emergency-eviction.js +226 -0
  10. package/dist/context/eviction-engine.d.ts +76 -0
  11. package/dist/context/eviction-engine.example.d.ts +7 -0
  12. package/dist/context/eviction-engine.example.js +144 -0
  13. package/dist/context/eviction-engine.js +176 -0
  14. package/dist/context/example-usage.d.ts +1 -0
  15. package/dist/context/example-usage.js +128 -0
  16. package/dist/context/exchange-summariser.d.ts +80 -0
  17. package/dist/context/exchange-summariser.js +261 -0
  18. package/dist/context/health-monitor.d.ts +97 -0
  19. package/dist/context/health-monitor.example.d.ts +1 -0
  20. package/dist/context/health-monitor.example.js +164 -0
  21. package/dist/context/health-monitor.js +210 -0
  22. package/dist/context/importance-scorer.d.ts +94 -0
  23. package/dist/context/importance-scorer.example.d.ts +1 -0
  24. package/dist/context/importance-scorer.example.js +140 -0
  25. package/dist/context/importance-scorer.js +187 -0
  26. package/dist/context/index.d.ts +9 -0
  27. package/dist/context/index.js +16 -0
  28. package/dist/context/session-helper.d.ts +10 -0
  29. package/dist/context/session-helper.js +51 -0
  30. package/dist/context/session-store.d.ts +94 -0
  31. package/dist/context/session-store.js +286 -0
  32. package/dist/context/usage-estimator.d.ts +131 -0
  33. package/dist/context/usage-estimator.js +260 -0
  34. package/dist/context/usage-estimator.test.d.ts +1 -0
  35. package/dist/context/usage-estimator.test.js +208 -0
  36. package/dist/context-cli.d.ts +16 -0
  37. package/dist/context-cli.js +309 -0
  38. package/dist/evaluation/build-dataset.d.ts +1 -0
  39. package/dist/evaluation/build-dataset.js +135 -0
  40. package/dist/evaluation/threshold-eval.d.ts +63 -0
  41. package/dist/evaluation/threshold-eval.js +250 -0
  42. package/dist/handlers/codedna-handlers.d.ts +2 -2
  43. package/dist/handlers/tool-handlers.js +126 -165
  44. package/dist/helpers/api-client.d.ts +5 -1
  45. package/dist/helpers/api-client.js +3 -1
  46. package/dist/helpers/compact-formatter.d.ts +51 -0
  47. package/dist/helpers/compact-formatter.js +130 -0
  48. package/dist/helpers/engagement-tracker.d.ts +10 -0
  49. package/dist/helpers/engagement-tracker.js +61 -0
  50. package/dist/helpers/error-tracking.js +1 -1
  51. package/dist/helpers/session-validation.d.ts +76 -0
  52. package/dist/helpers/session-validation.js +221 -0
  53. package/dist/helpers/usage-analytics.js +1 -1
  54. package/dist/hooks/index.d.ts +4 -0
  55. package/dist/hooks/index.js +6 -0
  56. package/dist/hooks/post-tool-use-hook-cli.d.ts +2 -0
  57. package/dist/hooks/post-tool-use-hook-cli.js +34 -0
  58. package/dist/hooks/post-tool-use.d.ts +67 -0
  59. package/dist/hooks/post-tool-use.js +234 -0
  60. package/dist/hooks/stop-hook-cli.d.ts +2 -0
  61. package/dist/hooks/stop-hook-cli.js +34 -0
  62. package/dist/hooks/stop.d.ts +64 -0
  63. package/dist/hooks/stop.js +192 -0
  64. package/dist/index.d.ts +3 -0
  65. package/dist/index.js +2 -0
  66. package/dist/logger.d.ts +1 -1
  67. package/dist/logger.js +4 -0
  68. package/dist/resources.js +3 -0
  69. package/dist/setup.js +206 -2
  70. package/dist/templates/claude-md.d.ts +1 -1
  71. package/dist/templates/claude-md.js +23 -35
  72. package/dist/templates/worker-prompt.js +35 -202
  73. package/dist/tools.js +26 -20
  74. package/package.json +6 -2
@@ -0,0 +1,131 @@
1
+ export interface TokenUsage {
2
+ injectedTokens: number;
3
+ toolOutputTokens: number;
4
+ userTokens: number;
5
+ assistantTokens: number;
6
+ totalEstimated: number;
7
+ }
8
+ export interface UsageSnapshot {
9
+ timestamp: string;
10
+ usage: TokenUsage;
11
+ fillPercentage: number;
12
+ modelLimit: number;
13
+ }
14
+ /**
15
+ * Model context limits (in tokens)
16
+ */
17
+ export declare const MODEL_CONTEXT_LIMITS: {
18
+ readonly SONNET: 200000;
19
+ readonly OPUS: 200000;
20
+ readonly HAIKU: 200000;
21
+ };
22
+ /**
23
+ * Default model limit to use
24
+ */
25
+ export declare const DEFAULT_MODEL_LIMIT: 200000;
26
+ export declare class UsageEstimator {
27
+ private usage;
28
+ private history;
29
+ private maxHistorySize;
30
+ constructor(maxHistorySize?: number);
31
+ /**
32
+ * Estimate tokens from text using character count approximation
33
+ * @param text - Text to estimate tokens for
34
+ * @returns Estimated token count
35
+ */
36
+ estimateTokens(text: string): number;
37
+ /**
38
+ * Track context injection (SessionStart, UserPromptSubmit)
39
+ * @param tokens - Number of tokens injected
40
+ */
41
+ trackInjection(tokens: number): void;
42
+ /**
43
+ * Track tool output tokens
44
+ * @param tokens - Number of tokens in tool output
45
+ */
46
+ trackToolOutput(tokens: number): void;
47
+ /**
48
+ * Track a conversation exchange (user + assistant)
49
+ * @param userTokens - Tokens in user message
50
+ * @param assistantTokens - Tokens in assistant response
51
+ */
52
+ trackExchange(userTokens: number, assistantTokens: number): void;
53
+ /**
54
+ * Track user message tokens
55
+ * @param tokens - Number of tokens in user message
56
+ */
57
+ trackUserMessage(tokens: number): void;
58
+ /**
59
+ * Track assistant response tokens
60
+ * @param tokens - Number of tokens in assistant response
61
+ */
62
+ trackAssistantMessage(tokens: number): void;
63
+ /**
64
+ * Get estimated context fill percentage
65
+ * @param modelLimit - Model context limit (defaults to Sonnet/Opus 200k)
66
+ * @returns Fill percentage (0-100)
67
+ */
68
+ getEstimatedFill(modelLimit?: number): number;
69
+ /**
70
+ * Get current usage statistics
71
+ * @returns Current token usage breakdown
72
+ */
73
+ getCurrentUsage(): TokenUsage;
74
+ /**
75
+ * Get usage history snapshots
76
+ * @returns Array of historical usage snapshots
77
+ */
78
+ getHistory(): UsageSnapshot[];
79
+ /**
80
+ * Check if context is approaching limit
81
+ * @param threshold - Percentage threshold (default 80%)
82
+ * @param modelLimit - Model context limit
83
+ * @returns true if fill exceeds threshold
84
+ */
85
+ isApproachingLimit(threshold?: number, modelLimit?: number): boolean;
86
+ /**
87
+ * Get remaining token budget
88
+ * @param modelLimit - Model context limit
89
+ * @returns Number of tokens remaining
90
+ */
91
+ getRemainingBudget(modelLimit?: number): number;
92
+ /**
93
+ * Reset all usage tracking
94
+ */
95
+ reset(): void;
96
+ /**
97
+ * Get a formatted usage report
98
+ * @param modelLimit - Model context limit
99
+ * @returns Human-readable usage report
100
+ */
101
+ getReport(modelLimit?: number): string;
102
+ /**
103
+ * Update total estimated tokens
104
+ */
105
+ private updateTotal;
106
+ /**
107
+ * Take a snapshot of current usage
108
+ */
109
+ private snapshot;
110
+ /**
111
+ * Get status emoji based on fill percentage
112
+ */
113
+ private getStatusEmoji;
114
+ /**
115
+ * Get status text based on fill percentage
116
+ */
117
+ private getStatusText;
118
+ }
119
+ export declare const usageEstimator: UsageEstimator;
120
+ /**
121
+ * Helper: Estimate tokens from text (convenience function)
122
+ */
123
+ export declare function estimateTokens(text: string): number;
124
+ /**
125
+ * Helper: Check if context is approaching limit
126
+ */
127
+ export declare function isContextNearLimit(threshold?: number): boolean;
128
+ /**
129
+ * Helper: Get current fill percentage
130
+ */
131
+ export declare function getContextFill(): number;
@@ -0,0 +1,260 @@
1
+ // =============================================================================
2
+ // Context Usage Estimator
3
+ // =============================================================================
4
+ //
5
+ // Track token usage per injection, maintain running totals, and estimate
6
+ // context fill percentage for automatic context management.
7
+ //
8
+ // Token estimation uses ~4 chars = 1 token approximation (conservative).
9
+ // Model context limits: Sonnet/Opus = 200k, Haiku = 200k
10
+ //
11
+ // =============================================================================
12
+ /**
13
+ * Model context limits (in tokens)
14
+ */
15
+ export const MODEL_CONTEXT_LIMITS = {
16
+ SONNET: 200_000,
17
+ OPUS: 200_000,
18
+ HAIKU: 200_000,
19
+ };
20
+ /**
21
+ * Default model limit to use
22
+ */
23
+ export const DEFAULT_MODEL_LIMIT = MODEL_CONTEXT_LIMITS.SONNET;
24
+ /**
25
+ * Conservative token estimation: ~4 characters = 1 token
26
+ * This tends to overestimate slightly, which is safer for context management.
27
+ */
28
+ const CHARS_PER_TOKEN = 4;
29
+ export class UsageEstimator {
30
+ usage;
31
+ history;
32
+ maxHistorySize;
33
+ constructor(maxHistorySize = 100) {
34
+ this.usage = {
35
+ injectedTokens: 0,
36
+ toolOutputTokens: 0,
37
+ userTokens: 0,
38
+ assistantTokens: 0,
39
+ totalEstimated: 0,
40
+ };
41
+ this.history = [];
42
+ this.maxHistorySize = maxHistorySize;
43
+ }
44
+ /**
45
+ * Estimate tokens from text using character count approximation
46
+ * @param text - Text to estimate tokens for
47
+ * @returns Estimated token count
48
+ */
49
+ estimateTokens(text) {
50
+ if (!text || text.length === 0) {
51
+ return 0;
52
+ }
53
+ return Math.ceil(text.length / CHARS_PER_TOKEN);
54
+ }
55
+ /**
56
+ * Track context injection (SessionStart, UserPromptSubmit)
57
+ * @param tokens - Number of tokens injected
58
+ */
59
+ trackInjection(tokens) {
60
+ this.usage.injectedTokens += tokens;
61
+ this.updateTotal();
62
+ this.snapshot();
63
+ }
64
+ /**
65
+ * Track tool output tokens
66
+ * @param tokens - Number of tokens in tool output
67
+ */
68
+ trackToolOutput(tokens) {
69
+ this.usage.toolOutputTokens += tokens;
70
+ this.updateTotal();
71
+ this.snapshot();
72
+ }
73
+ /**
74
+ * Track a conversation exchange (user + assistant)
75
+ * @param userTokens - Tokens in user message
76
+ * @param assistantTokens - Tokens in assistant response
77
+ */
78
+ trackExchange(userTokens, assistantTokens) {
79
+ this.usage.userTokens += userTokens;
80
+ this.usage.assistantTokens += assistantTokens;
81
+ this.updateTotal();
82
+ this.snapshot();
83
+ }
84
+ /**
85
+ * Track user message tokens
86
+ * @param tokens - Number of tokens in user message
87
+ */
88
+ trackUserMessage(tokens) {
89
+ this.usage.userTokens += tokens;
90
+ this.updateTotal();
91
+ this.snapshot();
92
+ }
93
+ /**
94
+ * Track assistant response tokens
95
+ * @param tokens - Number of tokens in assistant response
96
+ */
97
+ trackAssistantMessage(tokens) {
98
+ this.usage.assistantTokens += tokens;
99
+ this.updateTotal();
100
+ this.snapshot();
101
+ }
102
+ /**
103
+ * Get estimated context fill percentage
104
+ * @param modelLimit - Model context limit (defaults to Sonnet/Opus 200k)
105
+ * @returns Fill percentage (0-100)
106
+ */
107
+ getEstimatedFill(modelLimit = DEFAULT_MODEL_LIMIT) {
108
+ if (modelLimit <= 0) {
109
+ return 0;
110
+ }
111
+ const fillPercentage = (this.usage.totalEstimated / modelLimit) * 100;
112
+ return Math.min(100, Math.max(0, fillPercentage));
113
+ }
114
+ /**
115
+ * Get current usage statistics
116
+ * @returns Current token usage breakdown
117
+ */
118
+ getCurrentUsage() {
119
+ return { ...this.usage };
120
+ }
121
+ /**
122
+ * Get usage history snapshots
123
+ * @returns Array of historical usage snapshots
124
+ */
125
+ getHistory() {
126
+ return [...this.history];
127
+ }
128
+ /**
129
+ * Check if context is approaching limit
130
+ * @param threshold - Percentage threshold (default 80%)
131
+ * @param modelLimit - Model context limit
132
+ * @returns true if fill exceeds threshold
133
+ */
134
+ isApproachingLimit(threshold = 80, modelLimit = DEFAULT_MODEL_LIMIT) {
135
+ return this.getEstimatedFill(modelLimit) >= threshold;
136
+ }
137
+ /**
138
+ * Get remaining token budget
139
+ * @param modelLimit - Model context limit
140
+ * @returns Number of tokens remaining
141
+ */
142
+ getRemainingBudget(modelLimit = DEFAULT_MODEL_LIMIT) {
143
+ return Math.max(0, modelLimit - this.usage.totalEstimated);
144
+ }
145
+ /**
146
+ * Reset all usage tracking
147
+ */
148
+ reset() {
149
+ this.usage = {
150
+ injectedTokens: 0,
151
+ toolOutputTokens: 0,
152
+ userTokens: 0,
153
+ assistantTokens: 0,
154
+ totalEstimated: 0,
155
+ };
156
+ this.history = [];
157
+ }
158
+ /**
159
+ * Get a formatted usage report
160
+ * @param modelLimit - Model context limit
161
+ * @returns Human-readable usage report
162
+ */
163
+ getReport(modelLimit = DEFAULT_MODEL_LIMIT) {
164
+ const fill = this.getEstimatedFill(modelLimit);
165
+ const remaining = this.getRemainingBudget(modelLimit);
166
+ const lines = [
167
+ 'Context Usage Report',
168
+ '='.repeat(50),
169
+ '',
170
+ 'Token Breakdown:',
171
+ ` Injected Context: ${this.usage.injectedTokens.toLocaleString()} tokens`,
172
+ ` Tool Outputs: ${this.usage.toolOutputTokens.toLocaleString()} tokens`,
173
+ ` User Messages: ${this.usage.userTokens.toLocaleString()} tokens`,
174
+ ` Assistant: ${this.usage.assistantTokens.toLocaleString()} tokens`,
175
+ ` Total Estimated: ${this.usage.totalEstimated.toLocaleString()} tokens`,
176
+ '',
177
+ 'Context Window:',
178
+ ` Model Limit: ${modelLimit.toLocaleString()} tokens`,
179
+ ` Used: ${this.usage.totalEstimated.toLocaleString()} tokens (${fill.toFixed(1)}%)`,
180
+ ` Remaining: ${remaining.toLocaleString()} tokens`,
181
+ '',
182
+ `Status: ${this.getStatusEmoji(fill)} ${this.getStatusText(fill)}`,
183
+ '='.repeat(50),
184
+ ];
185
+ return lines.join('\n');
186
+ }
187
+ // ---------------------------------------------------------------------------
188
+ // Private Methods
189
+ // ---------------------------------------------------------------------------
190
+ /**
191
+ * Update total estimated tokens
192
+ */
193
+ updateTotal() {
194
+ this.usage.totalEstimated =
195
+ this.usage.injectedTokens +
196
+ this.usage.toolOutputTokens +
197
+ this.usage.userTokens +
198
+ this.usage.assistantTokens;
199
+ }
200
+ /**
201
+ * Take a snapshot of current usage
202
+ */
203
+ snapshot() {
204
+ const snapshot = {
205
+ timestamp: new Date().toISOString(),
206
+ usage: { ...this.usage },
207
+ fillPercentage: this.getEstimatedFill(),
208
+ modelLimit: DEFAULT_MODEL_LIMIT,
209
+ };
210
+ this.history.push(snapshot);
211
+ // Trim history if it exceeds max size
212
+ if (this.history.length > this.maxHistorySize) {
213
+ this.history = this.history.slice(-this.maxHistorySize);
214
+ }
215
+ }
216
+ /**
217
+ * Get status emoji based on fill percentage
218
+ */
219
+ getStatusEmoji(fill) {
220
+ if (fill >= 90)
221
+ return '🔴';
222
+ if (fill >= 80)
223
+ return '🟡';
224
+ if (fill >= 50)
225
+ return '🟢';
226
+ return '✅';
227
+ }
228
+ /**
229
+ * Get status text based on fill percentage
230
+ */
231
+ getStatusText(fill) {
232
+ if (fill >= 90)
233
+ return 'Critical - Context nearly full';
234
+ if (fill >= 80)
235
+ return 'Warning - Approaching limit';
236
+ if (fill >= 50)
237
+ return 'Good - Moderate usage';
238
+ return 'Excellent - Plenty of space';
239
+ }
240
+ }
241
+ // Export singleton instance for convenience
242
+ export const usageEstimator = new UsageEstimator();
243
+ /**
244
+ * Helper: Estimate tokens from text (convenience function)
245
+ */
246
+ export function estimateTokens(text) {
247
+ return usageEstimator.estimateTokens(text);
248
+ }
249
+ /**
250
+ * Helper: Check if context is approaching limit
251
+ */
252
+ export function isContextNearLimit(threshold = 80) {
253
+ return usageEstimator.isApproachingLimit(threshold);
254
+ }
255
+ /**
256
+ * Helper: Get current fill percentage
257
+ */
258
+ export function getContextFill() {
259
+ return usageEstimator.getEstimatedFill();
260
+ }
@@ -0,0 +1 @@
1
+ export {};
@@ -0,0 +1,208 @@
1
+ // =============================================================================
2
+ // Context Usage Estimator Tests
3
+ // =============================================================================
4
+ import { UsageEstimator, MODEL_CONTEXT_LIMITS } from './usage-estimator.js';
5
+ describe('UsageEstimator', () => {
6
+ let estimator;
7
+ beforeEach(() => {
8
+ estimator = new UsageEstimator();
9
+ });
10
+ describe('estimateTokens', () => {
11
+ it('should estimate tokens using ~4 chars = 1 token', () => {
12
+ // Empty string
13
+ expect(estimator.estimateTokens('')).toBe(0);
14
+ // 4 characters = 1 token
15
+ expect(estimator.estimateTokens('test')).toBe(1);
16
+ // 8 characters = 2 tokens
17
+ expect(estimator.estimateTokens('test123!')).toBe(2);
18
+ // 100 characters = 25 tokens
19
+ const text100 = 'a'.repeat(100);
20
+ expect(estimator.estimateTokens(text100)).toBe(25);
21
+ // 1000 characters = 250 tokens
22
+ const text1000 = 'a'.repeat(1000);
23
+ expect(estimator.estimateTokens(text1000)).toBe(250);
24
+ });
25
+ it('should round up partial tokens', () => {
26
+ // 5 characters = 2 tokens (rounded up from 1.25)
27
+ expect(estimator.estimateTokens('hello')).toBe(2);
28
+ // 7 characters = 2 tokens (rounded up from 1.75)
29
+ expect(estimator.estimateTokens('testing')).toBe(2);
30
+ });
31
+ });
32
+ describe('trackInjection', () => {
33
+ it('should track injected context tokens', () => {
34
+ estimator.trackInjection(100);
35
+ expect(estimator.getCurrentUsage().injectedTokens).toBe(100);
36
+ expect(estimator.getCurrentUsage().totalEstimated).toBe(100);
37
+ estimator.trackInjection(50);
38
+ expect(estimator.getCurrentUsage().injectedTokens).toBe(150);
39
+ expect(estimator.getCurrentUsage().totalEstimated).toBe(150);
40
+ });
41
+ });
42
+ describe('trackToolOutput', () => {
43
+ it('should track tool output tokens', () => {
44
+ estimator.trackToolOutput(200);
45
+ expect(estimator.getCurrentUsage().toolOutputTokens).toBe(200);
46
+ expect(estimator.getCurrentUsage().totalEstimated).toBe(200);
47
+ estimator.trackToolOutput(100);
48
+ expect(estimator.getCurrentUsage().toolOutputTokens).toBe(300);
49
+ expect(estimator.getCurrentUsage().totalEstimated).toBe(300);
50
+ });
51
+ });
52
+ describe('trackExchange', () => {
53
+ it('should track user and assistant tokens', () => {
54
+ estimator.trackExchange(50, 150);
55
+ const usage = estimator.getCurrentUsage();
56
+ expect(usage.userTokens).toBe(50);
57
+ expect(usage.assistantTokens).toBe(150);
58
+ expect(usage.totalEstimated).toBe(200);
59
+ });
60
+ it('should accumulate multiple exchanges', () => {
61
+ estimator.trackExchange(50, 150);
62
+ estimator.trackExchange(30, 100);
63
+ const usage = estimator.getCurrentUsage();
64
+ expect(usage.userTokens).toBe(80);
65
+ expect(usage.assistantTokens).toBe(250);
66
+ expect(usage.totalEstimated).toBe(330);
67
+ });
68
+ });
69
+ describe('getEstimatedFill', () => {
70
+ it('should calculate fill percentage correctly', () => {
71
+ const limit = MODEL_CONTEXT_LIMITS.SONNET; // 200k
72
+ // 0 tokens = 0%
73
+ expect(estimator.getEstimatedFill(limit)).toBe(0);
74
+ // 100k tokens = 50%
75
+ estimator.trackInjection(100_000);
76
+ expect(estimator.getEstimatedFill(limit)).toBe(50);
77
+ // 200k tokens = 100%
78
+ estimator.trackToolOutput(100_000);
79
+ expect(estimator.getEstimatedFill(limit)).toBe(100);
80
+ });
81
+ it('should cap at 100% for over-limit usage', () => {
82
+ const limit = MODEL_CONTEXT_LIMITS.SONNET;
83
+ // 250k tokens (exceeds 200k limit)
84
+ estimator.trackInjection(250_000);
85
+ expect(estimator.getEstimatedFill(limit)).toBe(100);
86
+ });
87
+ it('should handle custom model limits', () => {
88
+ const customLimit = 100_000;
89
+ estimator.trackInjection(50_000);
90
+ expect(estimator.getEstimatedFill(customLimit)).toBe(50);
91
+ estimator.trackToolOutput(30_000);
92
+ expect(estimator.getEstimatedFill(customLimit)).toBe(80);
93
+ });
94
+ });
95
+ describe('isApproachingLimit', () => {
96
+ it('should detect when approaching limit', () => {
97
+ const limit = MODEL_CONTEXT_LIMITS.SONNET;
98
+ // Under threshold (80%)
99
+ estimator.trackInjection(150_000); // 75%
100
+ expect(estimator.isApproachingLimit(80, limit)).toBe(false);
101
+ // At threshold
102
+ estimator.trackToolOutput(10_000); // 80%
103
+ expect(estimator.isApproachingLimit(80, limit)).toBe(true);
104
+ // Over threshold
105
+ estimator.trackUserMessage(20_000); // 90%
106
+ expect(estimator.isApproachingLimit(80, limit)).toBe(true);
107
+ });
108
+ it('should support custom thresholds', () => {
109
+ const limit = MODEL_CONTEXT_LIMITS.SONNET;
110
+ estimator.trackInjection(100_000); // 50%
111
+ expect(estimator.isApproachingLimit(40, limit)).toBe(true);
112
+ expect(estimator.isApproachingLimit(60, limit)).toBe(false);
113
+ });
114
+ });
115
+ describe('getRemainingBudget', () => {
116
+ it('should calculate remaining token budget', () => {
117
+ const limit = MODEL_CONTEXT_LIMITS.SONNET;
118
+ // Full budget available
119
+ expect(estimator.getRemainingBudget(limit)).toBe(200_000);
120
+ // 50k used, 150k remaining
121
+ estimator.trackInjection(50_000);
122
+ expect(estimator.getRemainingBudget(limit)).toBe(150_000);
123
+ // 200k used, 0 remaining
124
+ estimator.trackToolOutput(150_000);
125
+ expect(estimator.getRemainingBudget(limit)).toBe(0);
126
+ });
127
+ it('should not return negative budget', () => {
128
+ const limit = MODEL_CONTEXT_LIMITS.SONNET;
129
+ // Exceed limit
130
+ estimator.trackInjection(250_000);
131
+ expect(estimator.getRemainingBudget(limit)).toBe(0);
132
+ });
133
+ });
134
+ describe('reset', () => {
135
+ it('should reset all usage tracking', () => {
136
+ estimator.trackInjection(100);
137
+ estimator.trackToolOutput(200);
138
+ estimator.trackExchange(50, 150);
139
+ expect(estimator.getCurrentUsage().totalEstimated).toBe(500);
140
+ estimator.reset();
141
+ const usage = estimator.getCurrentUsage();
142
+ expect(usage.injectedTokens).toBe(0);
143
+ expect(usage.toolOutputTokens).toBe(0);
144
+ expect(usage.userTokens).toBe(0);
145
+ expect(usage.assistantTokens).toBe(0);
146
+ expect(usage.totalEstimated).toBe(0);
147
+ expect(estimator.getHistory()).toHaveLength(0);
148
+ });
149
+ });
150
+ describe('history tracking', () => {
151
+ it('should maintain usage history', () => {
152
+ estimator.trackInjection(100);
153
+ estimator.trackToolOutput(200);
154
+ estimator.trackExchange(50, 150);
155
+ const history = estimator.getHistory();
156
+ expect(history.length).toBeGreaterThan(0);
157
+ // Each snapshot should have required fields
158
+ history.forEach(snapshot => {
159
+ expect(snapshot).toHaveProperty('timestamp');
160
+ expect(snapshot).toHaveProperty('usage');
161
+ expect(snapshot).toHaveProperty('fillPercentage');
162
+ expect(snapshot).toHaveProperty('modelLimit');
163
+ });
164
+ });
165
+ it('should limit history size', () => {
166
+ const smallEstimator = new UsageEstimator(5);
167
+ // Add more snapshots than max size
168
+ for (let i = 0; i < 10; i++) {
169
+ smallEstimator.trackInjection(10);
170
+ }
171
+ const history = smallEstimator.getHistory();
172
+ expect(history.length).toBeLessThanOrEqual(5);
173
+ });
174
+ });
175
+ describe('integration scenario', () => {
176
+ it('should track realistic conversation flow', () => {
177
+ const limit = MODEL_CONTEXT_LIMITS.SONNET;
178
+ // Initial context injection (SessionStart)
179
+ const sessionContext = 'System instructions and context...'.repeat(100);
180
+ estimator.trackInjection(estimator.estimateTokens(sessionContext));
181
+ // User message with context injection
182
+ const userMessage = 'Help me debug this code...'.repeat(50);
183
+ estimator.trackUserMessage(estimator.estimateTokens(userMessage));
184
+ const contextInjection = 'Relevant memory facts...'.repeat(200);
185
+ estimator.trackInjection(estimator.estimateTokens(contextInjection));
186
+ // Tool calls and outputs
187
+ estimator.trackToolOutput(estimator.estimateTokens('Tool output...'.repeat(100)));
188
+ // Assistant response
189
+ const assistantResponse = 'Here is the solution...'.repeat(300);
190
+ estimator.trackAssistantMessage(estimator.estimateTokens(assistantResponse));
191
+ // Verify tracking
192
+ const usage = estimator.getCurrentUsage();
193
+ expect(usage.totalEstimated).toBeGreaterThan(0);
194
+ expect(usage.injectedTokens).toBeGreaterThan(0);
195
+ expect(usage.toolOutputTokens).toBeGreaterThan(0);
196
+ expect(usage.userTokens).toBeGreaterThan(0);
197
+ expect(usage.assistantTokens).toBeGreaterThan(0);
198
+ // Should not be near limit yet
199
+ expect(estimator.isApproachingLimit(80, limit)).toBe(false);
200
+ // Fill percentage should be reasonable
201
+ const fill = estimator.getEstimatedFill(limit);
202
+ expect(fill).toBeGreaterThan(0);
203
+ expect(fill).toBeLessThan(20); // Should be well under 20%
204
+ // Should have remaining budget
205
+ expect(estimator.getRemainingBudget(limit)).toBeGreaterThan(150_000);
206
+ });
207
+ });
208
+ });
@@ -0,0 +1,16 @@
1
+ /**
2
+ * claudetools context status - Show current session context usage
3
+ */
4
+ export declare function contextStatus(args: string[]): Promise<void>;
5
+ /**
6
+ * claudetools context evict - Manually trigger eviction cycle
7
+ */
8
+ export declare function contextEvict(args: string[]): Promise<void>;
9
+ /**
10
+ * claudetools context summarise - Summarise and compress exchanges
11
+ */
12
+ export declare function contextSummarise(args: string[]): Promise<void>;
13
+ /**
14
+ * claudetools context reset - Clear session state
15
+ */
16
+ export declare function contextReset(args: string[]): Promise<void>;