@grapine.ai/contextprune 0.1.0 → 0.1.1

This diff shows the changes between publicly released versions of the package as they appear in its public registry. It is provided for informational purposes only.
Files changed (45)
  1. package/README.md +426 -1
  2. package/dist/cli/commands/analyze.d.ts +2 -0
  3. package/dist/cli/commands/analyze.js +161 -0
  4. package/dist/cli/commands/compress.d.ts +2 -0
  5. package/dist/cli/commands/compress.js +65 -0
  6. package/dist/cli/commands/watch.d.ts +2 -0
  7. package/dist/cli/commands/watch.js +432 -0
  8. package/dist/cli/dashboard/index.html +720 -0
  9. package/dist/cli/index.d.ts +2 -0
  10. package/dist/cli/index.js +19 -0
  11. package/dist/cli/labels.d.ts +4 -0
  12. package/dist/cli/labels.js +35 -0
  13. package/dist/cli/parse-input.d.ts +33 -0
  14. package/dist/cli/parse-input.js +191 -0
  15. package/dist/src/brief/index.d.ts +2 -0
  16. package/dist/src/brief/index.js +101 -0
  17. package/dist/src/classifier/confidence.d.ts +4 -0
  18. package/dist/src/classifier/confidence.js +23 -0
  19. package/dist/src/classifier/index.d.ts +11 -0
  20. package/dist/src/classifier/index.js +217 -0
  21. package/dist/src/classifier/patterns.d.ts +7 -0
  22. package/dist/src/classifier/patterns.js +81 -0
  23. package/dist/src/compression/engine.d.ts +23 -0
  24. package/dist/src/compression/engine.js +363 -0
  25. package/dist/src/index.d.ts +41 -0
  26. package/dist/src/index.js +120 -0
  27. package/dist/src/pipeline/index.d.ts +5 -0
  28. package/dist/src/pipeline/index.js +167 -0
  29. package/dist/src/scorer/index.d.ts +4 -0
  30. package/dist/src/scorer/index.js +136 -0
  31. package/dist/src/scorer/session-extractor.d.ts +2 -0
  32. package/dist/src/scorer/session-extractor.js +57 -0
  33. package/dist/src/strategy/selector.d.ts +3 -0
  34. package/dist/src/strategy/selector.js +158 -0
  35. package/dist/src/tokenizer/index.d.ts +18 -0
  36. package/dist/src/tokenizer/index.js +195 -0
  37. package/dist/src/types.d.ts +161 -0
  38. package/dist/src/types.js +5 -0
  39. package/dist/src/utils/index.d.ts +4 -0
  40. package/dist/src/utils/index.js +48 -0
  41. package/dist/src/validation/coherence.d.ts +3 -0
  42. package/dist/src/validation/coherence.js +87 -0
  43. package/license.md +14 -0
  44. package/package.json +76 -41
  45. package/index.js +0 -1
package/dist/src/tokenizer/index.js ADDED
@@ -0,0 +1,195 @@
+ "use strict";
+ // src/tokenizer/index.ts
+ // Model-aware token counting.
+ // Uses tiktoken for OpenAI models.
+ // Falls back to character estimation when tokenizer unavailable.
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     var desc = Object.getOwnPropertyDescriptor(m, k);
+     if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+         desc = { enumerable: true, get: function() { return m[k]; } };
+     }
+     Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+     Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+     o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || (function () {
+     var ownKeys = function(o) {
+         ownKeys = Object.getOwnPropertyNames || function (o) {
+             var ar = [];
+             for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+             return ar;
+         };
+         return ownKeys(o);
+     };
+     return function (mod) {
+         if (mod && mod.__esModule) return mod;
+         var result = {};
+         if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+         __setModuleDefault(result, mod);
+         return result;
+     };
+ })();
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.countTokens = countTokens;
+ exports.estimate = estimate;
+ exports.countMessagesTokens = countMessagesTokens;
+ exports.getModelMaxTokens = getModelMaxTokens;
+ const MODEL_MAX_TOKENS = {
+     // ── Anthropic Claude 4.x ─────────────────────────────────────────────────
+     'claude-opus-4-6': 200000,
+     'claude-sonnet-4-6': 200000,
+     // Claude 4.5 aliases
+     'claude-sonnet-4-5': 200000,
+     'claude-opus-4-5': 200000,
+     'claude-haiku-4-5': 200000,
+     // Claude 3.x
+     'claude-3-5-sonnet-20241022': 200000,
+     'claude-3-5-haiku-20241022': 200000,
+     'claude-3-opus-20240229': 200000,
+     'claude-3-sonnet-20240229': 200000,
+     'claude-3-haiku-20240307': 200000,
+     // ── OpenAI GPT-4o ─────────────────────────────────────────────────────────
+     'gpt-4o': 128000,
+     'gpt-4o-mini': 128000,
+     'gpt-4o-2024-11-20': 128000,
+     'gpt-4o-2024-08-06': 128000,
+     // OpenAI GPT-4
+     'gpt-4-turbo': 128000,
+     'gpt-4-turbo-preview': 128000,
+     'gpt-4': 8192,
+     'gpt-4-32k': 32768,
+     // OpenAI GPT-3.5
+     'gpt-3.5-turbo': 16385,
+     'gpt-3.5-turbo-16k': 16385,
+     // OpenAI o-series
+     'o1': 200000,
+     'o1-preview': 128000,
+     'o1-mini': 128000,
+     'o3': 200000,
+     'o3-mini': 200000,
+     // OpenAI GPT-4.1
+     'gpt-4.1': 1047576,
+     'gpt-4.1-mini': 1047576,
+     'gpt-4.1-nano': 1047576,
+     // ── Google Gemini ─────────────────────────────────────────────────────────
+     'gemini-2.5-pro': 1048576,
+     'gemini-2.5-flash': 1048576,
+     'gemini-2.0-flash': 1048576,
+     'gemini-1.5-pro': 2097152,
+     'gemini-1.5-flash': 1048576,
+     'gemini-1.0-pro': 32760,
+     // ── Meta Llama (via any OpenAI-compatible host) ───────────────────────────
+     'meta-llama/llama-3.3-70b-instruct': 131072,
+     'meta-llama/llama-3.1-70b-instruct': 131072,
+     'meta-llama/llama-3.1-8b-instruct': 131072,
+     'meta-llama/llama-3-70b-instruct': 8192,
+     // Short aliases used by some hosts
+     'llama-3.3-70b-instruct': 131072,
+     'llama-3.1-70b-instruct': 131072,
+     // ── Mistral ───────────────────────────────────────────────────────────────
+     'mistral-large-latest': 131072,
+     'mistral-medium-latest': 131072,
+     'mistral-small-latest': 131072,
+     'mistral-7b-instruct': 32768,
+     'mixtral-8x7b-instruct': 32768,
+     'mixtral-8x22b-instruct': 65536,
+     'codestral-latest': 262144,
+     // ── Groq-hosted models (same weights, Groq context limits) ────────────────
+     'llama3-70b-8192': 8192,
+     'llama3-8b-8192': 8192,
+     'mixtral-8x7b-32768': 32768,
+     'gemma2-9b-it': 8192,
+     // ── Cohere ────────────────────────────────────────────────────────────────
+     'command-r-plus': 128000,
+     'command-r': 128000,
+     'command': 4096,
+     // ── OpenRouter prefixed aliases (provider/model format) ───────────────────
+     'openai/gpt-4o': 128000,
+     'openai/gpt-4o-mini': 128000,
+     'openai/o1': 200000,
+     'openai/o3-mini': 200000,
+     'anthropic/claude-opus-4-6': 200000,
+     'anthropic/claude-sonnet-4-6': 200000,
+     'anthropic/claude-haiku-4-5': 200000,
+     'anthropic/claude-haiku-4.5': 200000, // dot variant
+     'anthropic/claude-sonnet-4-5': 200000,
+     'anthropic/claude-sonnet-4.5': 200000, // dot variant
+     'anthropic/claude-3-5-sonnet': 200000,
+     'anthropic/claude-3-5-haiku': 200000,
+     'anthropic/claude-3-haiku': 200000,
+     'google/gemini-2.5-pro': 1048576,
+     'google/gemini-2.5-flash': 1048576,
+     'google/gemini-2.0-flash': 1048576,
+     'meta-llama/llama-3.3-70b': 131072,
+     'mistralai/mistral-large': 131072,
+     'mistralai/mixtral-8x7b-instruct': 32768,
+     'cohere/command-r-plus': 128000,
+     'deepseek/deepseek-chat': 163840,
+     'deepseek/deepseek-r1': 163840,
+     // ── DeepSeek ─────────────────────────────────────────────────────────────
+     'deepseek-chat': 163840,
+     'deepseek-reasoner': 163840,
+ };
+ // Approximate: 1 token ≈ 4 characters for English text
+ // Used only in the fast-path estimate() function — never in production counts
+ const CHARS_PER_TOKEN = 4;
+ let tiktokenEncoder = null;
+ async function getTiktokenEncoder() {
+     if (tiktokenEncoder)
+         return tiktokenEncoder;
+     try {
+         const { get_encoding } = await Promise.resolve().then(() => __importStar(require('tiktoken')));
+         tiktokenEncoder = get_encoding('cl100k_base'); // works for GPT-4, Claude approximation
+         return tiktokenEncoder;
+     }
+     catch {
+         return null;
+     }
+ }
+ /**
+  * Exact token count using model-appropriate tokenizer.
+  * Falls back to character estimation if tokenizer unavailable.
+  */
+ async function countTokens(text, model) {
+     const encoder = await getTiktokenEncoder();
+     if (encoder) {
+         try {
+             return encoder.encode(text).length;
+         }
+         catch {
+             // fall through to estimate
+         }
+     }
+     return estimate(text);
+ }
+ /**
+  * Fast approximate count — used only for UI/real-time displays.
+  * Never use this for compression decisions.
+  */
+ function estimate(text) {
+     return Math.ceil(text.length / CHARS_PER_TOKEN);
+ }
+ /**
+  * Count tokens across an entire messages array.
+  */
+ async function countMessagesTokens(messages, model) {
+     let total = 0;
+     for (const msg of messages) {
+         const text = typeof msg.content === 'string'
+             ? msg.content
+             : JSON.stringify(msg.content);
+         total += await countTokens(text, model);
+         total += 4; // per-message overhead (role tokens + formatting)
+     }
+     return total;
+ }
+ function getModelMaxTokens(model) {
+     return MODEL_MAX_TOKENS[model] ?? 128000;
+ }
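
For orientation, a minimal usage sketch of the tokenizer module added above. The import path and the sample input are assumptions for illustration; only the four exported functions and the behaviour visible in the diff are taken as given, and whether the package root re-exports them is not shown here.

```ts
// Sketch only: loads the compiled tokenizer directly from its dist path (an assumption).
import { countTokens, estimate, countMessagesTokens, getModelMaxTokens } from './dist/src/tokenizer';

async function demo(): Promise<void> {
  const text = 'Summarize the last deployment failure.'; // placeholder input
  const exact = await countTokens(text, 'gpt-4o');        // tiktoken count, or estimate() fallback
  const rough = estimate(text);                           // ceil(length / 4), UI display only
  const total = await countMessagesTokens(
    [{ role: 'user' as const, content: text }],
    'gpt-4o',
  );                                                      // adds 4 tokens of per-message overhead
  const budget = getModelMaxTokens('gpt-4o');             // 128000 from the lookup table
  console.log({ exact, rough, total, utilization: total / budget });
}

demo();
```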
package/dist/src/types.d.ts ADDED
@@ -0,0 +1,161 @@
+ export interface ContentBlock {
+     type: 'text' | 'tool_use' | 'tool_result' | 'image';
+     text?: string;
+     id?: string;
+     tool_use_id?: string;
+     name?: string;
+     input?: Record<string, unknown>;
+     content?: string;
+ }
+ export interface LLMMessage {
+     role: 'system' | 'user' | 'assistant';
+     content: string | ContentBlock[];
+ }
+ export type MessageClassification = 'SYSTEM_CONSTRAINT' | 'TASK_DEFINITION' | 'TOOL_OUTPUT_ACTIVE' | 'TOOL_OUTPUT_STALE' | 'ERROR_ACTIVE' | 'ERROR_RESOLVED' | 'REASONING_INTERMEDIATE' | 'DECISION_FINAL' | 'USER_CORRECTION' | 'PROGRESS_MARKER' | 'CONVERSATIONAL';
+ export type CompressionStrategy = 'PRESERVE' | 'DROP' | 'EXTRACT_RESULT' | 'COLLAPSE_TO_MARKER' | 'SUMMARIZE' | 'DEDUPLICATE';
+ export type RelevanceTier = 'CRITICAL' | 'HIGH' | 'MEDIUM' | 'LOW' | 'NEGLIGIBLE';
+ export type ConfidenceTier = 'HIGH' | 'MEDIUM' | 'LOW';
+ export interface AnnotatedMessage {
+     id: string;
+     originalIndex: number;
+     original: LLMMessage;
+     classification: MessageClassification;
+     classificationConfidence: number;
+     classificationMethod: 'rule' | 'heuristic';
+     relevanceScore: number;
+     relevanceFactors: {
+         recencyScore: number;
+         referenceCount: number;
+         taskProximity: number;
+         isPinned: boolean;
+     };
+     tokenCount: number;
+     compressedTokenCount?: number;
+     compressionStrategy: CompressionStrategy;
+     compressionApplied: boolean;
+     compressedContent?: string;
+     resolvedBy?: string;
+     duplicateOf?: string;
+     summarizedInto?: string;
+ }
+ export interface SessionState {
+     sessionId: string;
+     createdAt: Date;
+     lastUpdatedAt: Date;
+     taskContext: {
+         originalGoal: string | null;
+         currentSubtask: string | null;
+         completedSubtasks: string[];
+         activeConstraints: string[];
+         technologyStack: string[];
+     };
+     tokenBudget: {
+         modelMaxTokens: number;
+         currentUsage: number;
+         utilizationPercent: number;
+         warningThreshold: number;
+         criticalThreshold: number;
+     };
+     messages: Map<string, AnnotatedMessage>;
+     messageOrder: string[];
+     compressionHistory: CompressionEvent[];
+ }
+ export interface CompressionDecision {
+     messageId: string;
+     originalIndex: number;
+     classification: MessageClassification;
+     strategy: CompressionStrategy;
+     reasoning: string;
+     tokensBefore: number;
+     tokensAfter: number;
+ }
+ export interface CompressionEvent {
+     eventId: string;
+     timestamp: Date;
+     trigger: 'manual' | 'threshold' | 'api_call';
+     messagesEvaluated: number;
+     messagesCompressed: number;
+     messagesDropped: number;
+     messagesPreserved: number;
+     tokensBefore: number;
+     tokensAfter: number;
+     tokensSaved: number;
+     savingsPercent: number;
+     decisions: CompressionDecision[];
+ }
+ export interface CompressionOptions {
+     warningThreshold?: number;
+     criticalThreshold?: number;
+     preserveCorrections?: boolean;
+     compressionMode?: 'auto' | 'manual' | 'suggest-only';
+     useLLMSummarization?: boolean;
+     actualInputTokens?: number;
+ }
+ export interface ContextInput {
+     messages: LLMMessage[];
+     model: string;
+     options?: CompressionOptions;
+ }
+ export interface CompressedContext {
+     messages: LLMMessage[];
+     summary: {
+         tokensBefore: number;
+         tokensAfter: number;
+         tokensSaved: number;
+         savingsPercent: number;
+         messagesRemoved: number;
+         compressionTriggered: boolean;
+     };
+     sessionId: string;
+ }
+ export interface ContextAnalysis {
+     sessionId: string;
+     annotatedMessages: AnnotatedMessage[];
+     breakdown: {
+         byClassification: Partial<Record<MessageClassification, {
+             count: number;
+             tokens: number;
+             percentOfTotal: number;
+         }>>;
+         byCompressionStrategy: Partial<Record<CompressionStrategy, {
+             count: number;
+             tokensBefore: number;
+             tokensAfter: number;
+         }>>;
+     };
+     topConsumers: Array<{
+         messageId: string;
+         originalIndex: number;
+         classification: MessageClassification;
+         tokenCount: number;
+         percentOfTotal: number;
+         compressionOpportunity: 'high' | 'medium' | 'low' | 'none';
+         reason: string;
+     }>;
+     recommendation: {
+         shouldCompress: boolean;
+         urgency: 'none' | 'suggested' | 'recommended' | 'critical';
+         projectedSavings: number;
+         projectedSavingsPercent: number;
+         message: string;
+     };
+     sessionState: SessionState;
+     compressionHistory: CompressionEvent[];
+     sessionBrief: string;
+ }
+ export interface ValidationWarning {
+     type: string;
+     message: string;
+     messageId?: string;
+ }
+ export interface ValidationBlocker {
+     type: string;
+     message: string;
+     recoveryAction: 'RESTORE_MESSAGE' | 'ABORT_COMPRESSION';
+     messageId?: string;
+ }
+ export interface ValidationResult {
+     passed: boolean;
+     warnings: ValidationWarning[];
+     blockers: ValidationBlocker[];
+ }
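
As a quick illustration of how these declarations compose, here is a hedged sketch of a `ContextInput` value. The import specifier assumes the package root re-exports the types (the entry point is not shown in this diff), and the model name and option values are placeholders.

```ts
import type { ContextInput, LLMMessage } from '@grapine.ai/contextprune';

// Placeholder conversation; values are illustrative only.
const messages: LLMMessage[] = [
  { role: 'system', content: 'You are a deployment assistant.' },
  { role: 'user', content: 'Fix the failing migration in orders-service.' },
];

const input: ContextInput = {
  messages,
  model: 'claude-sonnet-4-5',
  options: {
    compressionMode: 'suggest-only',
    preserveCorrections: true,
  },
};
```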
package/dist/src/types.js ADDED
@@ -0,0 +1,5 @@
+ "use strict";
+ // src/types.ts
+ // Canonical types for ContextPrune. Every file imports from here.
+ // Do not redefine types locally.
+ Object.defineProperty(exports, "__esModule", { value: true });
package/dist/src/utils/index.d.ts ADDED
@@ -0,0 +1,4 @@
+ import type { LLMMessage } from '../types';
+ export declare function generateId(): string;
+ export declare function extractText(message: LLMMessage): string;
+ export declare function extractKeywords(text: string): string[];
package/dist/src/utils/index.js ADDED
@@ -0,0 +1,48 @@
+ "use strict";
+ // src/utils/index.ts
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.generateId = generateId;
+ exports.extractText = extractText;
+ exports.extractKeywords = extractKeywords;
+ const crypto_1 = require("crypto");
+ function generateId() {
+     return (0, crypto_1.randomUUID)();
+ }
+ function extractText(message) {
+     if (typeof message.content === 'string')
+         return message.content;
+     return message.content
+         .map((b) => {
+         if (b.type === 'text')
+             return b.text ?? '';
+         if (b.type === 'thinking')
+             return b.thinking ?? '';
+         if (b.type === 'tool_use')
+             return JSON.stringify(b.input ?? {});
+         if (b.type === 'tool_result') {
+             const c = b.content;
+             if (typeof c === 'string')
+                 return c;
+             if (Array.isArray(c))
+                 return c.map((x) => x.text ?? '').join('\n');
+         }
+         return '';
+     })
+         .filter(Boolean)
+         .join('\n');
+ }
+ const STOP_WORDS = new Set([
+     'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with',
+     'by', 'from', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has',
+     'had', 'do', 'does', 'did', 'will', 'would', 'could', 'should', 'may', 'might',
+     'that', 'this', 'it', 'i', 'you', 'we', 'they', 'he', 'she', 'what', 'which',
+     'not', 'as', 'if', 'so', 'up', 'out', 'get', 'use', 'can', 'just', 'then', 'than',
+ ]);
+ function extractKeywords(text) {
+     return text
+         .toLowerCase()
+         .replace(/[^a-z0-9_./\s-]/g, ' ')
+         .split(/\s+/)
+         .filter((w) => w.length > 3 && !STOP_WORDS.has(w))
+         .slice(0, 30);
+ }
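
A short sketch of the two text helpers above; the message literal is invented for illustration and the import path is an assumption.

```ts
import { extractText, extractKeywords } from './dist/src/utils';

// Sketch only: extractText flattens content blocks to plain text joined by newlines;
// extractKeywords lower-cases it, replaces most punctuation with spaces (keeping . / _ -),
// drops stop words and tokens of three characters or fewer, and returns at most 30 terms.
const msg = {
  role: 'assistant' as const,
  content: [
    { type: 'text' as const, text: 'Applied the database migration for orders-service.' },
    { type: 'tool_result' as const, content: 'migration 0042 applied successfully' },
  ],
};

const flat = extractText(msg);
const keywords = extractKeywords(flat); // e.g. ['applied', 'database', 'migration', ...]
```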
package/dist/src/validation/coherence.d.ts ADDED
@@ -0,0 +1,3 @@
+ import type { AnnotatedMessage, LLMMessage, SessionState, CompressionDecision, ValidationResult, ValidationBlocker } from '../types';
+ export declare function validate(original: AnnotatedMessage[], compressed: LLMMessage[], decisions: CompressionDecision[], _sessionState: SessionState): ValidationResult;
+ export declare function restoreMessages(original: AnnotatedMessage[], compressed: LLMMessage[], blockers: ValidationBlocker[]): LLMMessage[];
package/dist/src/validation/coherence.js ADDED
@@ -0,0 +1,87 @@
+ "use strict";
+ // src/validation/coherence.ts
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.validate = validate;
+ exports.restoreMessages = restoreMessages;
+ const utils_1 = require("../utils");
+ function validate(original, compressed, decisions, _sessionState) {
+     const warnings = [];
+     const blockers = [];
+     // Check 1: Constraints not dropped
+     const constraints = original.filter((m) => m.classification === 'SYSTEM_CONSTRAINT' || m.classification === 'USER_CORRECTION');
+     for (const c of constraints) {
+         const decision = decisions.find((d) => d.messageId === c.id);
+         if (decision?.strategy === 'DROP' || decision?.strategy === 'DEDUPLICATE') {
+             blockers.push({
+                 type: 'CONSTRAINT_DROPPED',
+                 message: `Critical: ${c.classification} at turn ${c.originalIndex} was dropped.`,
+                 recoveryAction: 'RESTORE_MESSAGE',
+                 messageId: c.id,
+             });
+         }
+     }
+     // Check 2: Task definition preserved
+     const taskDef = original.find((m) => m.classification === 'TASK_DEFINITION');
+     if (taskDef) {
+         const d = decisions.find((dec) => dec.messageId === taskDef.id);
+         if (d?.strategy === 'DROP') {
+             blockers.push({
+                 type: 'TASK_DEFINITION_DROPPED',
+                 message: 'Critical: task definition was dropped.',
+                 recoveryAction: 'RESTORE_MESSAGE',
+                 messageId: taskDef.id,
+             });
+         }
+     }
+     // Check 3: Active errors preserved
+     const activeErrors = original.filter((m) => m.classification === 'ERROR_ACTIVE');
+     for (const err of activeErrors) {
+         const d = decisions.find((dec) => dec.messageId === err.id);
+         if (d && d.strategy !== 'PRESERVE') {
+             blockers.push({
+                 type: 'ACTIVE_ERROR_COMPRESSED',
+                 message: `Active error at turn ${err.originalIndex} was not preserved.`,
+                 recoveryAction: 'RESTORE_MESSAGE',
+                 messageId: err.id,
+             });
+         }
+     }
+     // Check 4: Non-empty output
+     if (compressed.length === 0) {
+         blockers.push({
+             type: 'EMPTY_OUTPUT',
+             message: 'Compression produced empty context. Aborting.',
+             recoveryAction: 'ABORT_COMPRESSION',
+         });
+     }
+     // Warning: low savings
+     const totalTokens = original.reduce((s, m) => s + m.tokenCount, 0);
+     const totalSaved = decisions.reduce((s, d) => s + (d.tokensBefore - d.tokensAfter), 0);
+     if (totalSaved / totalTokens < 0.10 && original.length > 10) {
+         warnings.push({
+             type: 'LOW_SAVINGS',
+             message: `Only ${Math.round((totalSaved / totalTokens) * 100)}% savings achieved. Context may be genuinely dense.`,
+         });
+     }
+     return { passed: blockers.length === 0, warnings, blockers };
+ }
+ function restoreMessages(original, compressed, blockers) {
+     const toRestore = blockers
+         .filter((b) => b.recoveryAction === 'RESTORE_MESSAGE' && b.messageId)
+         .map((b) => original.find((m) => m.id === b.messageId))
+         .filter((m) => m !== undefined);
+     if (toRestore.length === 0)
+         return compressed;
+     const result = [...compressed];
+     for (const message of toRestore) {
+         const compressedTexts = result.map((m) => (0, utils_1.extractText)(m));
+         const insertAfter = compressedTexts.reduce((best, _, i) => {
+             const orig = original.find((m) => (0, utils_1.extractText)(m.original) === compressedTexts[i]);
+             if (orig && orig.originalIndex < message.originalIndex)
+                 return i;
+             return best;
+         }, -1);
+         result.splice(insertAfter + 1, 0, message.original);
+     }
+     return result;
+ }
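
To make the recovery contract above concrete, a hedged sketch of how a caller might wire `validate` and `restoreMessages` together. The wrapper function, its argument names, and the import paths are hypothetical; the real pipeline integration is not part of this diff.

```ts
import type { AnnotatedMessage, LLMMessage, CompressionDecision, SessionState } from './dist/src/types';
import { validate, restoreMessages } from './dist/src/validation/coherence';

// Hypothetical caller: keep the compressed context only if it passes the coherence
// checks; otherwise repair or abandon it according to each blocker's recoveryAction.
function applyWithCoherenceCheck(
  annotated: AnnotatedMessage[],
  compressed: LLMMessage[],
  decisions: CompressionDecision[],
  session: SessionState,
): LLMMessage[] {
  const result = validate(annotated, compressed, decisions, session);
  if (result.passed) return compressed;
  // ABORT_COMPRESSION: fall back to the original, uncompressed messages.
  if (result.blockers.some((b) => b.recoveryAction === 'ABORT_COMPRESSION')) {
    return annotated.map((m) => m.original);
  }
  // RESTORE_MESSAGE: re-insert the dropped critical messages near their original position.
  return restoreMessages(annotated, compressed, result.blockers);
}
```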
package/license.md ADDED
@@ -0,0 +1,14 @@
+ ## END USER LICENSE AGREEMENT (EULA)
+
+ **1. Grant of License:** Grapine.AI ("Licensor") grants you a non-exclusive, royalty-free, non-transferable license to use the compiled version of this package for any purpose, including commercial applications.
+
+ **2. Restrictions:** You shall not:
+ - Decompile, reverse engineer, or disassemble the software to attempt to derive the source code.
+ - Modify, alter, or create derivative works of the software.
+ - Redistribute the package as a standalone product without prior written consent.
+
+ **3. Ownership:** All intellectual property rights, including the source code, remain the exclusive property of the Licensor.
+
+ **4. Disclaimer of Warranty:** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED.
+
+ **5. Limitation of Liability:** In no event shall the Licensor be liable for any damages arising out of the use or inability to use this software.
package/package.json CHANGED
@@ -1,41 +1,76 @@
- {
-   "name": "@grapine.ai/contextprune",
-   "version": "0.1.0",
-   "description": "Intelligent context window management for LLM applications",
-   "main": "dist/src/index.js",
-   "types": "dist/src/index.d.ts",
-   "bin": {
-     "contextprune": "dist/cli/index.js"
-   },
-   "scripts": {
-     "build": "tsc",
-     "dev": "tsx watch src/index.ts",
-     "test": "vitest",
-     "test:run": "vitest run",
-     "lint": "tsc --noEmit",
-     "cli": "tsx cli/index.ts"
-   },
-   "keywords": [
-     "llm",
-     "context",
-     "compression",
-     "ai",
-     "tokens"
-   ],
-   "license": "MIT",
-   "dependencies": {
-     "tiktoken": "^1.0.15",
-     "commander": "^11.0.0",
-     "chalk": "^5.3.0",
-     "ora": "^7.0.1",
-     "open": "^9.1.0"
-   },
-   "devDependencies": {
-     "typescript": "^5.3.0",
-     "@types/node": "^20.0.0",
-     "vitest": "^1.0.0",
-     "tsx": "^4.0.0"
-   },
-   "author": "",
-   "type": "commonjs"
- }
+ {
+   "name": "@grapine.ai/contextprune",
+   "version": "0.1.1",
+   "description": "Garbage collection for LLM context windows.",
+   "author": "Grapine AI <hello@grapine.ai>",
+   "homepage": "https://www.contextprune.com",
+   "main": "dist/src/index.js",
+   "types": "dist/src/index.d.ts",
+   "bin": {
+     "contextprune": "./dist/cli/index.js"
+   },
+   "scripts": {
+     "build": "tsc -p tsconfig.prod.json && node -e \"require('fs').cpSync('cli/dashboard', 'dist/cli/dashboard', {recursive:true})\"",
+     "dev": "tsx watch src/index.ts",
+     "test": "vitest",
+     "test:run": "vitest run",
+     "lint": "tsc --noEmit",
+     "cli": "tsx cli/index.ts"
+   },
+   "keywords": [
+     "llm",
+     "context",
+     "compression",
+     "ai",
+     "tokens",
+     "context-window",
+     "context",
+     "contextprune",
+     "contextmanagement",
+     "anthropic",
+     "openai",
+     "claude",
+     "agent",
+     "agentic",
+     "pruning",
+     "summarization"
+   ],
+   "license": "SEE LICENSE IN LICENSE.md",
+   "repository": {
+     "type": "git",
+     "url": "https://github.com/grapine-ai/contextprune-examples-ts"
+   },
+   "bugs": {
+     "url": "https://github.com/grapine-ai/contextprune-examples-ts/issues"
+   },
+   "files": [
+     "dist/",
+     "README.md",
+     "license.md"
+   ],
+   "exports": {
+     ".": {
+       "require": "./dist/src/index.js",
+       "import": "./dist/src/index.mjs",
+       "types": "./dist/src/index.d.ts"
+     }
+   },
+   "engines": {
+     "node": ">=18.0.0"
+   },
+   "dependencies": {
+     "@anthropic-ai/sdk": "^0.78.0",
+     "chalk": "^5.3.0",
+     "commander": "^11.0.0",
+     "open": "^9.1.0",
+     "openai": "^6.27.0",
+     "ora": "^7.0.1",
+     "tiktoken": "^1.0.15"
+   },
+   "devDependencies": {
+     "@types/node": "^20.0.0",
+     "tsx": "^4.0.0",
+     "typescript": "^5.3.0",
+     "vitest": "^1.0.0"
+   }
+ }
package/index.js DELETED
@@ -1 +0,0 @@
- module.exports = {};