@hasna/terminal 2.3.2 → 3.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/dist/ai.js +56 -82
  2. package/dist/cache.js +3 -2
  3. package/dist/cli.js +1 -1
  4. package/dist/compression.js +8 -30
  5. package/dist/context-hints.js +20 -10
  6. package/dist/diff-cache.js +1 -1
  7. package/dist/discover.js +1 -1
  8. package/dist/economy.js +37 -5
  9. package/dist/expand-store.js +7 -1
  10. package/dist/mcp/server.js +44 -68
  11. package/dist/output-processor.js +10 -7
  12. package/dist/providers/anthropic.js +6 -2
  13. package/dist/providers/cerebras.js +6 -93
  14. package/dist/providers/groq.js +6 -93
  15. package/dist/providers/index.js +85 -36
  16. package/dist/providers/openai-compat.js +93 -0
  17. package/dist/providers/xai.js +6 -93
  18. package/dist/tokens.js +17 -0
  19. package/dist/tool-profiles.js +9 -2
  20. package/package.json +1 -1
  21. package/src/ai.ts +60 -90
  22. package/src/cache.ts +3 -2
  23. package/src/cli.tsx +1 -1
  24. package/src/compression.ts +8 -35
  25. package/src/context-hints.ts +20 -10
  26. package/src/diff-cache.ts +1 -1
  27. package/src/discover.ts +1 -1
  28. package/src/economy.ts +37 -5
  29. package/src/expand-store.ts +8 -1
  30. package/src/mcp/server.ts +45 -73
  31. package/src/output-processor.ts +11 -8
  32. package/src/providers/anthropic.ts +6 -2
  33. package/src/providers/base.ts +2 -0
  34. package/src/providers/cerebras.ts +6 -105
  35. package/src/providers/groq.ts +6 -105
  36. package/src/providers/index.ts +84 -33
  37. package/src/providers/openai-compat.ts +109 -0
  38. package/src/providers/xai.ts +6 -105
  39. package/src/tokens.ts +18 -0
  40. package/src/tool-profiles.ts +9 -2
  41. package/src/compression.test.ts +0 -49
  42. package/src/output-router.ts +0 -56
  43. package/src/parsers/base.ts +0 -72
  44. package/src/parsers/build.ts +0 -73
  45. package/src/parsers/errors.ts +0 -107
  46. package/src/parsers/files.ts +0 -91
  47. package/src/parsers/git.ts +0 -101
  48. package/src/parsers/index.ts +0 -66
  49. package/src/parsers/parsers.test.ts +0 -153
  50. package/src/parsers/tests.ts +0 -98
@@ -1,108 +1,9 @@
1
- // Groq provider — uses OpenAI-compatible API
2
- // Ultra-fast inference. Supports Llama, Qwen, Kimi models.
1
+ // Groq provider — ultra-fast inference
2
+ import { OpenAICompatibleProvider } from "./openai-compat.js";
3
3
 
4
- import type { LLMProvider, ProviderOptions, StreamCallbacks } from "./base.js";
5
-
6
- const GROQ_BASE_URL = "https://api.groq.com/openai/v1";
7
- const DEFAULT_MODEL = "openai/gpt-oss-120b";
8
-
9
- export class GroqProvider implements LLMProvider {
4
+ export class GroqProvider extends OpenAICompatibleProvider {
10
5
  readonly name = "groq";
11
- private apiKey: string;
12
-
13
- constructor() {
14
- this.apiKey = process.env.GROQ_API_KEY ?? "";
15
- }
16
-
17
- isAvailable(): boolean {
18
- return !!process.env.GROQ_API_KEY;
19
- }
20
-
21
- async complete(prompt: string, options: ProviderOptions): Promise<string> {
22
- const model = options.model ?? DEFAULT_MODEL;
23
- const res = await fetch(`${GROQ_BASE_URL}/chat/completions`, {
24
- method: "POST",
25
- headers: {
26
- "Content-Type": "application/json",
27
- Authorization: `Bearer ${this.apiKey}`,
28
- },
29
- body: JSON.stringify({
30
- model,
31
- max_tokens: options.maxTokens ?? 256,
32
- messages: [
33
- { role: "system", content: options.system },
34
- { role: "user", content: prompt },
35
- ],
36
- }),
37
- });
38
-
39
- if (!res.ok) {
40
- const text = await res.text();
41
- throw new Error(`Groq API error ${res.status}: ${text}`);
42
- }
43
-
44
- const json = (await res.json()) as any;
45
- return (json.choices?.[0]?.message?.content ?? "").trim();
46
- }
47
-
48
- async stream(prompt: string, options: ProviderOptions, callbacks: StreamCallbacks): Promise<string> {
49
- const model = options.model ?? DEFAULT_MODEL;
50
- const res = await fetch(`${GROQ_BASE_URL}/chat/completions`, {
51
- method: "POST",
52
- headers: {
53
- "Content-Type": "application/json",
54
- Authorization: `Bearer ${this.apiKey}`,
55
- },
56
- body: JSON.stringify({
57
- model,
58
- max_tokens: options.maxTokens ?? 256,
59
- stream: true,
60
- messages: [
61
- { role: "system", content: options.system },
62
- { role: "user", content: prompt },
63
- ],
64
- }),
65
- });
66
-
67
- if (!res.ok) {
68
- const text = await res.text();
69
- throw new Error(`Groq API error ${res.status}: ${text}`);
70
- }
71
-
72
- let result = "";
73
- const reader = res.body?.getReader();
74
- if (!reader) throw new Error("No response body");
75
-
76
- const decoder = new TextDecoder();
77
- let buffer = "";
78
-
79
- while (true) {
80
- const { done, value } = await reader.read();
81
- if (done) break;
82
-
83
- buffer += decoder.decode(value, { stream: true });
84
- const lines = buffer.split("\n");
85
- buffer = lines.pop() ?? "";
86
-
87
- for (const line of lines) {
88
- const trimmed = line.trim();
89
- if (!trimmed.startsWith("data: ")) continue;
90
- const data = trimmed.slice(6);
91
- if (data === "[DONE]") break;
92
-
93
- try {
94
- const parsed = JSON.parse(data) as any;
95
- const delta = parsed.choices?.[0]?.delta?.content;
96
- if (delta) {
97
- result += delta;
98
- callbacks.onToken(result.trim());
99
- }
100
- } catch {
101
- // skip malformed chunks
102
- }
103
- }
104
- }
105
-
106
- return result.trim();
107
- }
6
+ protected readonly baseUrl = "https://api.groq.com/openai/v1";
7
+ protected readonly defaultModel = "openai/gpt-oss-120b";
8
+ protected readonly apiKeyEnvVar = "GROQ_API_KEY";
108
9
  }
@@ -1,6 +1,6 @@
1
- // Provider auto-detection and management
1
+ // Provider auto-detection and management — with fallback on failure
2
2
 
3
- import type { LLMProvider, ProviderConfig } from "./base.js";
3
+ import type { LLMProvider, ProviderConfig, ProviderOptions, StreamCallbacks } from "./base.js";
4
4
  import { DEFAULT_PROVIDER_CONFIG } from "./base.js";
5
5
  import { AnthropicProvider } from "./anthropic.js";
6
6
  import { CerebrasProvider } from "./cerebras.js";
@@ -11,10 +11,11 @@ export type { LLMProvider, ProviderOptions, StreamCallbacks, ProviderConfig } fr
11
11
  export { DEFAULT_PROVIDER_CONFIG } from "./base.js";
12
12
 
13
13
  let _provider: LLMProvider | null = null;
14
+ let _failedProviders: Set<string> = new Set();
14
15
 
15
16
  /** Get the active LLM provider. Auto-detects based on available API keys. */
16
17
  export function getProvider(config?: ProviderConfig): LLMProvider {
17
- if (_provider) return _provider;
18
+ if (_provider && !_failedProviders.has(_provider.name)) return _provider;
18
19
 
19
20
  const cfg = config ?? DEFAULT_PROVIDER_CONFIG;
20
21
  _provider = resolveProvider(cfg);
@@ -24,46 +25,51 @@ export function getProvider(config?: ProviderConfig): LLMProvider {
24
25
  /** Reset the cached provider (useful when config changes). */
25
26
  export function resetProvider() {
26
27
  _provider = null;
28
+ _failedProviders.clear();
29
+ }
30
+
31
+ /** Get a fallback-wrapped provider that tries alternatives on failure */
32
+ export function getProviderWithFallback(config?: ProviderConfig): LLMProvider {
33
+ const primary = getProvider(config);
34
+ return new FallbackProvider(primary);
27
35
  }
28
36
 
29
37
  function resolveProvider(config: ProviderConfig): LLMProvider {
30
- if (config.provider === "cerebras") {
31
- const p = new CerebrasProvider();
32
- if (!p.isAvailable()) throw new Error("CEREBRAS_API_KEY not set. Run: export CEREBRAS_API_KEY=your-key");
33
- return p;
38
+ if (config.provider !== "auto") {
39
+ const providers: Record<string, () => LLMProvider> = {
40
+ cerebras: () => new CerebrasProvider(),
41
+ anthropic: () => new AnthropicProvider(),
42
+ groq: () => new GroqProvider(),
43
+ xai: () => new XaiProvider(),
44
+ };
45
+ const factory = providers[config.provider];
46
+ if (factory) {
47
+ const p = factory();
48
+ if (!p.isAvailable()) throw new Error(`${config.provider.toUpperCase()}_API_KEY not set`);
49
+ return p;
50
+ }
34
51
  }
35
52
 
36
- if (config.provider === "anthropic") {
37
- const p = new AnthropicProvider();
38
- if (!p.isAvailable()) throw new Error("ANTHROPIC_API_KEY not set. Run: export ANTHROPIC_API_KEY=your-key");
39
- return p;
40
- }
53
+ // auto: prefer Cerebras, then xAI, then Groq, then Anthropic — skip failed
54
+ const candidates: LLMProvider[] = [
55
+ new CerebrasProvider(),
56
+ new XaiProvider(),
57
+ new GroqProvider(),
58
+ new AnthropicProvider(),
59
+ ];
41
60
 
42
- if (config.provider === "groq") {
43
- const p = new GroqProvider();
44
- if (!p.isAvailable()) throw new Error("GROQ_API_KEY not set. Run: export GROQ_API_KEY=your-key");
45
- return p;
61
+ for (const p of candidates) {
62
+ if (p.isAvailable() && !_failedProviders.has(p.name)) return p;
46
63
  }
47
64
 
48
- if (config.provider === "xai") {
49
- const p = new XaiProvider();
50
- if (!p.isAvailable()) throw new Error("XAI_API_KEY not set. Run: export XAI_API_KEY=your-key");
51
- return p;
65
+ // If all failed, clear failures and try again
66
+ if (_failedProviders.size > 0) {
67
+ _failedProviders.clear();
68
+ for (const p of candidates) {
69
+ if (p.isAvailable()) return p;
70
+ }
52
71
  }
53
72
 
54
- // auto: prefer Cerebras (qwen-235b, fast + accurate), then xAI, then Groq, then Anthropic
55
- const cerebras = new CerebrasProvider();
56
- if (cerebras.isAvailable()) return cerebras;
57
-
58
- const xai = new XaiProvider();
59
- if (xai.isAvailable()) return xai;
60
-
61
- const groq = new GroqProvider();
62
- if (groq.isAvailable()) return groq;
63
-
64
- const anthropic = new AnthropicProvider();
65
- if (anthropic.isAvailable()) return anthropic;
66
-
67
73
  throw new Error(
68
74
  "No API key found. Set one of:\n" +
69
75
  " export CEREBRAS_API_KEY=your-key (free, open-source)\n" +
@@ -73,6 +79,51 @@ function resolveProvider(config: ProviderConfig): LLMProvider {
73
79
  );
74
80
  }
75
81
 
82
+ /** Provider wrapper that falls back to alternatives on API errors */
83
+ class FallbackProvider implements LLMProvider {
84
+ readonly name: string;
85
+ private primary: LLMProvider;
86
+
87
+ constructor(primary: LLMProvider) {
88
+ this.primary = primary;
89
+ this.name = primary.name;
90
+ }
91
+
92
+ isAvailable(): boolean {
93
+ return this.primary.isAvailable();
94
+ }
95
+
96
+ async complete(prompt: string, options: ProviderOptions): Promise<string> {
97
+ try {
98
+ return await this.primary.complete(prompt, options);
99
+ } catch (err) {
100
+ const fallback = this.getFallback();
101
+ if (fallback) return fallback.complete(prompt, options);
102
+ throw err;
103
+ }
104
+ }
105
+
106
+ async stream(prompt: string, options: ProviderOptions, callbacks: StreamCallbacks): Promise<string> {
107
+ try {
108
+ return await this.primary.stream(prompt, options, callbacks);
109
+ } catch (err) {
110
+ const fallback = this.getFallback();
111
+ if (fallback) return fallback.complete(prompt, options); // fallback doesn't stream
112
+ throw err;
113
+ }
114
+ }
115
+
116
+ private getFallback(): LLMProvider | null {
117
+ _failedProviders.add(this.primary.name);
118
+ _provider = null; // force re-resolve
119
+ try {
120
+ const next = getProvider();
121
+ if (next.name !== this.primary.name) return next;
122
+ } catch {}
123
+ return null;
124
+ }
125
+ }
126
+
76
127
  /** List available providers (for onboarding UI). */
77
128
  export function availableProviders(): { name: string; available: boolean }[] {
78
129
  return [
@@ -0,0 +1,109 @@
1
+ // Shared base class for OpenAI-compatible providers (Cerebras, Groq, xAI)
2
+ // Eliminates ~200 lines of duplicated streaming SSE parsing
3
+
4
+ import type { LLMProvider, ProviderOptions, StreamCallbacks } from "./base.js";
5
+
6
+ export abstract class OpenAICompatibleProvider implements LLMProvider {
7
+ abstract readonly name: string;
8
+ protected abstract readonly baseUrl: string;
9
+ protected abstract readonly defaultModel: string;
10
+ protected abstract readonly apiKeyEnvVar: string;
11
+
12
+ protected get apiKey(): string {
13
+ return process.env[this.apiKeyEnvVar] ?? "";
14
+ }
15
+
16
+ isAvailable(): boolean {
17
+ return !!process.env[this.apiKeyEnvVar];
18
+ }
19
+
20
+ async complete(prompt: string, options: ProviderOptions): Promise<string> {
21
+ const res = await fetch(`${this.baseUrl}/chat/completions`, {
22
+ method: "POST",
23
+ headers: {
24
+ "Content-Type": "application/json",
25
+ Authorization: `Bearer ${this.apiKey}`,
26
+ },
27
+ body: JSON.stringify({
28
+ model: options.model ?? this.defaultModel,
29
+ max_tokens: options.maxTokens ?? 256,
30
+ temperature: options.temperature ?? 0,
31
+ ...(options.stop ? { stop: options.stop } : {}),
32
+ messages: [
33
+ { role: "system", content: options.system },
34
+ { role: "user", content: prompt },
35
+ ],
36
+ }),
37
+ });
38
+
39
+ if (!res.ok) {
40
+ const text = await res.text();
41
+ throw new Error(`${this.name} API error ${res.status}: ${text}`);
42
+ }
43
+
44
+ const json = (await res.json()) as any;
45
+ return (json.choices?.[0]?.message?.content ?? "").trim();
46
+ }
47
+
48
+ async stream(prompt: string, options: ProviderOptions, callbacks: StreamCallbacks): Promise<string> {
49
+ const res = await fetch(`${this.baseUrl}/chat/completions`, {
50
+ method: "POST",
51
+ headers: {
52
+ "Content-Type": "application/json",
53
+ Authorization: `Bearer ${this.apiKey}`,
54
+ },
55
+ body: JSON.stringify({
56
+ model: options.model ?? this.defaultModel,
57
+ max_tokens: options.maxTokens ?? 256,
58
+ temperature: options.temperature ?? 0,
59
+ stream: true,
60
+ ...(options.stop ? { stop: options.stop } : {}),
61
+ messages: [
62
+ { role: "system", content: options.system },
63
+ { role: "user", content: prompt },
64
+ ],
65
+ }),
66
+ });
67
+
68
+ if (!res.ok) {
69
+ const text = await res.text();
70
+ throw new Error(`${this.name} API error ${res.status}: ${text}`);
71
+ }
72
+
73
+ let result = "";
74
+ const reader = res.body?.getReader();
75
+ if (!reader) throw new Error("No response body");
76
+
77
+ const decoder = new TextDecoder();
78
+ let buffer = "";
79
+
80
+ while (true) {
81
+ const { done, value } = await reader.read();
82
+ if (done) break;
83
+
84
+ buffer += decoder.decode(value, { stream: true });
85
+ const lines = buffer.split("\n");
86
+ buffer = lines.pop() ?? "";
87
+
88
+ for (const line of lines) {
89
+ const trimmed = line.trim();
90
+ if (!trimmed.startsWith("data: ")) continue;
91
+ const data = trimmed.slice(6);
92
+ if (data === "[DONE]") break;
93
+
94
+ try {
95
+ const parsed = JSON.parse(data) as any;
96
+ const delta = parsed.choices?.[0]?.delta?.content;
97
+ if (delta) {
98
+ result += delta;
99
+ callbacks.onToken(result.trim());
100
+ }
101
+ } catch {
102
+ // skip malformed chunks
103
+ }
104
+ }
105
+ }
106
+
107
+ return result.trim();
108
+ }
109
+ }
@@ -1,108 +1,9 @@
1
- // xAI/Grok provider — uses OpenAI-compatible API
2
- // grok-code-fast-1 for code tasks, grok-4-fast for general queries.
1
+ // xAI/Grok provider — code-optimized models
2
+ import { OpenAICompatibleProvider } from "./openai-compat.js";
3
3
 
4
- import type { LLMProvider, ProviderOptions, StreamCallbacks } from "./base.js";
5
-
6
- const XAI_BASE_URL = "https://api.x.ai/v1";
7
- const DEFAULT_MODEL = "grok-code-fast-1";
8
-
9
- export class XaiProvider implements LLMProvider {
4
+ export class XaiProvider extends OpenAICompatibleProvider {
10
5
  readonly name = "xai";
11
- private apiKey: string;
12
-
13
- constructor() {
14
- this.apiKey = process.env.XAI_API_KEY ?? "";
15
- }
16
-
17
- isAvailable(): boolean {
18
- return !!process.env.XAI_API_KEY;
19
- }
20
-
21
- async complete(prompt: string, options: ProviderOptions): Promise<string> {
22
- const model = options.model ?? DEFAULT_MODEL;
23
- const res = await fetch(`${XAI_BASE_URL}/chat/completions`, {
24
- method: "POST",
25
- headers: {
26
- "Content-Type": "application/json",
27
- Authorization: `Bearer ${this.apiKey}`,
28
- },
29
- body: JSON.stringify({
30
- model,
31
- max_tokens: options.maxTokens ?? 256,
32
- messages: [
33
- { role: "system", content: options.system },
34
- { role: "user", content: prompt },
35
- ],
36
- }),
37
- });
38
-
39
- if (!res.ok) {
40
- const text = await res.text();
41
- throw new Error(`xAI API error ${res.status}: ${text}`);
42
- }
43
-
44
- const json = (await res.json()) as any;
45
- return (json.choices?.[0]?.message?.content ?? "").trim();
46
- }
47
-
48
- async stream(prompt: string, options: ProviderOptions, callbacks: StreamCallbacks): Promise<string> {
49
- const model = options.model ?? DEFAULT_MODEL;
50
- const res = await fetch(`${XAI_BASE_URL}/chat/completions`, {
51
- method: "POST",
52
- headers: {
53
- "Content-Type": "application/json",
54
- Authorization: `Bearer ${this.apiKey}`,
55
- },
56
- body: JSON.stringify({
57
- model,
58
- max_tokens: options.maxTokens ?? 256,
59
- stream: true,
60
- messages: [
61
- { role: "system", content: options.system },
62
- { role: "user", content: prompt },
63
- ],
64
- }),
65
- });
66
-
67
- if (!res.ok) {
68
- const text = await res.text();
69
- throw new Error(`xAI API error ${res.status}: ${text}`);
70
- }
71
-
72
- let result = "";
73
- const reader = res.body?.getReader();
74
- if (!reader) throw new Error("No response body");
75
-
76
- const decoder = new TextDecoder();
77
- let buffer = "";
78
-
79
- while (true) {
80
- const { done, value } = await reader.read();
81
- if (done) break;
82
-
83
- buffer += decoder.decode(value, { stream: true });
84
- const lines = buffer.split("\n");
85
- buffer = lines.pop() ?? "";
86
-
87
- for (const line of lines) {
88
- const trimmed = line.trim();
89
- if (!trimmed.startsWith("data: ")) continue;
90
- const data = trimmed.slice(6);
91
- if (data === "[DONE]") break;
92
-
93
- try {
94
- const parsed = JSON.parse(data) as any;
95
- const delta = parsed.choices?.[0]?.delta?.content;
96
- if (delta) {
97
- result += delta;
98
- callbacks.onToken(result.trim());
99
- }
100
- } catch {
101
- // skip malformed chunks
102
- }
103
- }
104
- }
105
-
106
- return result.trim();
107
- }
6
+ protected readonly baseUrl = "https://api.x.ai/v1";
7
+ protected readonly defaultModel = "grok-code-fast-1";
8
+ protected readonly apiKeyEnvVar = "XAI_API_KEY";
108
9
  }
package/src/tokens.ts ADDED
@@ -0,0 +1,18 @@
1
+ // Token estimation utility — shared across all modules
2
+ // Uses content-aware heuristic: code/JSON averages ~3.3 chars/token,
3
+ // English prose averages ~4.2 chars/token.
4
+
5
+ /** Detect if content is primarily code/JSON vs English prose */
6
+ function isCodeLike(text: string): boolean {
7
+ // Count structural characters common in code/JSON
8
+ const structural = (text.match(/[{}[\]();:=<>,"'`|&\\/@#$%^*+~!?]/g) || []).length;
9
+ const ratio = structural / Math.max(text.length, 1);
10
+ return ratio > 0.08; // >8% structural chars = code-like
11
+ }
12
+
13
+ /** Estimate token count for a string with content-aware heuristic */
14
+ export function estimateTokens(text: string): number {
15
+ if (!text) return 0;
16
+ const charsPerToken = isCodeLike(text) ? 3.3 : 4.2;
17
+ return Math.ceil(text.length / charsPerToken);
18
+ }
@@ -107,12 +107,19 @@ function loadUserProfiles(): ToolProfile[] {
107
107
  return profiles;
108
108
  }
109
109
 
110
- /** Get all profiles — user profiles override builtins by name */
110
+ /** Get all profiles — user profiles override builtins by name (cached 30s) */
111
+ let _cachedProfiles: ToolProfile[] | null = null;
112
+ let _cachedProfilesAt = 0;
113
+
111
114
  export function getProfiles(): ToolProfile[] {
115
+ const now = Date.now();
116
+ if (_cachedProfiles && now - _cachedProfilesAt < 30_000) return _cachedProfiles;
112
117
  const user = loadUserProfiles();
113
118
  const userNames = new Set(user.map(p => p.name));
114
119
  const builtins = BUILTIN_PROFILES.filter(p => !userNames.has(p.name));
115
- return [...user, ...builtins];
120
+ _cachedProfiles = [...user, ...builtins];
121
+ _cachedProfilesAt = now;
122
+ return _cachedProfiles;
116
123
  }
117
124
 
118
125
  /** Find the matching profile for a command */
@@ -1,49 +0,0 @@
1
- import { describe, it, expect } from "bun:test";
2
- import { compress, stripAnsi } from "./compression.js";
3
-
4
- describe("stripAnsi", () => {
5
- it("removes ANSI escape codes", () => {
6
- expect(stripAnsi("\x1b[31mred\x1b[0m")).toBe("red");
7
- expect(stripAnsi("\x1b[1;32mbold green\x1b[0m")).toBe("bold green");
8
- });
9
-
10
- it("leaves clean text unchanged", () => {
11
- expect(stripAnsi("hello world")).toBe("hello world");
12
- });
13
- });
14
-
15
- describe("compress", () => {
16
- it("strips ANSI by default", () => {
17
- const result = compress("ls", "\x1b[32mfile.ts\x1b[0m");
18
- expect(result.content).not.toContain("\x1b");
19
- });
20
-
21
- it("uses structured parser when format=json", () => {
22
- const output = `total 16
23
- -rw-r--r-- 1 user staff 450 Mar 10 09:00 package.json
24
- drwxr-xr-x 5 user staff 160 Mar 10 09:00 src`;
25
-
26
- const result = compress("ls -la", output, { format: "json" });
27
- // Parser may skip JSON if it's larger than raw — just check it returned something
28
- expect(result.content).toBeTruthy();
29
- expect(result.compressedTokens).toBeGreaterThan(0);
30
- });
31
-
32
- it("respects maxTokens budget", () => {
33
- const longOutput = Array.from({ length: 100 }, (_, i) => `Line ${i}: some output text here`).join("\n");
34
- const result = compress("some-command", longOutput, { maxTokens: 50 });
35
- expect(result.compressedTokens).toBeLessThanOrEqual(60); // allow some slack
36
- });
37
-
38
- it("deduplicates similar lines", () => {
39
- const output = Array.from({ length: 20 }, (_, i) => `Compiling module ${i}...`).join("\n");
40
- const result = compress("build", output);
41
- expect(result.compressedTokens).toBeLessThan(result.originalTokens);
42
- });
43
-
44
- it("tracks savings on large output", () => {
45
- const output = Array.from({ length: 100 }, (_, i) => `Line ${i}: some long output text here that takes tokens`).join("\n");
46
- const result = compress("cmd", output, { maxTokens: 50 });
47
- expect(result.compressedTokens).toBeLessThan(result.originalTokens);
48
- });
49
- });
@@ -1,56 +0,0 @@
1
- // Output intelligence router — auto-detect command type and optimize output
2
-
3
- import { parseOutput, estimateTokens } from "./parsers/index.js";
4
- import { compress, stripAnsi } from "./compression.js";
5
- import { recordSaving } from "./economy.js";
6
-
7
- export interface RouterResult {
8
- raw: string;
9
- structured?: unknown;
10
- compressed?: string;
11
- parser?: string;
12
- tokensSaved: number;
13
- format: "raw" | "json" | "compressed";
14
- }
15
-
16
- /** Route command output through the best optimization path */
17
- export function routeOutput(command: string, output: string, maxTokens?: number): RouterResult {
18
- const clean = stripAnsi(output);
19
- const rawTokens = estimateTokens(clean);
20
-
21
- // Try structured parsing first
22
- const parsed = parseOutput(command, clean);
23
- if (parsed) {
24
- const json = JSON.stringify(parsed.data);
25
- const jsonTokens = estimateTokens(json);
26
- const saved = rawTokens - jsonTokens;
27
-
28
- if (saved > 0) {
29
- recordSaving("structured", saved);
30
- return {
31
- raw: clean,
32
- structured: parsed.data,
33
- parser: parsed.parser,
34
- tokensSaved: saved,
35
- format: "json",
36
- };
37
- }
38
- }
39
-
40
- // Try compression if structured didn't save enough
41
- if (maxTokens || rawTokens > 200) {
42
- const compressed = compress(command, clean, { maxTokens, format: "text" });
43
- if (compressed.tokensSaved > 0) {
44
- recordSaving("compressed", compressed.tokensSaved);
45
- return {
46
- raw: clean,
47
- compressed: compressed.content,
48
- tokensSaved: compressed.tokensSaved,
49
- format: "compressed",
50
- };
51
- }
52
- }
53
-
54
- // Return raw if no optimization helps
55
- return { raw: clean, tokensSaved: 0, format: "raw" };
56
- }