@hasna/terminal 2.3.1 → 3.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. package/dist/App.js +404 -0
  2. package/dist/Browse.js +79 -0
  3. package/dist/FuzzyPicker.js +47 -0
  4. package/dist/Onboarding.js +51 -0
  5. package/dist/Spinner.js +12 -0
  6. package/dist/StatusBar.js +49 -0
  7. package/dist/ai.js +296 -0
  8. package/dist/cache.js +42 -0
  9. package/dist/cli.js +1 -1
  10. package/dist/command-rewriter.js +64 -0
  11. package/dist/command-validator.js +86 -0
  12. package/dist/compression.js +85 -0
  13. package/dist/context-hints.js +285 -0
  14. package/dist/diff-cache.js +107 -0
  15. package/dist/discover.js +212 -0
  16. package/dist/economy.js +155 -0
  17. package/dist/expand-store.js +44 -0
  18. package/dist/file-cache.js +72 -0
  19. package/dist/file-index.js +62 -0
  20. package/dist/history.js +62 -0
  21. package/dist/lazy-executor.js +54 -0
  22. package/dist/line-dedup.js +59 -0
  23. package/dist/loop-detector.js +75 -0
  24. package/dist/mcp/install.js +98 -0
  25. package/dist/mcp/server.js +545 -0
  26. package/dist/noise-filter.js +86 -0
  27. package/dist/output-processor.js +132 -0
  28. package/dist/output-router.js +41 -0
  29. package/dist/output-store.js +111 -0
  30. package/dist/parsers/base.js +2 -0
  31. package/dist/parsers/build.js +64 -0
  32. package/dist/parsers/errors.js +101 -0
  33. package/dist/parsers/files.js +78 -0
  34. package/dist/parsers/git.js +99 -0
  35. package/dist/parsers/index.js +48 -0
  36. package/dist/parsers/tests.js +89 -0
  37. package/dist/providers/anthropic.js +43 -0
  38. package/dist/providers/base.js +4 -0
  39. package/dist/providers/cerebras.js +8 -0
  40. package/dist/providers/groq.js +8 -0
  41. package/dist/providers/index.js +122 -0
  42. package/dist/providers/openai-compat.js +93 -0
  43. package/dist/providers/xai.js +8 -0
  44. package/dist/recipes/model.js +20 -0
  45. package/dist/recipes/storage.js +136 -0
  46. package/dist/search/content-search.js +68 -0
  47. package/dist/search/file-search.js +61 -0
  48. package/dist/search/filters.js +34 -0
  49. package/dist/search/index.js +5 -0
  50. package/dist/search/semantic.js +320 -0
  51. package/dist/session-boot.js +59 -0
  52. package/dist/session-context.js +55 -0
  53. package/dist/sessions-db.js +173 -0
  54. package/dist/smart-display.js +286 -0
  55. package/dist/snapshots.js +51 -0
  56. package/dist/supervisor.js +112 -0
  57. package/dist/test-watchlist.js +131 -0
  58. package/dist/tokens.js +17 -0
  59. package/dist/tool-profiles.js +129 -0
  60. package/dist/tree.js +94 -0
  61. package/dist/usage-cache.js +65 -0
  62. package/package.json +8 -1
  63. package/src/ai.ts +60 -90
  64. package/src/cache.ts +3 -2
  65. package/src/cli.tsx +1 -1
  66. package/src/compression.ts +8 -35
  67. package/src/context-hints.ts +20 -10
  68. package/src/diff-cache.ts +1 -1
  69. package/src/discover.ts +1 -1
  70. package/src/economy.ts +37 -5
  71. package/src/expand-store.ts +8 -1
  72. package/src/mcp/server.ts +45 -73
  73. package/src/output-processor.ts +11 -8
  74. package/src/providers/anthropic.ts +6 -2
  75. package/src/providers/base.ts +2 -0
  76. package/src/providers/cerebras.ts +6 -105
  77. package/src/providers/groq.ts +6 -105
  78. package/src/providers/index.ts +84 -33
  79. package/src/providers/openai-compat.ts +109 -0
  80. package/src/providers/xai.ts +6 -105
  81. package/src/tokens.ts +18 -0
  82. package/src/tool-profiles.ts +9 -2
  83. package/.claude/scheduled_tasks.lock +0 -1
  84. package/.github/ISSUE_TEMPLATE/bug_report.md +0 -20
  85. package/.github/ISSUE_TEMPLATE/feature_request.md +0 -14
  86. package/CONTRIBUTING.md +0 -80
  87. package/benchmarks/benchmark.mjs +0 -115
  88. package/imported_modules.txt +0 -0
  89. package/src/compression.test.ts +0 -49
  90. package/src/output-router.ts +0 -56
  91. package/src/parsers/base.ts +0 -72
  92. package/src/parsers/build.ts +0 -73
  93. package/src/parsers/errors.ts +0 -107
  94. package/src/parsers/files.ts +0 -91
  95. package/src/parsers/git.ts +0 -101
  96. package/src/parsers/index.ts +0 -66
  97. package/src/parsers/parsers.test.ts +0 -153
  98. package/src/parsers/tests.ts +0 -98
  99. package/tsconfig.json +0 -15
@@ -1,108 +1,9 @@
1
- // Groq provider — uses OpenAI-compatible API
2
- // Ultra-fast inference. Supports Llama, Qwen, Kimi models.
1
+ // Groq provider — ultra-fast inference
2
+ import { OpenAICompatibleProvider } from "./openai-compat.js";
3
3
 
4
- import type { LLMProvider, ProviderOptions, StreamCallbacks } from "./base.js";
5
-
6
- const GROQ_BASE_URL = "https://api.groq.com/openai/v1";
7
- const DEFAULT_MODEL = "openai/gpt-oss-120b";
8
-
9
- export class GroqProvider implements LLMProvider {
4
+ export class GroqProvider extends OpenAICompatibleProvider {
10
5
  readonly name = "groq";
11
- private apiKey: string;
12
-
13
- constructor() {
14
- this.apiKey = process.env.GROQ_API_KEY ?? "";
15
- }
16
-
17
- isAvailable(): boolean {
18
- return !!process.env.GROQ_API_KEY;
19
- }
20
-
21
- async complete(prompt: string, options: ProviderOptions): Promise<string> {
22
- const model = options.model ?? DEFAULT_MODEL;
23
- const res = await fetch(`${GROQ_BASE_URL}/chat/completions`, {
24
- method: "POST",
25
- headers: {
26
- "Content-Type": "application/json",
27
- Authorization: `Bearer ${this.apiKey}`,
28
- },
29
- body: JSON.stringify({
30
- model,
31
- max_tokens: options.maxTokens ?? 256,
32
- messages: [
33
- { role: "system", content: options.system },
34
- { role: "user", content: prompt },
35
- ],
36
- }),
37
- });
38
-
39
- if (!res.ok) {
40
- const text = await res.text();
41
- throw new Error(`Groq API error ${res.status}: ${text}`);
42
- }
43
-
44
- const json = (await res.json()) as any;
45
- return (json.choices?.[0]?.message?.content ?? "").trim();
46
- }
47
-
48
- async stream(prompt: string, options: ProviderOptions, callbacks: StreamCallbacks): Promise<string> {
49
- const model = options.model ?? DEFAULT_MODEL;
50
- const res = await fetch(`${GROQ_BASE_URL}/chat/completions`, {
51
- method: "POST",
52
- headers: {
53
- "Content-Type": "application/json",
54
- Authorization: `Bearer ${this.apiKey}`,
55
- },
56
- body: JSON.stringify({
57
- model,
58
- max_tokens: options.maxTokens ?? 256,
59
- stream: true,
60
- messages: [
61
- { role: "system", content: options.system },
62
- { role: "user", content: prompt },
63
- ],
64
- }),
65
- });
66
-
67
- if (!res.ok) {
68
- const text = await res.text();
69
- throw new Error(`Groq API error ${res.status}: ${text}`);
70
- }
71
-
72
- let result = "";
73
- const reader = res.body?.getReader();
74
- if (!reader) throw new Error("No response body");
75
-
76
- const decoder = new TextDecoder();
77
- let buffer = "";
78
-
79
- while (true) {
80
- const { done, value } = await reader.read();
81
- if (done) break;
82
-
83
- buffer += decoder.decode(value, { stream: true });
84
- const lines = buffer.split("\n");
85
- buffer = lines.pop() ?? "";
86
-
87
- for (const line of lines) {
88
- const trimmed = line.trim();
89
- if (!trimmed.startsWith("data: ")) continue;
90
- const data = trimmed.slice(6);
91
- if (data === "[DONE]") break;
92
-
93
- try {
94
- const parsed = JSON.parse(data) as any;
95
- const delta = parsed.choices?.[0]?.delta?.content;
96
- if (delta) {
97
- result += delta;
98
- callbacks.onToken(result.trim());
99
- }
100
- } catch {
101
- // skip malformed chunks
102
- }
103
- }
104
- }
105
-
106
- return result.trim();
107
- }
6
+ protected readonly baseUrl = "https://api.groq.com/openai/v1";
7
+ protected readonly defaultModel = "openai/gpt-oss-120b";
8
+ protected readonly apiKeyEnvVar = "GROQ_API_KEY";
108
9
  }
@@ -1,6 +1,6 @@
1
- // Provider auto-detection and management
1
+ // Provider auto-detection and management — with fallback on failure
2
2
 
3
- import type { LLMProvider, ProviderConfig } from "./base.js";
3
+ import type { LLMProvider, ProviderConfig, ProviderOptions, StreamCallbacks } from "./base.js";
4
4
  import { DEFAULT_PROVIDER_CONFIG } from "./base.js";
5
5
  import { AnthropicProvider } from "./anthropic.js";
6
6
  import { CerebrasProvider } from "./cerebras.js";
@@ -11,10 +11,11 @@ export type { LLMProvider, ProviderOptions, StreamCallbacks, ProviderConfig } fr
11
11
  export { DEFAULT_PROVIDER_CONFIG } from "./base.js";
12
12
 
13
13
  let _provider: LLMProvider | null = null;
14
+ let _failedProviders: Set<string> = new Set();
14
15
 
15
16
  /** Get the active LLM provider. Auto-detects based on available API keys. */
16
17
  export function getProvider(config?: ProviderConfig): LLMProvider {
17
- if (_provider) return _provider;
18
+ if (_provider && !_failedProviders.has(_provider.name)) return _provider;
18
19
 
19
20
  const cfg = config ?? DEFAULT_PROVIDER_CONFIG;
20
21
  _provider = resolveProvider(cfg);
@@ -24,46 +25,51 @@ export function getProvider(config?: ProviderConfig): LLMProvider {
24
25
  /** Reset the cached provider (useful when config changes). */
25
26
  export function resetProvider() {
26
27
  _provider = null;
28
+ _failedProviders.clear();
29
+ }
30
+
31
+ /** Get a fallback-wrapped provider that tries alternatives on failure */
32
+ export function getProviderWithFallback(config?: ProviderConfig): LLMProvider {
33
+ const primary = getProvider(config);
34
+ return new FallbackProvider(primary);
27
35
  }
28
36
 
29
37
  function resolveProvider(config: ProviderConfig): LLMProvider {
30
- if (config.provider === "cerebras") {
31
- const p = new CerebrasProvider();
32
- if (!p.isAvailable()) throw new Error("CEREBRAS_API_KEY not set. Run: export CEREBRAS_API_KEY=your-key");
33
- return p;
38
+ if (config.provider !== "auto") {
39
+ const providers: Record<string, () => LLMProvider> = {
40
+ cerebras: () => new CerebrasProvider(),
41
+ anthropic: () => new AnthropicProvider(),
42
+ groq: () => new GroqProvider(),
43
+ xai: () => new XaiProvider(),
44
+ };
45
+ const factory = providers[config.provider];
46
+ if (factory) {
47
+ const p = factory();
48
+ if (!p.isAvailable()) throw new Error(`${config.provider.toUpperCase()}_API_KEY not set`);
49
+ return p;
50
+ }
34
51
  }
35
52
 
36
- if (config.provider === "anthropic") {
37
- const p = new AnthropicProvider();
38
- if (!p.isAvailable()) throw new Error("ANTHROPIC_API_KEY not set. Run: export ANTHROPIC_API_KEY=your-key");
39
- return p;
40
- }
53
+ // auto: prefer Cerebras, then xAI, then Groq, then Anthropic — skip failed
54
+ const candidates: LLMProvider[] = [
55
+ new CerebrasProvider(),
56
+ new XaiProvider(),
57
+ new GroqProvider(),
58
+ new AnthropicProvider(),
59
+ ];
41
60
 
42
- if (config.provider === "groq") {
43
- const p = new GroqProvider();
44
- if (!p.isAvailable()) throw new Error("GROQ_API_KEY not set. Run: export GROQ_API_KEY=your-key");
45
- return p;
61
+ for (const p of candidates) {
62
+ if (p.isAvailable() && !_failedProviders.has(p.name)) return p;
46
63
  }
47
64
 
48
- if (config.provider === "xai") {
49
- const p = new XaiProvider();
50
- if (!p.isAvailable()) throw new Error("XAI_API_KEY not set. Run: export XAI_API_KEY=your-key");
51
- return p;
65
+ // If all failed, clear failures and try again
66
+ if (_failedProviders.size > 0) {
67
+ _failedProviders.clear();
68
+ for (const p of candidates) {
69
+ if (p.isAvailable()) return p;
70
+ }
52
71
  }
53
72
 
54
- // auto: prefer Cerebras (qwen-235b, fast + accurate), then xAI, then Groq, then Anthropic
55
- const cerebras = new CerebrasProvider();
56
- if (cerebras.isAvailable()) return cerebras;
57
-
58
- const xai = new XaiProvider();
59
- if (xai.isAvailable()) return xai;
60
-
61
- const groq = new GroqProvider();
62
- if (groq.isAvailable()) return groq;
63
-
64
- const anthropic = new AnthropicProvider();
65
- if (anthropic.isAvailable()) return anthropic;
66
-
67
73
  throw new Error(
68
74
  "No API key found. Set one of:\n" +
69
75
  " export CEREBRAS_API_KEY=your-key (free, open-source)\n" +
@@ -73,6 +79,51 @@ function resolveProvider(config: ProviderConfig): LLMProvider {
73
79
  );
74
80
  }
75
81
 
82
+ /** Provider wrapper that falls back to alternatives on API errors */
83
+ class FallbackProvider implements LLMProvider {
84
+ readonly name: string;
85
+ private primary: LLMProvider;
86
+
87
+ constructor(primary: LLMProvider) {
88
+ this.primary = primary;
89
+ this.name = primary.name;
90
+ }
91
+
92
+ isAvailable(): boolean {
93
+ return this.primary.isAvailable();
94
+ }
95
+
96
+ async complete(prompt: string, options: ProviderOptions): Promise<string> {
97
+ try {
98
+ return await this.primary.complete(prompt, options);
99
+ } catch (err) {
100
+ const fallback = this.getFallback();
101
+ if (fallback) return fallback.complete(prompt, options);
102
+ throw err;
103
+ }
104
+ }
105
+
106
+ async stream(prompt: string, options: ProviderOptions, callbacks: StreamCallbacks): Promise<string> {
107
+ try {
108
+ return await this.primary.stream(prompt, options, callbacks);
109
+ } catch (err) {
110
+ const fallback = this.getFallback();
111
+ if (fallback) return fallback.complete(prompt, options); // fallback doesn't stream
112
+ throw err;
113
+ }
114
+ }
115
+
116
+ private getFallback(): LLMProvider | null {
117
+ _failedProviders.add(this.primary.name);
118
+ _provider = null; // force re-resolve
119
+ try {
120
+ const next = getProvider();
121
+ if (next.name !== this.primary.name) return next;
122
+ } catch {}
123
+ return null;
124
+ }
125
+ }
126
+
76
127
  /** List available providers (for onboarding UI). */
77
128
  export function availableProviders(): { name: string; available: boolean }[] {
78
129
  return [
@@ -0,0 +1,109 @@
1
+ // Shared base class for OpenAI-compatible providers (Cerebras, Groq, xAI)
2
+ // Eliminates ~200 lines of duplicated streaming SSE parsing
3
+
4
+ import type { LLMProvider, ProviderOptions, StreamCallbacks } from "./base.js";
5
+
6
+ export abstract class OpenAICompatibleProvider implements LLMProvider {
7
+ abstract readonly name: string;
8
+ protected abstract readonly baseUrl: string;
9
+ protected abstract readonly defaultModel: string;
10
+ protected abstract readonly apiKeyEnvVar: string;
11
+
12
+ protected get apiKey(): string {
13
+ return process.env[this.apiKeyEnvVar] ?? "";
14
+ }
15
+
16
+ isAvailable(): boolean {
17
+ return !!process.env[this.apiKeyEnvVar];
18
+ }
19
+
20
+ async complete(prompt: string, options: ProviderOptions): Promise<string> {
21
+ const res = await fetch(`${this.baseUrl}/chat/completions`, {
22
+ method: "POST",
23
+ headers: {
24
+ "Content-Type": "application/json",
25
+ Authorization: `Bearer ${this.apiKey}`,
26
+ },
27
+ body: JSON.stringify({
28
+ model: options.model ?? this.defaultModel,
29
+ max_tokens: options.maxTokens ?? 256,
30
+ temperature: options.temperature ?? 0,
31
+ ...(options.stop ? { stop: options.stop } : {}),
32
+ messages: [
33
+ { role: "system", content: options.system },
34
+ { role: "user", content: prompt },
35
+ ],
36
+ }),
37
+ });
38
+
39
+ if (!res.ok) {
40
+ const text = await res.text();
41
+ throw new Error(`${this.name} API error ${res.status}: ${text}`);
42
+ }
43
+
44
+ const json = (await res.json()) as any;
45
+ return (json.choices?.[0]?.message?.content ?? "").trim();
46
+ }
47
+
48
+ async stream(prompt: string, options: ProviderOptions, callbacks: StreamCallbacks): Promise<string> {
49
+ const res = await fetch(`${this.baseUrl}/chat/completions`, {
50
+ method: "POST",
51
+ headers: {
52
+ "Content-Type": "application/json",
53
+ Authorization: `Bearer ${this.apiKey}`,
54
+ },
55
+ body: JSON.stringify({
56
+ model: options.model ?? this.defaultModel,
57
+ max_tokens: options.maxTokens ?? 256,
58
+ temperature: options.temperature ?? 0,
59
+ stream: true,
60
+ ...(options.stop ? { stop: options.stop } : {}),
61
+ messages: [
62
+ { role: "system", content: options.system },
63
+ { role: "user", content: prompt },
64
+ ],
65
+ }),
66
+ });
67
+
68
+ if (!res.ok) {
69
+ const text = await res.text();
70
+ throw new Error(`${this.name} API error ${res.status}: ${text}`);
71
+ }
72
+
73
+ let result = "";
74
+ const reader = res.body?.getReader();
75
+ if (!reader) throw new Error("No response body");
76
+
77
+ const decoder = new TextDecoder();
78
+ let buffer = "";
79
+
80
+ while (true) {
81
+ const { done, value } = await reader.read();
82
+ if (done) break;
83
+
84
+ buffer += decoder.decode(value, { stream: true });
85
+ const lines = buffer.split("\n");
86
+ buffer = lines.pop() ?? "";
87
+
88
+ for (const line of lines) {
89
+ const trimmed = line.trim();
90
+ if (!trimmed.startsWith("data: ")) continue;
91
+ const data = trimmed.slice(6);
92
+ if (data === "[DONE]") break;
93
+
94
+ try {
95
+ const parsed = JSON.parse(data) as any;
96
+ const delta = parsed.choices?.[0]?.delta?.content;
97
+ if (delta) {
98
+ result += delta;
99
+ callbacks.onToken(result.trim());
100
+ }
101
+ } catch {
102
+ // skip malformed chunks
103
+ }
104
+ }
105
+ }
106
+
107
+ return result.trim();
108
+ }
109
+ }
@@ -1,108 +1,9 @@
1
- // xAI/Grok provider — uses OpenAI-compatible API
2
- // grok-code-fast-1 for code tasks, grok-4-fast for general queries.
1
+ // xAI/Grok provider — code-optimized models
2
+ import { OpenAICompatibleProvider } from "./openai-compat.js";
3
3
 
4
- import type { LLMProvider, ProviderOptions, StreamCallbacks } from "./base.js";
5
-
6
- const XAI_BASE_URL = "https://api.x.ai/v1";
7
- const DEFAULT_MODEL = "grok-code-fast-1";
8
-
9
- export class XaiProvider implements LLMProvider {
4
+ export class XaiProvider extends OpenAICompatibleProvider {
10
5
  readonly name = "xai";
11
- private apiKey: string;
12
-
13
- constructor() {
14
- this.apiKey = process.env.XAI_API_KEY ?? "";
15
- }
16
-
17
- isAvailable(): boolean {
18
- return !!process.env.XAI_API_KEY;
19
- }
20
-
21
- async complete(prompt: string, options: ProviderOptions): Promise<string> {
22
- const model = options.model ?? DEFAULT_MODEL;
23
- const res = await fetch(`${XAI_BASE_URL}/chat/completions`, {
24
- method: "POST",
25
- headers: {
26
- "Content-Type": "application/json",
27
- Authorization: `Bearer ${this.apiKey}`,
28
- },
29
- body: JSON.stringify({
30
- model,
31
- max_tokens: options.maxTokens ?? 256,
32
- messages: [
33
- { role: "system", content: options.system },
34
- { role: "user", content: prompt },
35
- ],
36
- }),
37
- });
38
-
39
- if (!res.ok) {
40
- const text = await res.text();
41
- throw new Error(`xAI API error ${res.status}: ${text}`);
42
- }
43
-
44
- const json = (await res.json()) as any;
45
- return (json.choices?.[0]?.message?.content ?? "").trim();
46
- }
47
-
48
- async stream(prompt: string, options: ProviderOptions, callbacks: StreamCallbacks): Promise<string> {
49
- const model = options.model ?? DEFAULT_MODEL;
50
- const res = await fetch(`${XAI_BASE_URL}/chat/completions`, {
51
- method: "POST",
52
- headers: {
53
- "Content-Type": "application/json",
54
- Authorization: `Bearer ${this.apiKey}`,
55
- },
56
- body: JSON.stringify({
57
- model,
58
- max_tokens: options.maxTokens ?? 256,
59
- stream: true,
60
- messages: [
61
- { role: "system", content: options.system },
62
- { role: "user", content: prompt },
63
- ],
64
- }),
65
- });
66
-
67
- if (!res.ok) {
68
- const text = await res.text();
69
- throw new Error(`xAI API error ${res.status}: ${text}`);
70
- }
71
-
72
- let result = "";
73
- const reader = res.body?.getReader();
74
- if (!reader) throw new Error("No response body");
75
-
76
- const decoder = new TextDecoder();
77
- let buffer = "";
78
-
79
- while (true) {
80
- const { done, value } = await reader.read();
81
- if (done) break;
82
-
83
- buffer += decoder.decode(value, { stream: true });
84
- const lines = buffer.split("\n");
85
- buffer = lines.pop() ?? "";
86
-
87
- for (const line of lines) {
88
- const trimmed = line.trim();
89
- if (!trimmed.startsWith("data: ")) continue;
90
- const data = trimmed.slice(6);
91
- if (data === "[DONE]") break;
92
-
93
- try {
94
- const parsed = JSON.parse(data) as any;
95
- const delta = parsed.choices?.[0]?.delta?.content;
96
- if (delta) {
97
- result += delta;
98
- callbacks.onToken(result.trim());
99
- }
100
- } catch {
101
- // skip malformed chunks
102
- }
103
- }
104
- }
105
-
106
- return result.trim();
107
- }
6
+ protected readonly baseUrl = "https://api.x.ai/v1";
7
+ protected readonly defaultModel = "grok-code-fast-1";
8
+ protected readonly apiKeyEnvVar = "XAI_API_KEY";
108
9
  }
package/src/tokens.ts ADDED
@@ -0,0 +1,18 @@
1
+ // Token estimation utility — shared across all modules
2
+ // Uses content-aware heuristic: code/JSON averages ~3.3 chars/token,
3
+ // English prose averages ~4.2 chars/token.
4
+
5
+ /** Detect if content is primarily code/JSON vs English prose */
6
+ function isCodeLike(text: string): boolean {
7
+ // Count structural characters common in code/JSON
8
+ const structural = (text.match(/[{}[\]();:=<>,"'`|&\\/@#$%^*+~!?]/g) || []).length;
9
+ const ratio = structural / Math.max(text.length, 1);
10
+ return ratio > 0.08; // >8% structural chars = code-like
11
+ }
12
+
13
+ /** Estimate token count for a string with content-aware heuristic */
14
+ export function estimateTokens(text: string): number {
15
+ if (!text) return 0;
16
+ const charsPerToken = isCodeLike(text) ? 3.3 : 4.2;
17
+ return Math.ceil(text.length / charsPerToken);
18
+ }
@@ -107,12 +107,19 @@ function loadUserProfiles(): ToolProfile[] {
107
107
  return profiles;
108
108
  }
109
109
 
110
- /** Get all profiles — user profiles override builtins by name */
110
+ /** Get all profiles — user profiles override builtins by name (cached 30s) */
111
+ let _cachedProfiles: ToolProfile[] | null = null;
112
+ let _cachedProfilesAt = 0;
113
+
111
114
  export function getProfiles(): ToolProfile[] {
115
+ const now = Date.now();
116
+ if (_cachedProfiles && now - _cachedProfilesAt < 30_000) return _cachedProfiles;
112
117
  const user = loadUserProfiles();
113
118
  const userNames = new Set(user.map(p => p.name));
114
119
  const builtins = BUILTIN_PROFILES.filter(p => !userNames.has(p.name));
115
- return [...user, ...builtins];
120
+ _cachedProfiles = [...user, ...builtins];
121
+ _cachedProfilesAt = now;
122
+ return _cachedProfiles;
116
123
  }
117
124
 
118
125
  /** Find the matching profile for a command */
@@ -1 +0,0 @@
1
- {"sessionId":"c1e414c7-f1a5-4b9e-bcc4-64c451584cb8","pid":1236,"acquiredAt":1773584959902}
@@ -1,20 +0,0 @@
1
- ---
2
- name: Bug Report
3
- about: Report a bug in open-terminal
4
- labels: bug
5
- ---
6
-
7
- **Command:**
8
- `terminal exec "..."`
9
-
10
- **Expected:**
11
- What you expected to happen
12
-
13
- **Actual:**
14
- What actually happened
15
-
16
- **Environment:**
17
- - OS:
18
- - Node/Bun version:
19
- - open-terminal version: (`terminal --version`)
20
- - Provider: Cerebras / Anthropic
@@ -1,14 +0,0 @@
1
- ---
2
- name: Feature Request
3
- about: Suggest a feature for open-terminal
4
- labels: enhancement
5
- ---
6
-
7
- **Use case:**
8
- What problem does this solve?
9
-
10
- **Proposed solution:**
11
- How should it work?
12
-
13
- **Alternatives considered:**
14
- Other approaches you thought about
package/CONTRIBUTING.md DELETED
@@ -1,80 +0,0 @@
1
- # Contributing to open-terminal
2
-
3
- Thanks for your interest in contributing! open-terminal is an open-source smart terminal wrapper that saves AI agents 73-90% of tokens on terminal output.
4
-
5
- ## Development Setup
6
-
7
- ```bash
8
- git clone https://github.com/hasna/terminal.git
9
- cd terminal
10
- npm install
11
- npm run build # TypeScript compilation
12
- bun test # Run tests
13
- ```
14
-
15
- ## Architecture
16
-
17
- ```
18
- src/
19
- cli.tsx # CLI entry point (TUI + subcommands)
20
- ai.ts # NL translation (Cerebras/Anthropic providers)
21
- compression.ts # Token compression engine
22
- noise-filter.ts # Strip noise (npm fund, progress bars, etc.)
23
- command-rewriter.ts # Auto-optimize commands before execution
24
- output-processor.ts # AI-powered output summarization
25
- diff-cache.ts # Diff-aware output caching
26
- smart-display.ts # Visual output compression for TUI
27
- file-cache.ts # Session file read cache
28
- lazy-executor.ts # Lazy execution for large results
29
- expand-store.ts # Progressive disclosure store
30
- economy.ts # Token savings tracker
31
- sessions-db.ts # SQLite session tracking
32
- supervisor.ts # Background process manager
33
- snapshots.ts # Session state snapshots
34
- tree.ts # Tree compression for file listings
35
- mcp/
36
- server.ts # MCP server (20+ tools)
37
- install.ts # MCP installer for Claude/Codex/Gemini
38
- providers/
39
- base.ts # LLM provider interface
40
- anthropic.ts # Anthropic provider
41
- cerebras.ts # Cerebras provider (default)
42
- parsers/ # Structured output parsers
43
- search/ # Smart search (file, content, semantic)
44
- recipes/ # Reusable command templates
45
- ```
46
-
47
- ## How to Contribute
48
-
49
- ### Adding a new parser
50
- Parsers detect and structure specific command output types. See `src/parsers/` for examples. Each parser needs:
51
- - `detect(command, output)` — returns true if this parser can handle the output
52
- - `parse(command, output)` — returns structured data
53
-
54
- ### Adding a command rewrite rule
55
- See `src/command-rewriter.ts`. Add a pattern + rewrite function to the `rules` array.
56
-
57
- ### Adding an MCP tool
58
- See `src/mcp/server.ts`. Register with `server.tool(name, description, schema, handler)`.
59
-
60
- ## Running Tests
61
-
62
- ```bash
63
- bun test # All tests
64
- bun test src/parsers/ # Parser tests only
65
- bun test --coverage # With coverage
66
- ```
67
-
68
- ## Commit Convention
69
-
70
- We use conventional commits:
71
- - `feat:` — new feature
72
- - `fix:` — bug fix
73
- - `refactor:` — code restructuring
74
- - `test:` — adding tests
75
- - `docs:` — documentation
76
- - `chore:` — maintenance
77
-
78
- ## License
79
-
80
- Apache 2.0 — Copyright 2026 Hasna, Inc.