joonecli 0.1.1 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. package/dist/cli/index.js +4 -1
  2. package/dist/cli/index.js.map +1 -1
  3. package/dist/commands/builtinCommands.js +6 -6
  4. package/dist/commands/builtinCommands.js.map +1 -1
  5. package/dist/commands/commandRegistry.d.ts +3 -1
  6. package/dist/commands/commandRegistry.js.map +1 -1
  7. package/dist/core/agentLoop.d.ts +3 -1
  8. package/dist/core/agentLoop.js +17 -7
  9. package/dist/core/agentLoop.js.map +1 -1
  10. package/dist/core/compactor.js +2 -2
  11. package/dist/core/compactor.js.map +1 -1
  12. package/dist/core/contextGuard.d.ts +5 -0
  13. package/dist/core/contextGuard.js +30 -3
  14. package/dist/core/contextGuard.js.map +1 -1
  15. package/dist/core/events.d.ts +45 -0
  16. package/dist/core/events.js +8 -0
  17. package/dist/core/events.js.map +1 -0
  18. package/dist/core/sessionStore.js +3 -2
  19. package/dist/core/sessionStore.js.map +1 -1
  20. package/dist/core/subAgent.js +2 -2
  21. package/dist/core/subAgent.js.map +1 -1
  22. package/dist/core/tokenCounter.d.ts +8 -1
  23. package/dist/core/tokenCounter.js +28 -0
  24. package/dist/core/tokenCounter.js.map +1 -1
  25. package/dist/middleware/permission.js +1 -0
  26. package/dist/middleware/permission.js.map +1 -1
  27. package/dist/tools/browser.js +4 -1
  28. package/dist/tools/browser.js.map +1 -1
  29. package/dist/tools/index.d.ts +2 -1
  30. package/dist/tools/index.js +11 -3
  31. package/dist/tools/index.js.map +1 -1
  32. package/dist/tools/installHostDeps.d.ts +2 -0
  33. package/dist/tools/installHostDeps.js +37 -0
  34. package/dist/tools/installHostDeps.js.map +1 -0
  35. package/dist/tools/router.js +1 -0
  36. package/dist/tools/router.js.map +1 -1
  37. package/dist/tools/spawnAgent.js +3 -1
  38. package/dist/tools/spawnAgent.js.map +1 -1
  39. package/dist/tracing/sessionTracer.d.ts +1 -0
  40. package/dist/tracing/sessionTracer.js +4 -1
  41. package/dist/tracing/sessionTracer.js.map +1 -1
  42. package/dist/ui/App.js +6 -1
  43. package/dist/ui/App.js.map +1 -1
  44. package/dist/ui/components/ActionLog.d.ts +7 -0
  45. package/dist/ui/components/ActionLog.js +63 -0
  46. package/dist/ui/components/ActionLog.js.map +1 -0
  47. package/dist/ui/components/FileBrowser.d.ts +2 -0
  48. package/dist/ui/components/FileBrowser.js +41 -0
  49. package/dist/ui/components/FileBrowser.js.map +1 -0
  50. package/package.json +3 -5
  51. package/AGENTS.md +0 -56
  52. package/Handover.md +0 -115
  53. package/PROGRESS.md +0 -160
  54. package/docs/01_insights_and_patterns.md +0 -27
  55. package/docs/02_edge_cases_and_mitigations.md +0 -143
  56. package/docs/03_initial_implementation_plan.md +0 -66
  57. package/docs/04_tech_stack_proposal.md +0 -20
  58. package/docs/05_prd.md +0 -87
  59. package/docs/06_user_stories.md +0 -72
  60. package/docs/07_system_architecture.md +0 -138
  61. package/docs/08_roadmap.md +0 -200
  62. package/e2b/Dockerfile +0 -26
  63. package/src/__tests__/bootstrap.test.ts +0 -111
  64. package/src/__tests__/config.test.ts +0 -97
  65. package/src/__tests__/m55.test.ts +0 -238
  66. package/src/__tests__/middleware.test.ts +0 -219
  67. package/src/__tests__/modelFactory.test.ts +0 -63
  68. package/src/__tests__/optimizations.test.ts +0 -201
  69. package/src/__tests__/promptBuilder.test.ts +0 -141
  70. package/src/__tests__/sandbox.test.ts +0 -102
  71. package/src/__tests__/security.test.ts +0 -122
  72. package/src/__tests__/streaming.test.ts +0 -82
  73. package/src/__tests__/toolRouter.test.ts +0 -52
  74. package/src/__tests__/tools.test.ts +0 -146
  75. package/src/__tests__/tracing.test.ts +0 -196
  76. package/src/agents/agentRegistry.ts +0 -69
  77. package/src/agents/agentSpec.ts +0 -67
  78. package/src/agents/builtinAgents.ts +0 -142
  79. package/src/cli/config.ts +0 -124
  80. package/src/cli/index.ts +0 -742
  81. package/src/cli/modelFactory.ts +0 -174
  82. package/src/cli/postinstall.ts +0 -28
  83. package/src/cli/providers.ts +0 -107
  84. package/src/commands/builtinCommands.ts +0 -293
  85. package/src/commands/commandRegistry.ts +0 -194
  86. package/src/core/agentLoop.d.ts.map +0 -1
  87. package/src/core/agentLoop.ts +0 -312
  88. package/src/core/autoSave.ts +0 -95
  89. package/src/core/compactor.ts +0 -252
  90. package/src/core/contextGuard.ts +0 -129
  91. package/src/core/errors.ts +0 -202
  92. package/src/core/promptBuilder.d.ts.map +0 -1
  93. package/src/core/promptBuilder.ts +0 -139
  94. package/src/core/reasoningRouter.ts +0 -121
  95. package/src/core/retry.ts +0 -75
  96. package/src/core/sessionResumer.ts +0 -90
  97. package/src/core/sessionStore.ts +0 -216
  98. package/src/core/subAgent.ts +0 -339
  99. package/src/core/tokenCounter.ts +0 -64
  100. package/src/evals/dataset.ts +0 -67
  101. package/src/evals/evaluator.ts +0 -81
  102. package/src/hitl/bridge.ts +0 -160
  103. package/src/middleware/commandSanitizer.ts +0 -60
  104. package/src/middleware/loopDetection.ts +0 -63
  105. package/src/middleware/permission.ts +0 -72
  106. package/src/middleware/pipeline.ts +0 -75
  107. package/src/middleware/preCompletion.ts +0 -94
  108. package/src/middleware/types.ts +0 -45
  109. package/src/sandbox/bootstrap.ts +0 -121
  110. package/src/sandbox/manager.ts +0 -239
  111. package/src/sandbox/sync.ts +0 -157
  112. package/src/skills/loader.ts +0 -143
  113. package/src/skills/tools.ts +0 -99
  114. package/src/skills/types.ts +0 -13
  115. package/src/test_cache.ts +0 -72
  116. package/src/tools/askUser.ts +0 -47
  117. package/src/tools/browser.ts +0 -137
  118. package/src/tools/index.d.ts.map +0 -1
  119. package/src/tools/index.ts +0 -237
  120. package/src/tools/registry.ts +0 -198
  121. package/src/tools/router.ts +0 -78
  122. package/src/tools/security.ts +0 -220
  123. package/src/tools/spawnAgent.ts +0 -158
  124. package/src/tools/webSearch.ts +0 -142
  125. package/src/tracing/analyzer.ts +0 -265
  126. package/src/tracing/langsmith.ts +0 -63
  127. package/src/tracing/sessionTracer.ts +0 -202
  128. package/src/tracing/types.ts +0 -49
  129. package/src/types/valyu.d.ts +0 -37
  130. package/src/ui/App.tsx +0 -404
  131. package/src/ui/components/HITLPrompt.tsx +0 -119
  132. package/src/ui/components/Header.tsx +0 -51
  133. package/src/ui/components/MessageBubble.tsx +0 -46
  134. package/src/ui/components/StatusBar.tsx +0 -138
  135. package/src/ui/components/StreamingText.tsx +0 -48
  136. package/src/ui/components/ToolCallPanel.tsx +0 -80
  137. package/tests/commands/commands.test.ts +0 -356
  138. package/tests/core/compactor.test.ts +0 -217
  139. package/tests/core/retryAndErrors.test.ts +0 -164
  140. package/tests/core/sessionResumer.test.ts +0 -95
  141. package/tests/core/sessionStore.test.ts +0 -84
  142. package/tests/core/stability.test.ts +0 -165
  143. package/tests/core/subAgent.test.ts +0 -238
  144. package/tests/hitl/hitlBridge.test.ts +0 -115
  145. package/tsconfig.json +0 -16
  146. package/vitest.config.ts +0 -10
  147. package/vitest.out +0 -48
@@ -1,252 +0,0 @@
1
- /**
2
- * Conversation Compactor
3
- *
4
- * Replaces the simple string-based compaction with an LLM-powered summary.
5
- * Uses a dedicated compact prompt to produce a high-quality structured summary,
6
- * then injects a handoff prompt to orient the agent after compaction.
7
- *
8
- * Architecture:
9
- * 1. Evicted messages (all except keepLastN) are sent to a dedicated LLM call
10
- * 2. The LLM produces a structured markdown summary preserving critical context
11
- * 3. A handoff prompt is injected after the summary to guide the agent
12
- * 4. Falls back to string-based compaction if the LLM call fails
13
- */
14
-
15
- import { BaseMessage, SystemMessage, HumanMessage, AIMessage } from "@langchain/core/messages";
16
- import { BaseChatModel } from "@langchain/core/language_models/chat_models";
17
- import { Runnable } from "@langchain/core/runnables";
18
- import { countMessageTokens, estimateTokens } from "./tokenCounter.js";
19
-
20
- // ─── Compact Prompt ─────────────────────────────────────────────────────────────
21
-
22
- export const COMPACT_SYSTEM_PROMPT = `You are a conversation summarizer. Your task is to produce a structured reference document from the conversation below.
23
-
24
- PRESERVE ALL of the following (do NOT omit):
25
- - File paths created, edited, or deleted
26
- - Tool calls made and their outcomes (success or failure)
27
- - Key decisions made and the rationale behind them
28
- - Current task state — what has been completed and what remains
29
- - Errors encountered and how they were resolved
30
- - Code snippets or configurations that are critical to the ongoing task
31
-
32
- FORMAT as structured markdown with clear sections:
33
- ## Files Modified
34
- ## Decisions Made
35
- ## Current State
36
- ## Errors & Resolutions
37
-
38
- Be thorough but concise. Do NOT include conversational filler or pleasantries.`;
39
-
40
- // ─── Handoff Prompt ─────────────────────────────────────────────────────────────
41
-
42
- export function createHandoffPrompt(compactionTimestamp: string): string {
43
- return `[CONTEXT HANDOFF] Your earlier conversation has been compacted into the summary above.
44
- You are the same agent continuing the same task. Key points:
45
- - The summary preserves all file paths, decisions, tool outcomes, and errors
46
- - If you need details not included in the summary, re-read the relevant files using read_file
47
- - Continue from where the conversation left off — do NOT redo work described in the summary
48
- - Compacted at: ${compactionTimestamp}`;
49
- }
50
-
51
- // ─── Fast Model Defaults ────────────────────────────────────────────────────────
52
-
53
- /**
54
- * Maps each provider to its cheapest/fastest model for use in compaction
55
- * and sub-agent tasks. Users can override via config.compactModel.
56
- */
57
- export const FAST_MODEL_DEFAULTS: Record<string, string> = {
58
- anthropic: "claude-3-haiku-20240307",
59
- openai: "gpt-4o-mini",
60
- google: "gemini-2.5-flash",
61
- mistral: "mistral-small-latest",
62
- groq: "mixtral-8x7b-32768",
63
- deepseek: "deepseek-chat",
64
- fireworks: "accounts/fireworks/models/llama-v3p1-70b-instruct",
65
- together: "meta-llama/Llama-3.1-70B-Instruct-Turbo",
66
- ollama: "mistral",
67
- };
68
-
69
- /**
70
- * Resolves the model to use for compaction/sub-agents.
71
- * Priority: explicit override > FAST_MODEL_DEFAULTS > main model (fallback)
72
- */
73
- export function resolveFastModel(
74
- provider: string,
75
- mainModel: string,
76
- override?: string
77
- ): string {
78
- if (override) return override;
79
- return FAST_MODEL_DEFAULTS[provider] ?? mainModel;
80
- }
81
-
82
- // ─── Compactor Options ──────────────────────────────────────────────────────────
83
-
84
- export interface CompactorOptions {
85
- /** Number of recent messages to preserve (default: 8) */
86
- keepLastN?: number;
87
- /** Max tokens for the summary output (default: 2000) */
88
- maxSummaryTokens?: number;
89
- }
90
-
91
- // ─── Compaction Result ──────────────────────────────────────────────────────────
92
-
93
- export interface CompactionResult {
94
- /** The new compacted conversation history */
95
- compactedHistory: BaseMessage[];
96
- /** Number of messages evicted */
97
- evictedCount: number;
98
- /** Token count before compaction */
99
- tokensBefore: number;
100
- /** Token count after compaction */
101
- tokensAfter: number;
102
- /** Whether the LLM was used (true) or fallback was used (false) */
103
- llmUsed: boolean;
104
- }
105
-
106
- // ─── ConversationCompactor ──────────────────────────────────────────────────────
107
-
108
- export class ConversationCompactor {
109
-
110
- /**
111
- * Compact a conversation history using an LLM to generate a structured summary.
112
- *
113
- * @param history — The full conversation history
114
- * @param llm — The LLM to use for summarization (should be a fast/cheap model)
115
- * @param options — Compaction options
116
- * @returns CompactionResult with the new compacted history
117
- */
118
- async compact(
119
- history: BaseMessage[],
120
- llm: Runnable | BaseChatModel,
121
- options: CompactorOptions = {}
122
- ): Promise<CompactionResult> {
123
- const keepLastN = options.keepLastN ?? 8;
124
- const maxSummaryTokens = options.maxSummaryTokens ?? 2000;
125
-
126
- if (history.length <= keepLastN) {
127
- return {
128
- compactedHistory: history,
129
- evictedCount: 0,
130
- tokensBefore: countMessageTokens(history),
131
- tokensAfter: countMessageTokens(history),
132
- llmUsed: false,
133
- };
134
- }
135
-
136
- const tokensBefore = countMessageTokens(history);
137
- const evictedMessages = history.slice(0, history.length - keepLastN);
138
- const recentMessages = history.slice(-keepLastN);
139
- const evictedCount = evictedMessages.length;
140
-
141
- // Try LLM-powered compaction
142
- try {
143
- const summaryText = await this.generateLLMSummary(
144
- evictedMessages,
145
- llm,
146
- maxSummaryTokens
147
- );
148
-
149
- const summaryMessage = new HumanMessage(
150
- `<system-summary>\n[COMPACTED CONVERSATION SUMMARY]\n${summaryText}\n</system-summary>`
151
- );
152
-
153
- const handoffMessage = new HumanMessage(
154
- `<system-handoff>\n${createHandoffPrompt(new Date().toISOString())}\n</system-handoff>`
155
- );
156
-
157
- const compactedHistory = [summaryMessage, handoffMessage, ...recentMessages];
158
- const tokensAfter = countMessageTokens(compactedHistory);
159
-
160
- return {
161
- compactedHistory,
162
- evictedCount,
163
- tokensBefore,
164
- tokensAfter,
165
- llmUsed: true,
166
- };
167
- } catch (error) {
168
- // Fallback to string-based compaction
169
- return this.fallbackCompact(history, evictedMessages, recentMessages, tokensBefore);
170
- }
171
- }
172
-
173
- /**
174
- * Generates a structured summary using the LLM.
175
- */
176
- private async generateLLMSummary(
177
- evictedMessages: BaseMessage[],
178
- llm: Runnable | BaseChatModel,
179
- maxSummaryTokens: number
180
- ): Promise<string> {
181
- // Construct the prompt for the compactor LLM
182
- const compactPrompt = new HumanMessage(
183
- `<system-directive>\n${COMPACT_SYSTEM_PROMPT}\n\nKeep your summary under ${maxSummaryTokens} tokens.\n</system-directive>`
184
- );
185
-
186
- // Convert evicted messages into a readable format for the summarizer
187
- const conversationText = evictedMessages
188
- .map((msg) => {
189
- const role = msg._getType();
190
- const content = typeof msg.content === "string"
191
- ? msg.content
192
- : JSON.stringify(msg.content);
193
- return `[${role}]: ${content}`;
194
- })
195
- .join("\n\n");
196
-
197
- const summaryRequest = new HumanMessage(
198
- `Summarize this conversation:\n\n${conversationText}`
199
- );
200
-
201
- const response = await llm.invoke([compactPrompt, summaryRequest]);
202
-
203
- // Extract text from response
204
- if (typeof response === "string") return response;
205
- if ("content" in response && typeof response.content === "string") {
206
- return response.content;
207
- }
208
-
209
- throw new Error("Unexpected LLM response format during compaction");
210
- }
211
-
212
- /**
213
- * Fallback compaction using a simple heuristic summary (no LLM call).
214
- * Used when the LLM call fails.
215
- */
216
- private fallbackCompact(
217
- _fullHistory: BaseMessage[],
218
- evictedMessages: BaseMessage[],
219
- recentMessages: BaseMessage[],
220
- tokensBefore: number
221
- ): CompactionResult {
222
- // Build a basic summary from message roles and lengths
223
- const humanMsgCount = evictedMessages.filter(m => m._getType() === "human").length;
224
- const aiMsgCount = evictedMessages.filter(m => m._getType() === "ai").length;
225
- const toolMsgCount = evictedMessages.filter(m => m._getType() === "tool").length;
226
-
227
- const summaryText = [
228
- `[Fallback Compaction — LLM summary unavailable]`,
229
- `Evicted ${evictedMessages.length} messages:`,
230
- ` - ${humanMsgCount} user messages`,
231
- ` - ${aiMsgCount} agent responses`,
232
- ` - ${toolMsgCount} tool results`,
233
- `The conversation is continuing below.`,
234
- ].join("\n");
235
-
236
- const summaryMessage = new HumanMessage(`<system-summary>\n${summaryText}\n</system-summary>`);
237
- const handoffMessage = new HumanMessage(
238
- `<system-handoff>\n${createHandoffPrompt(new Date().toISOString())}\n</system-handoff>`
239
- );
240
-
241
- const compactedHistory = [summaryMessage, handoffMessage, ...recentMessages];
242
- const tokensAfter = countMessageTokens(compactedHistory);
243
-
244
- return {
245
- compactedHistory,
246
- evictedCount: evictedMessages.length,
247
- tokensBefore,
248
- tokensAfter,
249
- llmUsed: false,
250
- };
251
- }
252
- }
@@ -1,129 +0,0 @@
1
- /**
2
- * Context Guard
3
- *
4
- * Proactively monitors token usage during the agent loop and triggers auto-compaction
5
- * before the model's context window is exceeded.
6
- *
7
- * Thresholds:
8
- * - 80% (WARN): Triggers standard LLM-powered context compaction
9
- * - 95% (CRITICAL): Forces emergency truncation if compaction fails
10
- */
11
-
12
- import { BaseMessage, SystemMessage, HumanMessage } from "@langchain/core/messages";
13
- import { BaseChatModel } from "@langchain/core/language_models/chat_models";
14
- import { Runnable } from "@langchain/core/runnables";
15
- import { CacheOptimizedPromptBuilder, ContextState } from "./promptBuilder.js";
16
- import { countMessageTokens } from "./tokenCounter.js";
17
- import { createHandoffPrompt } from "./compactor.js";
18
-
19
- export interface ContextGuardMetrics {
20
- originalTokens: number;
21
- newTokens: number;
22
- messagesEvicted: number;
23
- actionTaken: "none" | "compacted" | "emergency_truncated";
24
- }
25
-
26
- export class ContextGuard {
27
- private promptBuilder: CacheOptimizedPromptBuilder;
28
- private llm: Runnable | BaseChatModel;
29
- private maxTokens: number;
30
-
31
- constructor(
32
- llm: Runnable | BaseChatModel,
33
- maxTokens: number,
34
- promptBuilder: CacheOptimizedPromptBuilder = new CacheOptimizedPromptBuilder()
35
- ) {
36
- this.llm = llm;
37
- this.maxTokens = maxTokens;
38
- this.promptBuilder = promptBuilder;
39
- }
40
-
41
- /**
42
- * Checks the token usage of the current state and compacts if necessary.
43
- * Returns updated state and metrics about the action taken.
44
- */
45
- async ensureCapacity(
46
- state: ContextState,
47
- warnThreshold = 0.8,
48
- criticalThreshold = 0.95
49
- ): Promise<{ state: ContextState; metrics: ContextGuardMetrics }> {
50
- const fullPrompt = this.promptBuilder.buildPrompt(state);
51
- const tokenCount = countMessageTokens(fullPrompt);
52
-
53
- // 1. Under limit — do nothing
54
- if (tokenCount < this.maxTokens * warnThreshold) {
55
- return {
56
- state,
57
- metrics: {
58
- originalTokens: tokenCount,
59
- newTokens: tokenCount,
60
- messagesEvicted: 0,
61
- actionTaken: "none",
62
- },
63
- };
64
- }
65
-
66
- // 2. Over WARN but below CRITICAL — try standard LLM compaction
67
- if (tokenCount < this.maxTokens * criticalThreshold) {
68
- const result = await this.promptBuilder.compactHistoryWithLLM(
69
- state.conversationHistory,
70
- this.llm,
71
- 8 // keep last 8 messages
72
- );
73
-
74
- return {
75
- state: {
76
- ...state,
77
- conversationHistory: result.compactedHistory,
78
- },
79
- metrics: {
80
- originalTokens: tokenCount,
81
- newTokens: result.tokensAfter,
82
- messagesEvicted: result.evictedCount,
83
- actionTaken: "compacted",
84
- },
85
- };
86
- }
87
-
88
- // 3. CRITICAL overflow (or standard compaction didn't free enough space)
89
- // Emergency truncation: drop everything except the last 4 messages and inject an emergency handoff
90
- const keepLast = 4;
91
-
92
- // If we're already at or below 4 messages, we literally can't truncate more
93
- if (state.conversationHistory.length <= keepLast) {
94
- return {
95
- state,
96
- metrics: { originalTokens: tokenCount, newTokens: tokenCount, messagesEvicted: 0, actionTaken: "none" }
97
- };
98
- }
99
-
100
- const recentMsgs = state.conversationHistory.slice(-keepLast);
101
- const evictedCount = state.conversationHistory.length - keepLast;
102
-
103
- const emergencySystemMsg = new HumanMessage(
104
- `<system-alert>\n[EMERGENCY CONTEXT TRUNCATION]\n` +
105
- `The conversation exceeded the maximum context window (${this.maxTokens} tokens). ` +
106
- `Older messages were aggressively deleted without summarization to prevent an immediate crash.\n` +
107
- `You are the same agent. ` + createHandoffPrompt(new Date().toISOString()) + `\n</system-alert>`
108
- );
109
-
110
- const newHistory = [emergencySystemMsg, ...recentMsgs];
111
- const newTokens = countMessageTokens(this.promptBuilder.buildPrompt({
112
- ...state,
113
- conversationHistory: newHistory
114
- }));
115
-
116
- return {
117
- state: {
118
- ...state,
119
- conversationHistory: newHistory,
120
- },
121
- metrics: {
122
- originalTokens: tokenCount,
123
- newTokens,
124
- messagesEvicted: evictedCount,
125
- actionTaken: "emergency_truncated",
126
- },
127
- };
128
- }
129
- }
@@ -1,202 +0,0 @@
1
- /**
2
- * JooneError — Structured error hierarchy for the Joone agent.
3
- *
4
- * Every error in the system carries:
5
- * - `category`: A machine-readable classification (e.g., "llm_api", "sandbox", "tool").
6
- * - `retryable`: Whether the operation that caused this error is safe to retry.
7
- * - `context`: Arbitrary structured metadata for debugging.
8
- * - `toRecoveryHint()`: A human-readable string the LLM can use to self-correct.
9
- */
10
-
11
- export type ErrorCategory = "llm_api" | "sandbox" | "tool" | "config" | "network" | "unknown";
12
-
13
- export class JooneError extends Error {
14
- public readonly category: ErrorCategory;
15
- public readonly retryable: boolean;
16
- public readonly context: Record<string, unknown>;
17
-
18
- constructor(
19
- message: string,
20
- opts: {
21
- category: ErrorCategory;
22
- retryable: boolean;
23
- context?: Record<string, unknown>;
24
- cause?: Error;
25
- }
26
- ) {
27
- super(message);
28
- this.name = "JooneError";
29
- this.category = opts.category;
30
- this.retryable = opts.retryable;
31
- this.context = opts.context ?? {};
32
- if (opts.cause) {
33
- this.cause = opts.cause;
34
- }
35
- }
36
-
37
- /**
38
- * Returns a hint string that can be injected into the LLM's conversation
39
- * so it can adapt its behavior instead of crashing.
40
- */
41
- toRecoveryHint(): string {
42
- return `[SYSTEM ERROR — ${this.category.toUpperCase()}]: ${this.message}`;
43
- }
44
- }
45
-
46
- // ─── LLM API Errors ─────────────────────────────────────────────────────────────
47
-
48
- export class LLMApiError extends JooneError {
49
- public readonly statusCode: number | undefined;
50
- public readonly provider: string;
51
-
52
- constructor(
53
- message: string,
54
- opts: {
55
- statusCode?: number;
56
- provider: string;
57
- retryable: boolean;
58
- headers?: Record<string, string>;
59
- cause?: Error;
60
- }
61
- ) {
62
- super(message, {
63
- category: "llm_api",
64
- retryable: opts.retryable,
65
- context: {
66
- statusCode: opts.statusCode,
67
- provider: opts.provider,
68
- retryAfter: opts.headers?.["retry-after"],
69
- },
70
- cause: opts.cause,
71
- });
72
- this.name = "LLMApiError";
73
- this.statusCode = opts.statusCode;
74
- this.provider = opts.provider;
75
- }
76
-
77
- toRecoveryHint(): string {
78
- if (this.statusCode === 429) {
79
- const retryAfter = this.context.retryAfter;
80
- return (
81
- `[SYSTEM ERROR — RATE LIMITED]: The ${this.provider} API returned a 429 rate limit error. ` +
82
- (retryAfter
83
- ? `Retry after ${retryAfter} seconds. `
84
- : "Wait a moment before trying again. ") +
85
- "Consider simplifying your request or reducing the number of tool calls per turn."
86
- );
87
- }
88
- if (this.statusCode === 401 || this.statusCode === 403) {
89
- return (
90
- `[SYSTEM ERROR — AUTH FAILURE]: The ${this.provider} API rejected the credentials (HTTP ${this.statusCode}). ` +
91
- "This is a fatal configuration error. Ask the user to verify their API key."
92
- );
93
- }
94
- return (
95
- `[SYSTEM ERROR — LLM API]: The ${this.provider} API returned an error` +
96
- (this.statusCode ? ` (HTTP ${this.statusCode})` : "") +
97
- `. ${this.message}`
98
- );
99
- }
100
- }
101
-
102
- // ─── Sandbox Errors ──────────────────────────────────────────────────────────────
103
-
104
- export class SandboxError extends JooneError {
105
- public readonly sandboxProvider: string;
106
-
107
- constructor(
108
- message: string,
109
- opts: {
110
- sandboxProvider: string;
111
- retryable: boolean;
112
- cause?: Error;
113
- }
114
- ) {
115
- super(message, {
116
- category: "sandbox",
117
- retryable: opts.retryable,
118
- context: { sandboxProvider: opts.sandboxProvider },
119
- cause: opts.cause,
120
- });
121
- this.name = "SandboxError";
122
- this.sandboxProvider = opts.sandboxProvider;
123
- }
124
-
125
- toRecoveryHint(): string {
126
- return (
127
- `[SYSTEM ERROR — SANDBOX]: The ${this.sandboxProvider} sandbox encountered an error: ${this.message}. ` +
128
- "The sandbox may have been recycled. Try running the command again."
129
- );
130
- }
131
- }
132
-
133
- // ─── Tool Execution Errors ───────────────────────────────────────────────────────
134
-
135
- export class ToolExecutionError extends JooneError {
136
- public readonly toolName: string;
137
-
138
- constructor(
139
- message: string,
140
- opts: {
141
- toolName: string;
142
- args?: Record<string, unknown>;
143
- retryable: boolean;
144
- cause?: Error;
145
- }
146
- ) {
147
- super(message, {
148
- category: "tool",
149
- retryable: opts.retryable,
150
- context: { toolName: opts.toolName, args: opts.args },
151
- cause: opts.cause,
152
- });
153
- this.name = "ToolExecutionError";
154
- this.toolName = opts.toolName;
155
- }
156
-
157
- toRecoveryHint(): string {
158
- return (
159
- `[SYSTEM ERROR — TOOL]: The tool "${this.toolName}" failed: ${this.message}. ` +
160
- "Try a different approach or check the arguments you passed."
161
- );
162
- }
163
- }
164
-
165
- // ─── Helpers ─────────────────────────────────────────────────────────────────────
166
-
167
- const RETRYABLE_STATUS_CODES = new Set([429, 500, 502, 503]);
168
- const RETRYABLE_ERROR_CODES = new Set(["ECONNRESET", "ETIMEDOUT", "ECONNREFUSED", "UND_ERR_CONNECT_TIMEOUT"]);
169
- const NON_RETRYABLE_STATUS_CODES = new Set([400, 401, 403, 404]);
170
-
171
- /**
172
- * Wraps a raw provider error into a structured `LLMApiError`.
173
- * Inspects the error for HTTP status codes, network error codes, etc.
174
- */
175
- export function wrapLLMError(error: unknown, provider: string): LLMApiError {
176
- if (error instanceof LLMApiError) return error;
177
-
178
- const err = error instanceof Error ? error : new Error(String(error));
179
- const statusCode = (err as any).status ?? (err as any).statusCode ?? (err as any).response?.status;
180
- const headers = (err as any).response?.headers ?? {};
181
- const code = (err as any).code as string | undefined;
182
-
183
- let retryable = false;
184
- if (typeof statusCode === "number") {
185
- retryable = RETRYABLE_STATUS_CODES.has(statusCode);
186
- } else if (code && RETRYABLE_ERROR_CODES.has(code)) {
187
- retryable = true;
188
- }
189
-
190
- // Non-retryable overrides
191
- if (typeof statusCode === "number" && NON_RETRYABLE_STATUS_CODES.has(statusCode)) {
192
- retryable = false;
193
- }
194
-
195
- return new LLMApiError(err.message, {
196
- statusCode,
197
- provider,
198
- retryable,
199
- headers,
200
- cause: err,
201
- });
202
- }
@@ -1 +0,0 @@
1
- {"version":3,"file":"promptBuilder.d.ts","sourceRoot":"","sources":["promptBuilder.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,WAAW,EAIZ,MAAM,0BAA0B,CAAC;AAElC,MAAM,WAAW,YAAY;IAC3B,wBAAwB,EAAE,MAAM,CAAC;IACjC,aAAa,EAAE,MAAM,CAAC;IACtB,cAAc,EAAE,MAAM,CAAC;IACvB,mBAAmB,EAAE,WAAW,EAAE,CAAC;CACpC;AAED;;;;;;;;;;GAUG;AACH,qBAAa,2BAA2B;IACtC;;;OAGG;IACI,WAAW,CAAC,KAAK,EAAE,YAAY,GAAG,WAAW,EAAE;IAwBtD;;;;OAIG;IACI,oBAAoB,CACzB,OAAO,EAAE,WAAW,EAAE,EACtB,QAAQ,EAAE,MAAM,GACf,WAAW,EAAE;IAOhB;;;;;OAKG;IACI,cAAc,CACnB,OAAO,EAAE,WAAW,EAAE,EACtB,OAAO,EAAE,MAAM,GACd,WAAW,EAAE;CAWjB"}