aiexecode 1.0.90 → 1.0.92

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aiexecode might be problematic.

Files changed (50)
  1. package/README.md +1 -0
  2. package/index.js +13 -11
  3. package/mcp-agent-lib/init.sh +3 -0
  4. package/mcp-agent-lib/package-lock.json +14 -1
  5. package/mcp-agent-lib/package.json +4 -6
  6. package/mcp-agent-lib/sampleFastMCPClient/client.py +25 -0
  7. package/mcp-agent-lib/sampleFastMCPClient/run.sh +3 -0
  8. package/mcp-agent-lib/sampleFastMCPServer/run.sh +3 -0
  9. package/mcp-agent-lib/sampleFastMCPServer/server.py +12 -0
  10. package/mcp-agent-lib/sampleFastMCPServerElicitationRequest/run.sh +3 -0
  11. package/mcp-agent-lib/sampleFastMCPServerElicitationRequest/server.py +43 -0
  12. package/mcp-agent-lib/sampleFastMCPServerRootsRequest/server.py +63 -0
  13. package/mcp-agent-lib/sampleMCPHost/index.js +182 -63
  14. package/mcp-agent-lib/sampleMCPHost/mcp_config.json +7 -1
  15. package/mcp-agent-lib/sampleMCPHostFeatures/elicitation.js +151 -0
  16. package/mcp-agent-lib/sampleMCPHostFeatures/index.js +166 -0
  17. package/mcp-agent-lib/sampleMCPHostFeatures/roots.js +197 -0
  18. package/mcp-agent-lib/src/mcp_client.js +129 -67
  19. package/mcp-agent-lib/src/mcp_message_logger.js +516 -0
  20. package/package.json +3 -1
  21. package/payload_viewer/out/404/index.html +1 -1
  22. package/payload_viewer/out/404.html +1 -1
  23. package/payload_viewer/out/index.html +1 -1
  24. package/payload_viewer/out/index.txt +1 -1
  25. package/src/LLMClient/client.js +992 -0
  26. package/src/LLMClient/converters/input-normalizer.js +238 -0
  27. package/src/LLMClient/converters/responses-to-claude.js +454 -0
  28. package/src/LLMClient/converters/responses-to-gemini.js +648 -0
  29. package/src/LLMClient/converters/responses-to-ollama.js +348 -0
  30. package/src/LLMClient/errors.js +372 -0
  31. package/src/LLMClient/index.js +31 -0
  32. package/src/commands/apikey.js +10 -22
  33. package/src/commands/model.js +28 -28
  34. package/src/commands/reasoning_effort.js +9 -23
  35. package/src/config/ai_models.js +212 -0
  36. package/src/config/feature_flags.js +1 -1
  37. package/src/frontend/App.js +5 -10
  38. package/src/frontend/components/CurrentModelView.js +0 -33
  39. package/src/frontend/components/Footer.js +3 -3
  40. package/src/frontend/components/ModelListView.js +30 -87
  41. package/src/frontend/components/ModelUpdatedView.js +7 -142
  42. package/src/frontend/components/SetupWizard.js +37 -32
  43. package/src/system/ai_request.js +57 -42
  44. package/src/util/config.js +26 -4
  45. package/src/util/setup_wizard.js +1 -6
  46. package/mcp-agent-lib/.claude/settings.local.json +0 -9
  47. package/src/config/openai_models.js +0 -152
  48. /package/payload_viewer/out/_next/static/{w4dMVYalgk7djrLxRxWiE → d0-fu2rgYnshgGFPxr1CR}/_buildManifest.js +0 -0
  49. /package/payload_viewer/out/_next/static/{w4dMVYalgk7djrLxRxWiE → d0-fu2rgYnshgGFPxr1CR}/_clientMiddlewareManifest.json +0 -0
  50. /package/payload_viewer/out/_next/static/{w4dMVYalgk7djrLxRxWiE → d0-fu2rgYnshgGFPxr1CR}/_ssgManifest.js +0 -0
package/src/LLMClient/converters/responses-to-ollama.js
@@ -0,0 +1,348 @@
+/**
+ * Convert Responses API format to Ollama format
+ */
+
+/**
+ * Convert Responses API request to Ollama format
+ * @param {Object} responsesRequest - Responses API format request
+ * @param {string} baseUrl - Ollama base URL
+ * @returns {Object} { url, request } - Ollama API endpoint and request body
+ */
+export function convertResponsesRequestToOllamaFormat(responsesRequest, baseUrl = 'http://localhost:11434') {
+  const ollamaRequest = {
+    model: responsesRequest.model || 'llama3.2',
+    messages: []
+  };
+
+  // Convert input to messages
+  if (typeof responsesRequest.input === 'string') {
+    // Simple string input
+    ollamaRequest.messages.push({
+      role: 'user',
+      content: responsesRequest.input
+    });
+  } else if (Array.isArray(responsesRequest.input)) {
+    // Array input - could be messages or items
+    const toolCalls = []; // Collect tool calls to add to assistant message
+    const callIdToName = {}; // Map call_id to function name
+
+    for (const item of responsesRequest.input) {
+      if (item.type === 'function_call') {
+        // Store call_id -> name mapping
+        if (item.call_id && item.name) {
+          callIdToName[item.call_id] = item.name;
+        }
+
+        // Function call - convert to Ollama tool_calls format (no id or type fields)
+        toolCalls.push({
+          function: {
+            name: item.name,
+            arguments: JSON.parse(item.arguments) // Ollama expects object, not string
+          }
+        });
+      } else if (item.type === 'function_call_output') {
+        // If we have pending tool calls, add assistant message with tool_calls first
+        if (toolCalls.length > 0) {
+          ollamaRequest.messages.push({
+            role: 'assistant',
+            content: '',
+            tool_calls: [...toolCalls]
+          });
+          toolCalls.length = 0; // Clear the array
+        }
+
+        // Function call output - convert to tool message (Ollama uses tool_name, not tool_call_id)
+        // Look up the function name from the call_id
+        const toolName = callIdToName[item.call_id] || '';
+        ollamaRequest.messages.push({
+          role: 'tool',
+          content: item.output,
+          tool_name: toolName
+        });
+      } else if (item.role && item.content) {
+        // If we have pending tool calls, add assistant message with tool_calls first
+        if (toolCalls.length > 0) {
+          ollamaRequest.messages.push({
+            role: 'assistant',
+            content: '',
+            tool_calls: [...toolCalls]
+          });
+          toolCalls.length = 0; // Clear the array
+        }
+
+        // Message format
+        // Handle content that might be an array (OpenAI Responses API format)
+        const content = Array.isArray(item.content)
+          ? item.content.map(c => c.type === 'input_text' || c.type === 'text' ? c.text : c).filter(Boolean).join('\n')
+          : item.content;
+
+        if (item.role === 'system') {
+          // System message
+          ollamaRequest.messages.push({
+            role: 'system',
+            content: content
+          });
+        } else if (item.role === 'tool') {
+          // Tool result
+          ollamaRequest.messages.push({
+            role: 'tool',
+            content: content
+          });
+        } else if (item.role === 'assistant') {
+          // Assistant message
+          ollamaRequest.messages.push({
+            role: 'assistant',
+            content: content
+          });
+        } else {
+          // User message
+          ollamaRequest.messages.push({
+            role: 'user',
+            content: content
+          });
+        }
+      }
+    }
+
+    // If there are remaining tool calls at the end, add them
+    if (toolCalls.length > 0) {
+      ollamaRequest.messages.push({
+        role: 'assistant',
+        content: '',
+        tool_calls: [...toolCalls]
+      });
+    }
+  }
+
+  // Add system instruction if provided
+  if (responsesRequest.instructions) {
+    // Prepend system message
+    ollamaRequest.messages.unshift({
+      role: 'system',
+      content: responsesRequest.instructions
+    });
+  }
+
+  // Convert tools from Responses API format to Ollama format
+  // Ollama uses OpenAI-compatible tool format
+  if (responsesRequest.tools && Array.isArray(responsesRequest.tools)) {
+    ollamaRequest.tools = responsesRequest.tools.map(tool => {
+      if (tool.type === 'function' && tool.function) {
+        // Chat Completions format (already compatible)
+        return tool;
+      } else if (tool.type === 'custom') {
+        // Responses API format - convert to OpenAI function format
+        return {
+          type: 'function',
+          function: {
+            name: tool.name,
+            description: tool.description || `Tool: ${tool.name}`,
+            parameters: tool.input_schema || {
+              type: 'object',
+              properties: {}
+            }
+          }
+        };
+      } else if (tool.type === 'function' && !tool.function) {
+        // Flat function format - convert to nested format for Ollama
+        return {
+          type: 'function',
+          function: {
+            name: tool.name,
+            description: tool.description || `Tool: ${tool.name}`,
+            parameters: tool.parameters || {
+              type: 'object',
+              properties: {}
+            }
+          }
+        };
+      }
+      // Pass through
+      return tool;
+    });
+  }
+
+  // Options
+  const options = {};
+
+  // Temperature
+  if (responsesRequest.temperature !== undefined) {
+    options.temperature = responsesRequest.temperature;
+  }
+
+  // Top-p
+  if (responsesRequest.top_p !== undefined) {
+    options.top_p = responsesRequest.top_p;
+  }
+
+  // Max tokens (Ollama uses num_predict)
+  if (responsesRequest.max_output_tokens !== undefined) {
+    options.num_predict = responsesRequest.max_output_tokens;
+  }
+
+  if (Object.keys(options).length > 0) {
+    ollamaRequest.options = options;
+  }
+
+  // Tool choice (Responses API to Ollama)
+  // Ollama uses OpenAI-compatible tool_choice format
+  if (responsesRequest.tool_choice !== undefined) {
+    if (typeof responsesRequest.tool_choice === 'string') {
+      // 'auto', 'required', 'none'
+      ollamaRequest.tool_choice = responsesRequest.tool_choice;
+    } else if (responsesRequest.tool_choice?.type === 'function') {
+      // Specific tool
+      ollamaRequest.tool_choice = responsesRequest.tool_choice;
+    } else if (responsesRequest.tool_choice?.type === 'custom') {
+      // Convert custom to function format for Ollama
+      ollamaRequest.tool_choice = {
+        type: 'function',
+        function: {
+          name: responsesRequest.tool_choice.name
+        }
+      };
+    }
+  }
+
+  // Stream setting (will be overridden by client if streaming)
+  ollamaRequest.stream = false;
+
+  return {
+    url: `${baseUrl}/api/chat`,
+    request: ollamaRequest
+  };
+}
+
+/**
+ * Convert Ollama response to Responses API format
+ * @param {Object} ollamaResponse - Ollama format response
+ * @param {string} model - Model name
+ * @param {Object} originalRequest - Original request for context
+ * @returns {Object} Responses API format response
+ */
+export function convertOllamaResponseToResponsesFormat(ollamaResponse, model, originalRequest = {}) {
+  const output = [];
+  let outputText = '';
+
+  // Process message content
+  if (ollamaResponse.message) {
+    const messageContent = [];
+
+    // Text content
+    if (ollamaResponse.message.content) {
+      messageContent.push({
+        type: 'output_text',
+        text: ollamaResponse.message.content,
+        annotations: []
+      });
+      outputText = ollamaResponse.message.content;
+    }
+
+    // Tool calls
+    if (ollamaResponse.message.tool_calls && Array.isArray(ollamaResponse.message.tool_calls)) {
+      for (const toolCall of ollamaResponse.message.tool_calls) {
+        const callId = toolCall.id || `call_${Date.now()}`;
+        output.push({
+          id: `fc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`,
+          type: 'function_call',
+          status: 'completed',
+          arguments: typeof toolCall.function?.arguments === 'string'
+            ? toolCall.function.arguments
+            : JSON.stringify(toolCall.function?.arguments || {}),
+          call_id: callId,
+          name: toolCall.function?.name || ''
+        });
+      }
+    }
+
+    // Add message with text content if any
+    if (messageContent.length > 0) {
+      output.push({
+        id: `msg_${Date.now()}`,
+        type: 'message',
+        status: 'completed',
+        role: 'assistant',
+        content: messageContent
+      });
+    }
+  }
+
+  // If no output items, create empty message
+  if (output.length === 0) {
+    output.push({
+      id: `msg_${Date.now()}`,
+      type: 'message',
+      status: 'completed',
+      role: 'assistant',
+      content: []
+    });
+  }
+
+  // Build Responses API response with ALL required fields
+  // Parse created_at from Ollama's ISO format to Unix timestamp
+  let createdAt = Math.floor(Date.now() / 1000);
+  if (ollamaResponse.created_at) {
+    if (typeof ollamaResponse.created_at === 'string') {
+      createdAt = Math.floor(new Date(ollamaResponse.created_at).getTime() / 1000);
+    } else if (typeof ollamaResponse.created_at === 'number') {
+      createdAt = ollamaResponse.created_at;
+    }
+  }
+
+  const responsesResponse = {
+    id: `resp_${Date.now()}`,
+    object: 'response',
+    created_at: createdAt,
+    status: ollamaResponse.done ? 'completed' : 'incomplete',
+    background: false,
+    billing: {
+      payer: 'developer'
+    },
+    error: null,
+    incomplete_details: null,
+    instructions: originalRequest.instructions || null,
+    max_output_tokens: originalRequest.max_output_tokens || null,
+    max_tool_calls: null,
+    model: model || ollamaResponse.model || 'llama3.2',
+    output: output,
+    parallel_tool_calls: true,
+    previous_response_id: null,
+    prompt_cache_key: null,
+    prompt_cache_retention: null,
+    reasoning: {
+      effort: originalRequest.reasoning?.effort || null,
+      summary: originalRequest.reasoning?.summary || null
+    },
+    safety_identifier: null,
+    service_tier: 'default',
+    store: originalRequest.store !== undefined ? originalRequest.store : true,
+    temperature: originalRequest.temperature !== undefined ? originalRequest.temperature : 1,
+    text: {
+      format: {
+        type: 'text'
+      },
+      verbosity: 'medium'
+    },
+    tool_choice: originalRequest.tool_choice || 'auto',
+    tools: originalRequest.tools || [],
+    top_logprobs: 0,
+    top_p: originalRequest.top_p !== undefined ? originalRequest.top_p : 1,
+    truncation: 'disabled',
+    usage: {
+      input_tokens: ollamaResponse.prompt_eval_count || 0,
+      input_tokens_details: {
+        cached_tokens: 0
+      },
+      output_tokens: ollamaResponse.eval_count || 0,
+      output_tokens_details: {
+        reasoning_tokens: 0
+      },
+      total_tokens: (ollamaResponse.prompt_eval_count || 0) + (ollamaResponse.eval_count || 0)
+    },
+    user: null,
+    metadata: {},
+    output_text: outputText
+  };
+
+  return responsesResponse;
+}
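
Taken together, the two converters form a round trip: a Responses-API-style request is reshaped for Ollama's /api/chat endpoint, and the reply is reshaped back into a Responses-API-style response. A minimal usage sketch, assuming a local Ollama server and Node 18+ for global fetch; the import path, request object, and wiring below are illustrative, not taken from the package:

// Hypothetical usage of the converters above; assumes a sibling module path.
import {
  convertResponsesRequestToOllamaFormat,
  convertOllamaResponseToResponsesFormat
} from './responses-to-ollama.js';

const responsesRequest = {
  model: 'llama3.2',
  instructions: 'You are a helpful assistant.',
  input: 'Why is the sky blue?',
  temperature: 0.7,
  max_output_tokens: 256
};

// Build the Ollama endpoint URL and request body.
const { url, request } = convertResponsesRequestToOllamaFormat(responsesRequest);

const res = await fetch(url, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify(request)
});

// Convert the non-streaming /api/chat reply back into a
// Responses-API-shaped object, including usage token counts.
const ollamaResponse = await res.json();
const responsesResponse = convertOllamaResponseToResponsesFormat(
  ollamaResponse,
  request.model,
  responsesRequest
);
console.log(responsesResponse.output_text);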
package/src/LLMClient/errors.js
@@ -0,0 +1,372 @@
+/**
+ * OpenAI-compatible error handling
+ */
+
+/**
+ * OpenAI API Error class
+ * Follows OpenAI Responses API error format
+ */
+export class LLMError extends Error {
+  constructor(message, options = {}) {
+    super(message);
+    this.name = 'LLMError';
+
+    // OpenAI error structure
+    this.error = {
+      message: message,
+      type: options.type || 'api_error',
+      param: options.param || null,
+      code: options.code || null
+    };
+
+    // HTTP status code (if applicable)
+    this.status = options.status || 500;
+
+    // Original error (for debugging)
+    this.originalError = options.originalError || null;
+
+    // Provider information
+    this.provider = options.provider || 'unknown';
+
+    // Store original request for response format
+    this.request = options.request || {};
+  }
+
+  /**
+   * Convert to OpenAI Responses API error format
+   */
+  toResponsesFormat() {
+    return {
+      id: `resp_error_${Date.now()}`,
+      object: 'response',
+      created_at: Math.floor(Date.now() / 1000),
+      status: 'failed',
+      background: false,
+      billing: {
+        payer: 'developer'
+      },
+      error: {
+        type: this.error.type,
+        message: this.error.message,
+        code: this.error.code,
+        param: this.error.param
+      },
+      incomplete_details: null,
+      instructions: this.request.instructions || null,
+      max_output_tokens: this.request.max_output_tokens || null,
+      max_tool_calls: null,
+      model: this.request.model || null,
+      output: [],
+      parallel_tool_calls: true,
+      previous_response_id: null,
+      prompt_cache_key: null,
+      prompt_cache_retention: null,
+      reasoning: {
+        effort: this.request.reasoning?.effort || null,
+        summary: this.request.reasoning?.summary || null
+      },
+      safety_identifier: null,
+      service_tier: 'default',
+      store: this.request.store !== undefined ? this.request.store : true,
+      temperature: this.request.temperature !== undefined ? this.request.temperature : 1,
+      text: {
+        format: {
+          type: 'text'
+        },
+        verbosity: 'medium'
+      },
+      tool_choice: this.request.tool_choice || 'auto',
+      tools: this.request.tools || [],
+      top_logprobs: 0,
+      top_p: this.request.top_p !== undefined ? this.request.top_p : 1,
+      truncation: 'disabled',
+      usage: {
+        input_tokens: 0,
+        input_tokens_details: {
+          cached_tokens: 0
+        },
+        output_tokens: 0,
+        output_tokens_details: {
+          reasoning_tokens: 0
+        },
+        total_tokens: 0
+      },
+      user: null,
+      metadata: {},
+      output_text: ''
+    };
+  }
+
+  /**
+   * Convert to JSON (Legacy OpenAI error format)
+   */
+  toJSON() {
+    return {
+      error: this.error
+    };
+  }
+
+  /**
+   * Convert to string
+   */
+  toString() {
+    return `${this.provider} Error [${this.error.type}]: ${this.error.message}`;
+  }
+}
+
+/**
+ * Map HTTP status codes to OpenAI error types
+ */
+const ERROR_TYPE_MAP = {
+  400: 'invalid_request_error',
+  401: 'authentication_error',
+  403: 'permission_error',
+  404: 'not_found_error',
+  429: 'rate_limit_error',
+  500: 'api_error',
+  502: 'api_error',
+  503: 'api_error'
+};
+
+/**
+ * Create LLMError from HTTP response
+ */
+export async function createErrorFromResponse(response, provider) {
+  const status = response.status;
+  let errorData = null;
+
+  try {
+    const text = await response.text();
+    errorData = JSON.parse(text);
+  } catch (e) {
+    // Failed to parse JSON
+    return new LLMError(
+      `HTTP ${status}: ${response.statusText}`,
+      {
+        type: ERROR_TYPE_MAP[status] || 'api_error',
+        status: status,
+        provider: provider
+      }
+    );
+  }
+
+  // Extract error information
+  let message = errorData.error?.message || errorData.message || response.statusText;
+  let type = errorData.error?.type || ERROR_TYPE_MAP[status] || 'api_error';
+  let param = errorData.error?.param || null;
+  let code = errorData.error?.code || null;
+
+  return new LLMError(message, {
+    type: type,
+    param: param,
+    code: code,
+    status: status,
+    provider: provider,
+    originalError: errorData
+  });
+}
+
+/**
+ * Convert provider-specific error to LLMError
+ */
+export function normalizeError(error, provider) {
+  // Already an LLMError
+  if (error instanceof LLMError) {
+    return error;
+  }
+
+  // Anthropic SDK error (check BEFORE OpenAI SDK error, as both have error.status && error.error)
+  if (provider === 'claude' && error.status) {
+    // Parse Claude error from error.error object (Anthropic SDK provides parsed error)
+    let errorType = ERROR_TYPE_MAP[error.status] || 'api_error';
+    let code = null;
+    let message = error.message || 'Claude API error';
+
+    // Anthropic SDK provides error.error object with structure:
+    // { type: 'error', error: { type: 'not_found_error', message: '...' } }
+    if (error.error && error.error.error) {
+      const claudeError = error.error.error;
+
+      if (claudeError.type) {
+        code = claudeError.type;
+
+        // Map Claude error types to OpenAI error types
+        const claudeType = claudeError.type;
+        if (claudeType === 'not_found_error') {
+          errorType = 'not_found_error';
+        } else if (claudeType === 'invalid_request_error') {
+          errorType = 'invalid_request_error';
+        } else if (claudeType === 'authentication_error') {
+          errorType = 'authentication_error';
+        } else if (claudeType === 'permission_error') {
+          errorType = 'permission_error';
+        } else if (claudeType === 'rate_limit_error') {
+          errorType = 'rate_limit_error';
+        } else if (claudeType === 'overloaded_error') {
+          errorType = 'api_error';
+        }
+      }
+
+      // Use the actual error message from Claude
+      if (claudeError.message) {
+        message = claudeError.message;
+      }
+    }
+
+    return new LLMError(
+      message,
+      {
+        type: errorType,
+        param: null,
+        code: code,
+        status: error.status,
+        provider: 'claude',
+        originalError: error
+      }
+    );
+  }
+
+  // OpenAI SDK error
+  if (error.status && error.error) {
+    return new LLMError(
+      error.error.message || error.message,
+      {
+        type: error.error.type || ERROR_TYPE_MAP[error.status] || 'api_error',
+        param: error.error.param || null,
+        code: error.error.code || null,
+        status: error.status,
+        provider: provider,
+        originalError: error
+      }
+    );
+  }
+
+  // Google Generative AI error
+  if (provider === 'gemini') {
+    // Extract status from error message if available
+    let status = 500;
+    const statusMatch = error.message?.match(/\[(\d+)\s+/);
+    if (statusMatch) {
+      status = parseInt(statusMatch[1]);
+    }
+
+    return new LLMError(
+      error.message || 'Gemini API error',
+      {
+        type: ERROR_TYPE_MAP[status] || 'api_error',
+        param: null,
+        code: null,
+        status: status,
+        provider: 'gemini',
+        originalError: error
+      }
+    );
+  }
+
+  // Ollama error
+  if (provider === 'ollama') {
+    return new LLMError(
+      error.message || 'Ollama API error',
+      {
+        type: 'api_error',
+        param: null,
+        code: null,
+        status: error.status || 500,
+        provider: 'ollama',
+        originalError: error
+      }
+    );
+  }
+
+  // Network errors (connection refused, DNS errors, etc)
+  if (error.code === 'ECONNREFUSED' || error.code === 'ENOTFOUND' || error.code === 'ETIMEDOUT') {
+    return new LLMError(
+      error.message || 'Network connection error',
+      {
+        type: 'api_error',
+        param: null,
+        code: error.code,
+        status: 500,
+        provider: provider,
+        originalError: error
+      }
+    );
+  }
+
+  // TypeError (usually from URL parsing or network issues)
+  if (error instanceof TypeError) {
+    return new LLMError(
+      error.message || 'Request error',
+      {
+        type: 'api_error',
+        param: null,
+        code: null,
+        status: 500,
+        provider: provider,
+        originalError: error
+      }
+    );
+  }
+
+  // Generic error
+  return new LLMError(
+    error.message || 'Unknown error',
+    {
+      type: 'api_error',
+      status: 500,
+      provider: provider,
+      originalError: error
+    }
+  );
+}
+
+/**
+ * Specific error types for common cases
+ */
+
+export class AuthenticationError extends LLMError {
+  constructor(message, provider) {
+    super(message, {
+      type: 'authentication_error',
+      code: 'invalid_api_key',
+      status: 401,
+      provider: provider
+    });
+    this.name = 'AuthenticationError';
+  }
+}
+
+export class InvalidRequestError extends LLMError {
+  constructor(message, param, provider) {
+    super(message, {
+      type: 'invalid_request_error',
+      param: param,
+      status: 400,
+      provider: provider
+    });
+    this.name = 'InvalidRequestError';
+  }
+}
+
+export class RateLimitError extends LLMError {
+  constructor(message, provider) {
+    super(message, {
+      type: 'rate_limit_error',
+      code: 'rate_limit_exceeded',
+      status: 429,
+      provider: provider
+    });
+    this.name = 'RateLimitError';
+  }
+}
+
+export class NotFoundError extends LLMError {
+  constructor(message, provider) {
+    super(message, {
+      type: 'not_found_error',
+      status: 404,
+      provider: provider
+    });
+    this.name = 'NotFoundError';
+  }
+}
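
As a usage sketch: a provider wrapper can funnel any thrown error through normalizeError and hand callers either the legacy { error } JSON or a failed Responses-API object. The callOllama wrapper below is hypothetical; only normalizeError, createErrorFromResponse, and the LLMError methods come from the module above, and the import path and fetch wiring are assumptions:

import { normalizeError, createErrorFromResponse } from './errors.js';

// Hypothetical wrapper; assumes Node 18+ global fetch.
async function callOllama(url, request) {
  try {
    const res = await fetch(url, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(request)
    });
    if (!res.ok) {
      // Build an LLMError from the HTTP error body.
      throw await createErrorFromResponse(res, 'ollama');
    }
    return await res.json();
  } catch (err) {
    // normalizeError returns LLMError instances unchanged and wraps
    // everything else (network errors, TypeErrors, ...) uniformly.
    const llmError = normalizeError(err, 'ollama');
    console.error(llmError.toString()); // e.g. "ollama Error [api_error]: ..."
    return llmError.toResponsesFormat(); // Responses-API object with status: 'failed'
  }
}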