claude-flow 2.0.0-alpha.66 → 2.0.0-alpha.68

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. package/.claude/cache/agent-pool.json +33 -0
  2. package/.claude/cache/memory-optimization.json +19 -0
  3. package/.claude/cache/neural-optimization.json +25 -0
  4. package/.claude/cache/optimized-hooks.json +19 -0
  5. package/.claude/cache/parallel-processing.json +26 -0
  6. package/.claude/optimized-settings.json +270 -0
  7. package/.claude/settings-backup.json +186 -0
  8. package/.claude/settings-enhanced.json +278 -0
  9. package/.claude/settings-fixed.json +186 -0
  10. package/.claude/settings.json +105 -8
  11. package/CHANGELOG.md +38 -0
  12. package/bin/claude-flow +1 -1
  13. package/dist/cli/simple-commands/hive-mind.js +1 -1
  14. package/dist/cli/simple-commands/hive-mind.js.map +1 -1
  15. package/dist/cli/simple-commands/hooks.js +6 -4
  16. package/dist/cli/simple-commands/hooks.js.map +1 -1
  17. package/dist/providers/anthropic-provider.d.ts +27 -0
  18. package/dist/providers/anthropic-provider.d.ts.map +1 -0
  19. package/dist/providers/anthropic-provider.js +247 -0
  20. package/dist/providers/anthropic-provider.js.map +1 -0
  21. package/dist/providers/base-provider.d.ts +134 -0
  22. package/dist/providers/base-provider.d.ts.map +1 -0
  23. package/dist/providers/base-provider.js +407 -0
  24. package/dist/providers/base-provider.js.map +1 -0
  25. package/dist/providers/cohere-provider.d.ts +28 -0
  26. package/dist/providers/cohere-provider.d.ts.map +1 -0
  27. package/dist/providers/cohere-provider.js +407 -0
  28. package/dist/providers/cohere-provider.js.map +1 -0
  29. package/dist/providers/google-provider.d.ts +23 -0
  30. package/dist/providers/google-provider.d.ts.map +1 -0
  31. package/dist/providers/google-provider.js +362 -0
  32. package/dist/providers/google-provider.js.map +1 -0
  33. package/dist/providers/index.d.ts +14 -0
  34. package/dist/providers/index.d.ts.map +1 -0
  35. package/dist/providers/index.js +18 -0
  36. package/dist/providers/index.js.map +1 -0
  37. package/dist/providers/ollama-provider.d.ts +23 -0
  38. package/dist/providers/ollama-provider.d.ts.map +1 -0
  39. package/dist/providers/ollama-provider.js +374 -0
  40. package/dist/providers/ollama-provider.js.map +1 -0
  41. package/dist/providers/openai-provider.d.ts +23 -0
  42. package/dist/providers/openai-provider.d.ts.map +1 -0
  43. package/dist/providers/openai-provider.js +349 -0
  44. package/dist/providers/openai-provider.js.map +1 -0
  45. package/dist/providers/provider-manager.d.ts +139 -0
  46. package/dist/providers/provider-manager.d.ts.map +1 -0
  47. package/dist/providers/provider-manager.js +513 -0
  48. package/dist/providers/provider-manager.js.map +1 -0
  49. package/dist/providers/types.d.ts +356 -0
  50. package/dist/providers/types.d.ts.map +1 -0
  51. package/dist/providers/types.js +61 -0
  52. package/dist/providers/types.js.map +1 -0
  53. package/dist/providers/utils.d.ts +37 -0
  54. package/dist/providers/utils.d.ts.map +1 -0
  55. package/dist/providers/utils.js +322 -0
  56. package/dist/providers/utils.js.map +1 -0
  57. package/dist/services/agentic-flow-hooks/hook-manager.d.ts +70 -0
  58. package/dist/services/agentic-flow-hooks/hook-manager.d.ts.map +1 -0
  59. package/dist/services/agentic-flow-hooks/hook-manager.js +512 -0
  60. package/dist/services/agentic-flow-hooks/hook-manager.js.map +1 -0
  61. package/dist/services/agentic-flow-hooks/index.d.ts +36 -0
  62. package/dist/services/agentic-flow-hooks/index.d.ts.map +1 -0
  63. package/dist/services/agentic-flow-hooks/index.js +325 -0
  64. package/dist/services/agentic-flow-hooks/index.js.map +1 -0
  65. package/dist/services/agentic-flow-hooks/llm-hooks.d.ts +33 -0
  66. package/dist/services/agentic-flow-hooks/llm-hooks.d.ts.map +1 -0
  67. package/dist/services/agentic-flow-hooks/llm-hooks.js +415 -0
  68. package/dist/services/agentic-flow-hooks/llm-hooks.js.map +1 -0
  69. package/dist/services/agentic-flow-hooks/memory-hooks.d.ts +45 -0
  70. package/dist/services/agentic-flow-hooks/memory-hooks.d.ts.map +1 -0
  71. package/dist/services/agentic-flow-hooks/memory-hooks.js +532 -0
  72. package/dist/services/agentic-flow-hooks/memory-hooks.js.map +1 -0
  73. package/dist/services/agentic-flow-hooks/neural-hooks.d.ts +39 -0
  74. package/dist/services/agentic-flow-hooks/neural-hooks.d.ts.map +1 -0
  75. package/dist/services/agentic-flow-hooks/neural-hooks.js +561 -0
  76. package/dist/services/agentic-flow-hooks/neural-hooks.js.map +1 -0
  77. package/dist/services/agentic-flow-hooks/performance-hooks.d.ts +33 -0
  78. package/dist/services/agentic-flow-hooks/performance-hooks.d.ts.map +1 -0
  79. package/dist/services/agentic-flow-hooks/performance-hooks.js +621 -0
  80. package/dist/services/agentic-flow-hooks/performance-hooks.js.map +1 -0
  81. package/dist/services/agentic-flow-hooks/types.d.ts +379 -0
  82. package/dist/services/agentic-flow-hooks/types.d.ts.map +1 -0
  83. package/dist/services/agentic-flow-hooks/types.js +8 -0
  84. package/dist/services/agentic-flow-hooks/types.js.map +1 -0
  85. package/dist/services/agentic-flow-hooks/workflow-hooks.d.ts +39 -0
  86. package/dist/services/agentic-flow-hooks/workflow-hooks.d.ts.map +1 -0
  87. package/dist/services/agentic-flow-hooks/workflow-hooks.js +742 -0
  88. package/dist/services/agentic-flow-hooks/workflow-hooks.js.map +1 -0
  89. package/package.json +1 -1
  90. package/scripts/optimize-performance.js +400 -0
  91. package/scripts/performance-monitor.js +263 -0
  92. package/src/cli/help-text.js +1 -1
  93. package/src/cli/simple-cli.js +1 -1
  94. package/src/cli/simple-commands/hive-mind.js +1 -1
  95. package/src/providers/anthropic-provider.ts +282 -0
  96. package/src/providers/base-provider.ts +560 -0
  97. package/src/providers/cohere-provider.ts +521 -0
  98. package/src/providers/google-provider.ts +477 -0
  99. package/src/providers/index.ts +21 -0
  100. package/src/providers/ollama-provider.ts +489 -0
  101. package/src/providers/openai-provider.ts +476 -0
  102. package/src/providers/provider-manager.ts +654 -0
  103. package/src/providers/types.ts +531 -0
  104. package/src/providers/utils.ts +376 -0
  105. package/src/services/agentic-flow-hooks/hook-manager.ts +701 -0
  106. package/src/services/agentic-flow-hooks/index.ts +386 -0
  107. package/src/services/agentic-flow-hooks/llm-hooks.ts +557 -0
  108. package/src/services/agentic-flow-hooks/memory-hooks.ts +710 -0
  109. package/src/services/agentic-flow-hooks/neural-hooks.ts +758 -0
  110. package/src/services/agentic-flow-hooks/performance-hooks.ts +827 -0
  111. package/src/services/agentic-flow-hooks/types.ts +503 -0
  112. package/src/services/agentic-flow-hooks/workflow-hooks.ts +1026 -0
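The bulk of this release is the new multi-provider layer under src/providers/ and the agentic-flow-hooks service. Judging from the OpenAI implementation shown below, the providers follow a template-method pattern: BaseProvider owns the public lifecycle and shared helpers, and each concrete provider implements protected do* hooks against its own HTTP API. A minimal sketch of that inferred contract follows; base-provider.ts itself (file 96, 560 lines) is not shown in this diff, so every member here other than the hook names visible in the OpenAI subclass is an assumption:

// Sketch only: inferred from how the OpenAI subclass below overrides
// doInitialize/doComplete/doStreamComplete/doHealthCheck and calls
// this.config, this.logger, this.estimateTokens, this.transformError.
abstract class BaseProviderSketch {
  protected logger = console; // assumed: the real class likely injects a logger

  constructor(protected config: { apiKey?: string; model: string; timeout?: number }) {}

  // Hooks each concrete provider (OpenAI, Anthropic, Cohere, Google, Ollama)
  // implements against its own API:
  protected abstract doInitialize(): Promise<void>;
  protected abstract doComplete(request: object): Promise<object>;
  protected abstract doHealthCheck(): Promise<{ healthy: boolean; timestamp: Date }>;

  // Shared helpers the OpenAI subclass visibly relies on; bodies are guesses.
  protected estimateTokens(text: string): number {
    return Math.ceil(text.length / 4); // rough chars-per-token heuristic
  }

  protected transformError(error: unknown): Error {
    return error instanceof Error ? error : new Error(String(error));
  }
}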
package/src/providers/openai-provider.ts (new file)
@@ -0,0 +1,476 @@
/**
 * OpenAI Provider Implementation
 * Supports GPT-4, GPT-3.5, and other OpenAI models
 */

import { BaseProvider } from './base-provider.js';
import {
  LLMProvider,
  LLMModel,
  LLMRequest,
  LLMResponse,
  LLMStreamEvent,
  ModelInfo,
  ProviderCapabilities,
  HealthCheckResult,
  LLMProviderError,
  RateLimitError,
  AuthenticationError,
  ModelNotFoundError,
} from './types.js';

interface OpenAIRequest {
  model: string;
  messages: Array<{
    role: 'system' | 'user' | 'assistant' | 'function';
    content: string;
    name?: string;
    function_call?: {
      name: string;
      arguments: string;
    };
  }>;
  temperature?: number;
  max_tokens?: number;
  top_p?: number;
  frequency_penalty?: number;
  presence_penalty?: number;
  stop?: string[];
  stream?: boolean;
  functions?: Array<{
    name: string;
    description: string;
    parameters: any;
  }>;
  function_call?: 'auto' | 'none' | { name: string };
}

interface OpenAIResponse {
  id: string;
  object: string;
  created: number;
  model: string;
  choices: Array<{
    index: number;
    message: {
      role: string;
      content: string | null;
      function_call?: {
        name: string;
        arguments: string;
      };
    };
    finish_reason: 'stop' | 'length' | 'function_call' | 'content_filter';
  }>;
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}

interface OpenAIStreamChunk {
  id: string;
  object: string;
  created: number;
  model: string;
  choices: Array<{
    index: number;
    delta: {
      role?: string;
      content?: string;
      function_call?: {
        name?: string;
        arguments?: string;
      };
    };
    finish_reason?: string;
  }>;
}

export class OpenAIProvider extends BaseProvider {
  readonly name: LLMProvider = 'openai';
  readonly capabilities: ProviderCapabilities = {
    supportedModels: [
      'gpt-4-turbo-preview',
      'gpt-4',
      'gpt-4-32k',
      'gpt-3.5-turbo',
      'gpt-3.5-turbo-16k',
    ],
    maxContextLength: {
      'gpt-4-turbo-preview': 128000,
      'gpt-4': 8192,
      'gpt-4-32k': 32768,
      'gpt-3.5-turbo': 4096,
      'gpt-3.5-turbo-16k': 16384,
    } as Record<LLMModel, number>,
    maxOutputTokens: {
      'gpt-4-turbo-preview': 4096,
      'gpt-4': 4096,
      'gpt-4-32k': 4096,
      'gpt-3.5-turbo': 4096,
      'gpt-3.5-turbo-16k': 4096,
    } as Record<LLMModel, number>,
    supportsStreaming: true,
    supportsFunctionCalling: true,
    supportsSystemMessages: true,
    supportsVision: true, // GPT-4 with vision
    supportsAudio: false,
    supportsTools: true,
    supportsFineTuning: true,
    supportsEmbeddings: true,
    supportsLogprobs: true,
    supportsBatching: true,
    rateLimit: {
      requestsPerMinute: 3500,
      tokensPerMinute: 90000,
      concurrentRequests: 100,
    },
    pricing: {
      'gpt-4-turbo-preview': {
        promptCostPer1k: 0.01,
        completionCostPer1k: 0.03,
        currency: 'USD',
      },
      'gpt-4': {
        promptCostPer1k: 0.03,
        completionCostPer1k: 0.06,
        currency: 'USD',
      },
      'gpt-4-32k': {
        promptCostPer1k: 0.06,
        completionCostPer1k: 0.12,
        currency: 'USD',
      },
      'gpt-3.5-turbo': {
        promptCostPer1k: 0.0005,
        completionCostPer1k: 0.0015,
        currency: 'USD',
      },
      'gpt-3.5-turbo-16k': {
        promptCostPer1k: 0.003,
        completionCostPer1k: 0.004,
        currency: 'USD',
      },
    },
  };

  // Assigned in doInitialize(), hence the definite-assignment assertions.
  private baseUrl!: string;
  private headers!: Record<string, string>;

  protected async doInitialize(): Promise<void> {
    if (!this.config.apiKey) {
      throw new AuthenticationError('OpenAI API key is required', 'openai');
    }

    this.baseUrl = this.config.apiUrl || 'https://api.openai.com/v1';
    this.headers = {
      'Authorization': `Bearer ${this.config.apiKey}`,
      'Content-Type': 'application/json',
    };

    // Add organization header if provided
    if (this.config.providerOptions?.organization) {
      this.headers['OpenAI-Organization'] = this.config.providerOptions.organization;
    }
  }

  protected async doComplete(request: LLMRequest): Promise<LLMResponse> {
    const openAIRequest: OpenAIRequest = {
      model: this.mapToOpenAIModel(request.model || this.config.model),
      messages: request.messages.map((msg) => ({
        role: msg.role,
        content: msg.content,
        ...(msg.name && { name: msg.name }),
        ...(msg.functionCall && { function_call: msg.functionCall }),
      })),
      temperature: request.temperature ?? this.config.temperature,
      max_tokens: request.maxTokens ?? this.config.maxTokens,
      top_p: request.topP ?? this.config.topP,
      frequency_penalty: request.frequencyPenalty ?? this.config.frequencyPenalty,
      presence_penalty: request.presencePenalty ?? this.config.presencePenalty,
      stop: request.stopSequences ?? this.config.stopSequences,
      stream: false,
    };

    // Add function calling if present
    if (request.functions) {
      openAIRequest.functions = request.functions;
      openAIRequest.function_call = request.functionCall;
    }

    const controller = new AbortController();
    const timeout = setTimeout(() => controller.abort(), this.config.timeout || 60000);

    try {
      const response = await fetch(`${this.baseUrl}/chat/completions`, {
        method: 'POST',
        headers: this.headers,
        body: JSON.stringify(openAIRequest),
        signal: controller.signal,
      });

      if (!response.ok) {
        await this.handleErrorResponse(response);
      }

      const data: OpenAIResponse = await response.json();
      const choice = data.choices[0];

      // Calculate cost
      const model = request.model || this.config.model;
      const pricing = this.capabilities.pricing![model];
      const promptCost = (data.usage.prompt_tokens / 1000) * pricing.promptCostPer1k;
      const completionCost = (data.usage.completion_tokens / 1000) * pricing.completionCostPer1k;

      return {
        id: data.id,
        model: this.mapFromOpenAIModel(data.model),
        provider: 'openai',
        content: choice.message.content || '',
        functionCall: choice.message.function_call,
        usage: {
          promptTokens: data.usage.prompt_tokens,
          completionTokens: data.usage.completion_tokens,
          totalTokens: data.usage.total_tokens,
        },
        cost: {
          promptCost,
          completionCost,
          totalCost: promptCost + completionCost,
          currency: 'USD',
        },
        finishReason: choice.finish_reason,
      };
    } catch (error) {
      throw this.transformError(error);
    } finally {
      clearTimeout(timeout);
    }
  }

  protected async *doStreamComplete(request: LLMRequest): AsyncIterable<LLMStreamEvent> {
    const openAIRequest: OpenAIRequest = {
      model: this.mapToOpenAIModel(request.model || this.config.model),
      messages: request.messages.map((msg) => ({
        role: msg.role,
        content: msg.content,
        ...(msg.name && { name: msg.name }),
        ...(msg.functionCall && { function_call: msg.functionCall }),
      })),
      temperature: request.temperature ?? this.config.temperature,
      max_tokens: request.maxTokens ?? this.config.maxTokens,
      top_p: request.topP ?? this.config.topP,
      frequency_penalty: request.frequencyPenalty ?? this.config.frequencyPenalty,
      presence_penalty: request.presencePenalty ?? this.config.presencePenalty,
      stop: request.stopSequences ?? this.config.stopSequences,
      stream: true,
    };

    if (request.functions) {
      openAIRequest.functions = request.functions;
      openAIRequest.function_call = request.functionCall;
    }

    const controller = new AbortController();
    const timeout = setTimeout(() => controller.abort(), (this.config.timeout || 60000) * 2);

    try {
      const response = await fetch(`${this.baseUrl}/chat/completions`, {
        method: 'POST',
        headers: this.headers,
        body: JSON.stringify(openAIRequest),
        signal: controller.signal,
      });

      if (!response.ok) {
        await this.handleErrorResponse(response);
      }

      // Parse the SSE stream line by line; each event is a `data: {...}` JSON chunk.
      const reader = response.body!.getReader();
      const decoder = new TextDecoder();
      let buffer = '';
      let totalCompletionTokens = 0;

      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop() || '';

        for (const line of lines) {
          if (line.startsWith('data: ')) {
            const data = line.slice(6);
            if (data === '[DONE]') continue;

            try {
              const chunk: OpenAIStreamChunk = JSON.parse(data);
              const delta = chunk.choices[0].delta;

              if (delta.content) {
                // Accumulate a running token estimate for the streamed completion
                totalCompletionTokens += this.estimateTokens(delta.content);
                yield {
                  type: 'content',
                  delta: { content: delta.content },
                };
              }

              if (delta.function_call) {
                yield {
                  type: 'function_call',
                  delta: { functionCall: delta.function_call },
                };
              }

              if (chunk.choices[0].finish_reason) {
                // The streaming API returns no usage data, so estimate tokens
                const promptTokens = this.estimateTokens(JSON.stringify(request.messages));
                const completionTokens = Math.max(totalCompletionTokens, 1);

                const model = request.model || this.config.model;
                const pricing = this.capabilities.pricing![model];
                const promptCost = (promptTokens / 1000) * pricing.promptCostPer1k;
                const completionCost = (completionTokens / 1000) * pricing.completionCostPer1k;

                yield {
                  type: 'done',
                  usage: {
                    promptTokens,
                    completionTokens,
                    totalTokens: promptTokens + completionTokens,
                  },
                  cost: {
                    promptCost,
                    completionCost,
                    totalCost: promptCost + completionCost,
                    currency: 'USD',
                  },
                };
              }
            } catch (e) {
              this.logger.warn('Failed to parse OpenAI stream chunk', { data, error: e });
            }
          }
        }
      }
    } catch (error) {
      throw this.transformError(error);
    } finally {
      clearTimeout(timeout);
    }
  }

  async listModels(): Promise<LLMModel[]> {
    return this.capabilities.supportedModels;
  }

  async getModelInfo(model: LLMModel): Promise<ModelInfo> {
    return {
      model,
      name: model,
      description: this.getModelDescription(model),
      contextLength: this.capabilities.maxContextLength[model] || 4096,
      maxOutputTokens: this.capabilities.maxOutputTokens[model] || 4096,
      supportedFeatures: [
        'chat',
        'completion',
        'function_calling',
        ...(model.includes('gpt-4') ? ['vision'] : []),
      ],
      pricing: this.capabilities.pricing![model],
    };
  }

  protected async doHealthCheck(): Promise<HealthCheckResult> {
    try {
      const response = await fetch(`${this.baseUrl}/models`, {
        headers: this.headers,
      });

      if (!response.ok) {
        throw new Error(`Health check failed: ${response.status}`);
      }

      return {
        healthy: true,
        timestamp: new Date(),
      };
    } catch (error) {
      return {
        healthy: false,
        error: error instanceof Error ? error.message : 'Unknown error',
        timestamp: new Date(),
      };
    }
  }

  private mapToOpenAIModel(model: LLMModel): string {
    // Map our model names to OpenAI model names if needed
    const modelMap: Record<string, string> = {
      'gpt-4-turbo-preview': 'gpt-4-turbo-preview',
      'gpt-4': 'gpt-4',
      'gpt-4-32k': 'gpt-4-32k',
      'gpt-3.5-turbo': 'gpt-3.5-turbo',
      'gpt-3.5-turbo-16k': 'gpt-3.5-turbo-16k',
    };
    return modelMap[model] || model;
  }

  private mapFromOpenAIModel(model: string): LLMModel {
    // Ensure the model is in our supported list
    return this.capabilities.supportedModels.find((m) => m === model) || 'gpt-3.5-turbo';
  }

  private getModelDescription(model: LLMModel): string {
    const descriptions: Record<string, string> = {
      'gpt-4-turbo-preview': 'Latest GPT-4 Turbo model with improved performance',
      'gpt-4': 'Most capable GPT-4 model for complex tasks',
      'gpt-4-32k': 'GPT-4 with extended 32k context window',
      'gpt-3.5-turbo': 'Fast and efficient model for most tasks',
      'gpt-3.5-turbo-16k': 'GPT-3.5 Turbo with extended context',
    };
    return descriptions[model] || 'OpenAI language model';
  }

  private async handleErrorResponse(response: Response): Promise<void> {
    const errorText = await response.text();
    let errorData: any;

    try {
      errorData = JSON.parse(errorText);
    } catch {
      errorData = { error: { message: errorText } };
    }

    const message = errorData.error?.message || 'Unknown error';

    switch (response.status) {
      case 401:
        throw new AuthenticationError(message, 'openai', errorData);
      case 429: {
        const retryAfter = response.headers.get('retry-after');
        throw new RateLimitError(
          message,
          'openai',
          retryAfter ? parseInt(retryAfter, 10) : undefined,
          errorData,
        );
      }
      case 404:
        throw new ModelNotFoundError(this.config.model, 'openai', errorData);
      default:
        throw new LLMProviderError(
          message,
          `OPENAI_${response.status}`,
          'openai',
          response.status,
          response.status >= 500,
          errorData,
        );
    }
  }
}
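For orientation, here is a hedged usage sketch of this provider. The constructor and the public initialize()/complete() wrappers are defined by BaseProvider and the config type by types.ts, neither of which appears in this diff, so the method names and config fields below are assumptions inferred from how the class reads this.config and implements its protected do* hooks:

// Hypothetical usage sketch; not taken from the package's docs.
import { OpenAIProvider } from './openai-provider.js';

// Field names mirror how the provider reads this.config above;
// the full config type in types.ts (file 103) may differ.
const provider = new OpenAIProvider({
  apiKey: process.env.OPENAI_API_KEY ?? '',
  model: 'gpt-3.5-turbo',
  temperature: 0.7,
  maxTokens: 512,
});

// Assumed public wrappers around doInitialize()/doComplete().
await provider.initialize();

const response = await provider.complete({
  messages: [{ role: 'user', content: 'Summarize this diff in one line.' }],
});
console.log(response.content, response.cost.totalCost);

Note the design choice visible in doStreamComplete: OpenAI's streaming endpoint does not report usage, so the provider estimates prompt and completion tokens itself (via estimateTokens) in order to emit a cost figure in the final 'done' event; those numbers are approximations, unlike the exact usage block returned by the non-streaming path.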