@animalabs/membrane 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. package/dist/context/index.d.ts +10 -0
  2. package/dist/context/index.d.ts.map +1 -0
  3. package/dist/context/index.js +9 -0
  4. package/dist/context/index.js.map +1 -0
  5. package/dist/context/process.d.ts +22 -0
  6. package/dist/context/process.d.ts.map +1 -0
  7. package/dist/context/process.js +369 -0
  8. package/dist/context/process.js.map +1 -0
  9. package/dist/context/types.d.ts +118 -0
  10. package/dist/context/types.d.ts.map +1 -0
  11. package/dist/context/types.js +60 -0
  12. package/dist/context/types.js.map +1 -0
  13. package/dist/index.d.ts +12 -0
  14. package/dist/index.d.ts.map +1 -0
  15. package/dist/index.js +18 -0
  16. package/dist/index.js.map +1 -0
  17. package/dist/membrane.d.ts +96 -0
  18. package/dist/membrane.d.ts.map +1 -0
  19. package/dist/membrane.js +893 -0
  20. package/dist/membrane.js.map +1 -0
  21. package/dist/providers/anthropic.d.ts +36 -0
  22. package/dist/providers/anthropic.d.ts.map +1 -0
  23. package/dist/providers/anthropic.js +265 -0
  24. package/dist/providers/anthropic.js.map +1 -0
  25. package/dist/providers/index.d.ts +8 -0
  26. package/dist/providers/index.d.ts.map +1 -0
  27. package/dist/providers/index.js +8 -0
  28. package/dist/providers/index.js.map +1 -0
  29. package/dist/providers/openai-compatible.d.ts +74 -0
  30. package/dist/providers/openai-compatible.d.ts.map +1 -0
  31. package/dist/providers/openai-compatible.js +412 -0
  32. package/dist/providers/openai-compatible.js.map +1 -0
  33. package/dist/providers/openai.d.ts +69 -0
  34. package/dist/providers/openai.d.ts.map +1 -0
  35. package/dist/providers/openai.js +455 -0
  36. package/dist/providers/openai.js.map +1 -0
  37. package/dist/providers/openrouter.d.ts +76 -0
  38. package/dist/providers/openrouter.d.ts.map +1 -0
  39. package/dist/providers/openrouter.js +492 -0
  40. package/dist/providers/openrouter.js.map +1 -0
  41. package/dist/transforms/chat.d.ts +52 -0
  42. package/dist/transforms/chat.d.ts.map +1 -0
  43. package/dist/transforms/chat.js +136 -0
  44. package/dist/transforms/chat.js.map +1 -0
  45. package/dist/transforms/index.d.ts +6 -0
  46. package/dist/transforms/index.d.ts.map +1 -0
  47. package/dist/transforms/index.js +6 -0
  48. package/dist/transforms/index.js.map +1 -0
  49. package/dist/transforms/prefill.d.ts +89 -0
  50. package/dist/transforms/prefill.d.ts.map +1 -0
  51. package/dist/transforms/prefill.js +401 -0
  52. package/dist/transforms/prefill.js.map +1 -0
  53. package/dist/types/config.d.ts +103 -0
  54. package/dist/types/config.d.ts.map +1 -0
  55. package/dist/types/config.js +21 -0
  56. package/dist/types/config.js.map +1 -0
  57. package/dist/types/content.d.ts +81 -0
  58. package/dist/types/content.d.ts.map +1 -0
  59. package/dist/types/content.js +40 -0
  60. package/dist/types/content.js.map +1 -0
  61. package/dist/types/errors.d.ts +42 -0
  62. package/dist/types/errors.d.ts.map +1 -0
  63. package/dist/types/errors.js +208 -0
  64. package/dist/types/errors.js.map +1 -0
  65. package/dist/types/index.d.ts +18 -0
  66. package/dist/types/index.d.ts.map +1 -0
  67. package/dist/types/index.js +9 -0
  68. package/dist/types/index.js.map +1 -0
  69. package/dist/types/message.d.ts +46 -0
  70. package/dist/types/message.d.ts.map +1 -0
  71. package/dist/types/message.js +38 -0
  72. package/dist/types/message.js.map +1 -0
  73. package/dist/types/provider.d.ts +155 -0
  74. package/dist/types/provider.d.ts.map +1 -0
  75. package/dist/types/provider.js +5 -0
  76. package/dist/types/provider.js.map +1 -0
  77. package/dist/types/request.d.ts +78 -0
  78. package/dist/types/request.d.ts.map +1 -0
  79. package/dist/types/request.js +5 -0
  80. package/dist/types/request.js.map +1 -0
  81. package/dist/types/response.d.ts +131 -0
  82. package/dist/types/response.d.ts.map +1 -0
  83. package/dist/types/response.js +7 -0
  84. package/dist/types/response.js.map +1 -0
  85. package/dist/types/streaming.d.ts +164 -0
  86. package/dist/types/streaming.d.ts.map +1 -0
  87. package/dist/types/streaming.js +5 -0
  88. package/dist/types/streaming.js.map +1 -0
  89. package/dist/types/tools.d.ts +71 -0
  90. package/dist/types/tools.d.ts.map +1 -0
  91. package/dist/types/tools.js +5 -0
  92. package/dist/types/tools.js.map +1 -0
  93. package/dist/utils/index.d.ts +5 -0
  94. package/dist/utils/index.d.ts.map +1 -0
  95. package/dist/utils/index.js +5 -0
  96. package/dist/utils/index.js.map +1 -0
  97. package/dist/utils/stream-parser.d.ts +53 -0
  98. package/dist/utils/stream-parser.d.ts.map +1 -0
  99. package/dist/utils/stream-parser.js +359 -0
  100. package/dist/utils/stream-parser.js.map +1 -0
  101. package/dist/utils/tool-parser.d.ts +130 -0
  102. package/dist/utils/tool-parser.d.ts.map +1 -0
  103. package/dist/utils/tool-parser.js +571 -0
  104. package/dist/utils/tool-parser.js.map +1 -0
  105. package/package.json +37 -0
  106. package/src/context/index.ts +24 -0
  107. package/src/context/process.ts +520 -0
  108. package/src/context/types.ts +231 -0
  109. package/src/index.ts +23 -0
  110. package/src/membrane.ts +1174 -0
  111. package/src/providers/anthropic.ts +340 -0
  112. package/src/providers/index.ts +31 -0
  113. package/src/providers/openai-compatible.ts +570 -0
  114. package/src/providers/openai.ts +625 -0
  115. package/src/providers/openrouter.ts +662 -0
  116. package/src/transforms/chat.ts +212 -0
  117. package/src/transforms/index.ts +22 -0
  118. package/src/transforms/prefill.ts +585 -0
  119. package/src/types/config.ts +172 -0
  120. package/src/types/content.ts +181 -0
  121. package/src/types/errors.ts +277 -0
  122. package/src/types/index.ts +154 -0
  123. package/src/types/message.ts +89 -0
  124. package/src/types/provider.ts +249 -0
  125. package/src/types/request.ts +131 -0
  126. package/src/types/response.ts +223 -0
  127. package/src/types/streaming.ts +231 -0
  128. package/src/types/tools.ts +92 -0
  129. package/src/utils/index.ts +15 -0
  130. package/src/utils/stream-parser.ts +440 -0
  131. package/src/utils/tool-parser.ts +715 -0
package/src/providers/openai.ts
@@ -0,0 +1,625 @@
+/**
+ * OpenAI Direct provider adapter
+ *
+ * Direct adapter for OpenAI's API with support for modern models:
+ * - GPT-4o, GPT-4 Turbo
+ * - GPT-5, GPT-5-mini (uses max_completion_tokens)
+ * - o1, o3, o4-mini reasoning models
+ *
+ * Key differences from generic OpenAI-compatible:
+ * - Uses max_completion_tokens for newer models (not max_tokens)
+ * - Handles reasoning models' special requirements
+ * - Direct API integration with proper error handling
+ */
+
+import type {
+  ProviderAdapter,
+  ProviderRequest,
+  ProviderRequestOptions,
+  ProviderResponse,
+  StreamCallbacks,
+  ContentBlock,
+} from '../types/index.js';
+import {
+  MembraneError,
+  rateLimitError,
+  contextLengthError,
+  authError,
+  serverError,
+  abortError,
+  networkError,
+} from '../types/index.js';
+
+// ============================================================================
+// Types
+// ============================================================================
+
+interface OpenAIMessage {
+  role: 'user' | 'assistant' | 'system' | 'tool';
+  content?: string | null;
+  tool_calls?: OpenAIToolCall[];
+  tool_call_id?: string;
+}
+
+interface OpenAIToolCall {
+  id: string;
+  type: 'function';
+  function: {
+    name: string;
+    arguments: string;
+  };
+}
+
+interface OpenAITool {
+  type: 'function';
+  function: {
+    name: string;
+    description: string;
+    parameters: Record<string, unknown>;
+  };
+}
+
+interface OpenAIResponse {
+  id: string;
+  model: string;
+  choices: {
+    index: number;
+    message: OpenAIMessage;
+    finish_reason: string;
+  }[];
+  usage?: {
+    prompt_tokens: number;
+    completion_tokens: number;
+    total_tokens: number;
+    /** OpenAI prompt caching details (automatic for prompts ≥1024 tokens) */
+    prompt_tokens_details?: {
+      cached_tokens?: number;
+      audio_tokens?: number;
+    };
+    completion_tokens_details?: {
+      reasoning_tokens?: number;
+      audio_tokens?: number;
+    };
+  };
+}
+
+// ============================================================================
+// Adapter Configuration
+// ============================================================================
+
+export interface OpenAIAdapterConfig {
+  /** API key (defaults to OPENAI_API_KEY env var) */
+  apiKey?: string;
+
+  /** Base URL (default: https://api.openai.com/v1) - useful for Azure OpenAI */
+  baseURL?: string;
+
+  /** Organization ID (optional) */
+  organization?: string;
+
+  /** Default max tokens */
+  defaultMaxTokens?: number;
+}
+
+// ============================================================================
+// Model Detection Helpers
+// ============================================================================
+
+/**
+ * Models that require max_completion_tokens instead of max_tokens
+ */
+const COMPLETION_TOKENS_MODELS = [
+  'gpt-5',
+  'gpt-5-mini',
+  'o1',
+  'o1-mini',
+  'o1-preview',
+  'o3',
+  'o3-mini',
+  'o4-mini',
+];
+
+/**
+ * Check if a model requires the max_completion_tokens parameter
+ */
+function requiresCompletionTokens(model: string): boolean {
+  return COMPLETION_TOKENS_MODELS.some(prefix => model.startsWith(prefix));
+}
+
+/**
+ * Models that don't support custom temperature (only the default 1.0)
+ */
+const NO_TEMPERATURE_MODELS = [
+  'gpt-5', // Base GPT-5 models
+  'gpt-5-mini',
+  'o1', // Reasoning models
+  'o1-mini',
+  'o1-preview',
+  'o3',
+  'o3-mini',
+  'o4-mini',
+];
+
+/**
+ * Check if a model doesn't support custom temperature
+ */
+function noTemperatureSupport(model: string): boolean {
+  return NO_TEMPERATURE_MODELS.some(prefix => model.startsWith(prefix));
+}
+
+/**
+ * Models that don't support stop sequences (reasoning models)
+ */
+const NO_STOP_MODELS = [
+  'o1', // Reasoning models don't support stop sequences
+  'o1-mini',
+  'o1-preview',
+  'o3',
+  'o3-mini',
+  'o4-mini',
+];
+
+/**
+ * Check if a model doesn't support stop sequences
+ */
+function noStopSupport(model: string): boolean {
+  return NO_STOP_MODELS.some(prefix => model.startsWith(prefix));
+}
+
+// ============================================================================
+// OpenAI Adapter
+// ============================================================================
+
+export class OpenAIAdapter implements ProviderAdapter {
+  readonly name = 'openai';
+  private apiKey: string;
+  private baseURL: string;
+  private organization?: string;
+  private defaultMaxTokens: number;
+
+  constructor(config: OpenAIAdapterConfig = {}) {
+    this.apiKey = config.apiKey ?? process.env.OPENAI_API_KEY ?? '';
+    this.baseURL = config.baseURL ?? 'https://api.openai.com/v1';
+    this.organization = config.organization;
+    this.defaultMaxTokens = config.defaultMaxTokens ?? 4096;
+
+    if (!this.apiKey) {
+      throw new Error('OpenAI API key not provided');
+    }
+  }
+
+  supportsModel(modelId: string): boolean {
+    // OpenAI models typically start with gpt-, o1, o3, o4
+    return (
+      modelId.startsWith('gpt-') ||
+      modelId.startsWith('o1') ||
+      modelId.startsWith('o3') ||
+      modelId.startsWith('o4')
+    );
+  }
+
+  async complete(
+    request: ProviderRequest,
+    options?: ProviderRequestOptions
+  ): Promise<ProviderResponse> {
+    const openAIRequest = this.buildRequest(request);
+
+    try {
+      const response = await this.makeRequest(openAIRequest, options);
+      return this.parseResponse(response, request.model);
+    } catch (error) {
+      throw this.handleError(error);
+    }
+  }
+
+  async stream(
+    request: ProviderRequest,
+    callbacks: StreamCallbacks,
+    options?: ProviderRequestOptions
+  ): Promise<ProviderResponse> {
+    const openAIRequest = this.buildRequest(request);
+    openAIRequest.stream = true;
+    // Request usage data in stream for cache metrics
+    openAIRequest.stream_options = { include_usage: true };
+
+    try {
+      const response = await fetch(`${this.baseURL}/chat/completions`, {
+        method: 'POST',
+        headers: this.getHeaders(),
+        body: JSON.stringify(openAIRequest),
+        signal: options?.signal,
+      });
+
+      if (!response.ok) {
+        const errorText = await response.text();
+        throw new Error(`OpenAI API error: ${response.status} ${errorText}`);
+      }
+
+      const reader = response.body?.getReader();
+      if (!reader) {
+        throw new Error('No response body');
+      }
+
+      const decoder = new TextDecoder();
+      let accumulated = '';
+      let finishReason = 'stop';
+      let toolCalls: OpenAIToolCall[] = [];
+      let streamUsage: OpenAIResponse['usage'] | undefined;
+      // Carry partial SSE lines across reads: a single `data:` event can be
+      // split between network chunks, so never parse an incomplete line.
+      let buffer = '';
+
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+
+        buffer += decoder.decode(value, { stream: true });
+        const parts = buffer.split('\n');
+        buffer = parts.pop() ?? ''; // keep the trailing partial line for the next read
+        const lines = parts.filter(line => line.startsWith('data: '));
+
+        for (const line of lines) {
+          const data = line.slice(6);
+          if (data === '[DONE]') continue;
+
+          try {
+            const parsed = JSON.parse(data);
+            const delta = parsed.choices?.[0]?.delta;
+
+            if (delta?.content) {
+              accumulated += delta.content;
+              callbacks.onChunk(delta.content);
+            }
+
+            // Handle streaming tool calls
+            if (delta?.tool_calls) {
+              for (const tc of delta.tool_calls) {
+                const index = tc.index ?? 0;
+                if (!toolCalls[index]) {
+                  toolCalls[index] = {
+                    id: tc.id ?? '',
+                    type: 'function',
+                    function: { name: '', arguments: '' },
+                  };
+                }
+                if (tc.id) toolCalls[index].id = tc.id;
+                if (tc.function?.name) toolCalls[index].function.name = tc.function.name;
+                if (tc.function?.arguments) {
+                  toolCalls[index].function.arguments += tc.function.arguments;
+                }
+              }
+            }
+
+            if (parsed.choices?.[0]?.finish_reason) {
+              finishReason = parsed.choices[0].finish_reason;
+            }
+
+            // Capture usage data (comes in final chunk with stream_options.include_usage)
+            if (parsed.usage) {
+              streamUsage = parsed.usage;
+            }
+          } catch {
+            // Ignore parse errors in stream
+          }
+        }
+      }
+
+      // Build response with accumulated data
+      const message: OpenAIMessage = {
+        role: 'assistant',
+        content: accumulated || null,
+      };
+
+      if (toolCalls.length > 0) {
+        message.tool_calls = toolCalls;
+      }
+
+      return this.parseStreamedResponse(message, finishReason, request.model, streamUsage);
+
+    } catch (error) {
+      throw this.handleError(error);
+    }
+  }
+
+  private getHeaders(): Record<string, string> {
+    const headers: Record<string, string> = {
+      'Authorization': `Bearer ${this.apiKey}`,
+      'Content-Type': 'application/json',
+    };
+
+    if (this.organization) {
+      headers['OpenAI-Organization'] = this.organization;
+    }
+
+    return headers;
+  }
+
+  private buildRequest(request: ProviderRequest): any {
+    const messages = this.convertMessages(request.messages as any[]);
+    const model = request.model;
+    const maxTokens = request.maxTokens || this.defaultMaxTokens;
+
+    const params: any = {
+      model,
+      messages,
+    };
+
+    // Use appropriate max tokens parameter based on model
+    if (requiresCompletionTokens(model)) {
+      params.max_completion_tokens = maxTokens;
+    } else {
+      params.max_tokens = maxTokens;
+    }
+
+    // Some models (gpt-5, o1, o3, o4) don't support custom temperature
+    if (request.temperature !== undefined && !noTemperatureSupport(model)) {
+      params.temperature = request.temperature;
+    }
+
+    // Reasoning models (o1, o3, o4) don't support stop sequences
+    if (request.stopSequences && request.stopSequences.length > 0 && !noStopSupport(model)) {
+      params.stop = request.stopSequences;
+    }
+
+    if (request.tools && request.tools.length > 0) {
+      params.tools = this.convertTools(request.tools as any[]);
+    }
+
+    // Apply extra params (can override automatic choices)
+    if (request.extra) {
+      Object.assign(params, request.extra);
+    }
+
+    return params;
+  }
+
+  private convertMessages(messages: any[]): OpenAIMessage[] {
+    // Use flatMap to handle one-to-many expansion (multiple tool_results → multiple messages)
+    return messages.flatMap(msg => {
+      // If it's already in OpenAI format, pass through
+      if (msg.role && (typeof msg.content === 'string' || msg.content === null || msg.tool_calls)) {
+        return [msg as OpenAIMessage];
+      }
+
+      // Convert from Anthropic-style format
+      if (Array.isArray(msg.content)) {
+        const textParts: string[] = [];
+        const toolCalls: OpenAIToolCall[] = [];
+        const toolResults: OpenAIMessage[] = [];
+
+        for (const block of msg.content) {
+          if (block.type === 'text') {
+            textParts.push(block.text);
+          } else if (block.type === 'tool_use') {
+            toolCalls.push({
+              id: block.id,
+              type: 'function',
+              function: {
+                name: block.name,
+                arguments: JSON.stringify(block.input),
+              },
+            });
+          } else if (block.type === 'tool_result') {
+            // Collect ALL tool results - each becomes a separate message
+            toolResults.push({
+              role: 'tool' as const,
+              tool_call_id: block.tool_use_id || block.toolUseId,
+              content: typeof block.content === 'string' ? block.content : JSON.stringify(block.content),
+            });
+          }
+        }
+
+        // If we have tool results, return them (possibly multiple)
+        if (toolResults.length > 0) {
+          return toolResults;
+        }
+
+        // Otherwise build normal message
+        const result: OpenAIMessage = {
+          role: msg.role,
+          content: textParts.join('\n') || null,
+        };
+
+        if (toolCalls.length > 0) {
+          result.tool_calls = toolCalls;
+        }
+
+        return [result];
+      }
+
+      return [{
+        role: msg.role,
+        content: msg.content,
+      }];
+    });
+  }
+
+  private convertTools(tools: any[]): OpenAITool[] {
+    return tools.map(tool => {
+      const inputSchema = tool.inputSchema || tool.input_schema || { type: 'object', properties: {} };
+
+      return {
+        type: 'function',
+        function: {
+          name: tool.name,
+          description: tool.description,
+          parameters: inputSchema,
+        },
+      };
+    });
+  }
+
+  private async makeRequest(request: any, options?: ProviderRequestOptions): Promise<OpenAIResponse> {
+    const response = await fetch(`${this.baseURL}/chat/completions`, {
+      method: 'POST',
+      headers: this.getHeaders(),
+      body: JSON.stringify(request),
+      signal: options?.signal,
+    });
+
+    if (!response.ok) {
+      const errorText = await response.text();
+      throw new Error(`OpenAI API error: ${response.status} ${errorText}`);
+    }
+
+    return response.json() as Promise<OpenAIResponse>;
+  }
+
+  private parseResponse(response: OpenAIResponse, requestedModel: string): ProviderResponse {
+    const choice = response.choices[0];
+    const message = choice?.message;
+
+    // Extract prompt caching details (OpenAI automatic caching for prompts ≥1024 tokens)
+    const cachedTokens = response.usage?.prompt_tokens_details?.cached_tokens ?? 0;
+
+    return {
+      content: this.messageToContent(message),
+      stopReason: this.mapFinishReason(choice?.finish_reason),
+      stopSequence: undefined,
+      usage: {
+        inputTokens: response.usage?.prompt_tokens ?? 0,
+        outputTokens: response.usage?.completion_tokens ?? 0,
+        // OpenAI's automatic prompt caching - cached tokens are read from cache
+        // Note: OpenAI doesn't have separate "creation" tokens - it's automatic
+        cacheReadTokens: cachedTokens > 0 ? cachedTokens : undefined,
+      },
+      model: response.model ?? requestedModel,
+      raw: response,
+    };
+  }
+
+  private parseStreamedResponse(
+    message: OpenAIMessage,
+    finishReason: string,
+    requestedModel: string,
+    streamUsage?: OpenAIResponse['usage']
+  ): ProviderResponse {
+    // Extract cached tokens from stream usage if available
+    const cachedTokens = streamUsage?.prompt_tokens_details?.cached_tokens ?? 0;
+
+    return {
+      content: this.messageToContent(message),
+      stopReason: this.mapFinishReason(finishReason),
+      stopSequence: undefined,
+      usage: {
+        inputTokens: streamUsage?.prompt_tokens ?? 0,
+        outputTokens: streamUsage?.completion_tokens ?? 0,
+        cacheReadTokens: cachedTokens > 0 ? cachedTokens : undefined,
+      },
+      model: requestedModel,
+      raw: { message, finish_reason: finishReason, usage: streamUsage },
+    };
+  }
+
+  private messageToContent(message: OpenAIMessage | undefined): ContentBlock[] {
+    if (!message) return [];
+
+    const content: ContentBlock[] = [];
+
+    if (message.content) {
+      content.push({ type: 'text', text: message.content });
+    }
+
+    if (message.tool_calls) {
+      for (const tc of message.tool_calls) {
+        content.push({
+          type: 'tool_use',
+          id: tc.id,
+          name: tc.function.name,
+          input: JSON.parse(tc.function.arguments || '{}'),
+        });
+      }
+    }
+
+    return content;
+  }
+
+  private mapFinishReason(reason: string | undefined): string {
+    switch (reason) {
+      case 'stop':
+        return 'end_turn';
+      case 'length':
+        return 'max_tokens';
+      case 'tool_calls':
+        return 'tool_use';
+      case 'content_filter':
+        return 'refusal';
+      default:
+        return 'end_turn';
+    }
+  }
+
+  private handleError(error: unknown): MembraneError {
+    if (error instanceof Error) {
+      const message = error.message;
+
+      // OpenAI specific error patterns
+      if (message.includes('429') || message.includes('rate_limit')) {
+        // Try to extract retry-after
+        const retryMatch = message.match(/retry after (\d+)/i);
+        const retryAfter = retryMatch?.[1] ? parseInt(retryMatch[1], 10) * 1000 : undefined;
+        return rateLimitError(message, retryAfter, error);
+      }
+
+      if (message.includes('401') || message.includes('invalid_api_key') || message.includes('Incorrect API key')) {
+        return authError(message, error);
+      }
+
+      if (message.includes('context_length') || message.includes('maximum context') || message.includes('too long')) {
+        return contextLengthError(message, error);
+      }
+
+      if (message.includes('500') || message.includes('502') || message.includes('503') || message.includes('server_error')) {
+        return serverError(message, undefined, error);
+      }
+
+      if (error.name === 'AbortError') {
+        return abortError();
+      }
+
+      if (message.includes('network') || message.includes('fetch') || message.includes('ECONNREFUSED')) {
+        return networkError(message, error);
+      }
+    }
+
+    return new MembraneError({
+      type: 'unknown',
+      message: error instanceof Error ? error.message : String(error),
+      retryable: false,
+      rawError: error,
+    });
+  }
+}
+
+// ============================================================================
+// Content Conversion Utilities
+// ============================================================================
+
+/**
+ * Convert normalized content blocks to OpenAI message format
+ */
+export function toOpenAIContent(blocks: ContentBlock[]): string | null {
+  const textBlocks = blocks.filter(b => b.type === 'text') as Array<{ type: 'text'; text: string }>;
+  if (textBlocks.length === 0) return null;
+  return textBlocks.map(b => b.text).join('\n');
+}
+
+/**
+ * Convert OpenAI response message to normalized content blocks
+ */
+export function fromOpenAIContent(message: OpenAIMessage): ContentBlock[] {
+  const result: ContentBlock[] = [];
+
+  if (message.content) {
+    result.push({ type: 'text', text: message.content });
+  }
+
+  if (message.tool_calls) {
+    for (const tc of message.tool_calls) {
+      result.push({
+        type: 'tool_use',
+        id: tc.id,
+        name: tc.function.name,
+        input: JSON.parse(tc.function.arguments || '{}'),
+      });
+    }
+  }
+
+  return result;
+}
+
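A minimal usage sketch for the adapter above. This is a hypothetical example, not from the package's docs: it assumes the package root re-exports OpenAIAdapter and that ProviderRequest carries the model/messages/maxTokens fields referenced by buildRequest, and it reuses the StreamCallbacks.onChunk callback seen in stream().

    // Hypothetical usage; import path assumes the root index re-exports the adapter.
    import { OpenAIAdapter } from '@animalabs/membrane';

    // Constructor falls back to the OPENAI_API_KEY environment variable.
    const adapter = new OpenAIAdapter();

    // buildRequest picks the token parameter per model: 'gpt-4o-mini' is sent
    // with max_tokens, while 'o3-mini' or 'gpt-5' would get max_completion_tokens.
    const result = await adapter.complete({
      model: 'gpt-4o-mini',
      messages: [{ role: 'user', content: 'Say hello.' }],
      maxTokens: 128,
    });
    console.log(result.content, result.usage);

    // Streaming: onChunk receives text deltas; usage arrives in the final
    // chunk because the adapter sets stream_options.include_usage.
    await adapter.stream(
      { model: 'gpt-4o-mini', messages: [{ role: 'user', content: 'Count to three.' }] },
      { onChunk: (text) => process.stdout.write(text) }
    );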