@animalabs/membrane 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. package/dist/context/index.d.ts +10 -0
  2. package/dist/context/index.d.ts.map +1 -0
  3. package/dist/context/index.js +9 -0
  4. package/dist/context/index.js.map +1 -0
  5. package/dist/context/process.d.ts +22 -0
  6. package/dist/context/process.d.ts.map +1 -0
  7. package/dist/context/process.js +369 -0
  8. package/dist/context/process.js.map +1 -0
  9. package/dist/context/types.d.ts +118 -0
  10. package/dist/context/types.d.ts.map +1 -0
  11. package/dist/context/types.js +60 -0
  12. package/dist/context/types.js.map +1 -0
  13. package/dist/index.d.ts +12 -0
  14. package/dist/index.d.ts.map +1 -0
  15. package/dist/index.js +18 -0
  16. package/dist/index.js.map +1 -0
  17. package/dist/membrane.d.ts +96 -0
  18. package/dist/membrane.d.ts.map +1 -0
  19. package/dist/membrane.js +893 -0
  20. package/dist/membrane.js.map +1 -0
  21. package/dist/providers/anthropic.d.ts +36 -0
  22. package/dist/providers/anthropic.d.ts.map +1 -0
  23. package/dist/providers/anthropic.js +265 -0
  24. package/dist/providers/anthropic.js.map +1 -0
  25. package/dist/providers/index.d.ts +8 -0
  26. package/dist/providers/index.d.ts.map +1 -0
  27. package/dist/providers/index.js +8 -0
  28. package/dist/providers/index.js.map +1 -0
  29. package/dist/providers/openai-compatible.d.ts +74 -0
  30. package/dist/providers/openai-compatible.d.ts.map +1 -0
  31. package/dist/providers/openai-compatible.js +412 -0
  32. package/dist/providers/openai-compatible.js.map +1 -0
  33. package/dist/providers/openai.d.ts +69 -0
  34. package/dist/providers/openai.d.ts.map +1 -0
  35. package/dist/providers/openai.js +455 -0
  36. package/dist/providers/openai.js.map +1 -0
  37. package/dist/providers/openrouter.d.ts +76 -0
  38. package/dist/providers/openrouter.d.ts.map +1 -0
  39. package/dist/providers/openrouter.js +492 -0
  40. package/dist/providers/openrouter.js.map +1 -0
  41. package/dist/transforms/chat.d.ts +52 -0
  42. package/dist/transforms/chat.d.ts.map +1 -0
  43. package/dist/transforms/chat.js +136 -0
  44. package/dist/transforms/chat.js.map +1 -0
  45. package/dist/transforms/index.d.ts +6 -0
  46. package/dist/transforms/index.d.ts.map +1 -0
  47. package/dist/transforms/index.js +6 -0
  48. package/dist/transforms/index.js.map +1 -0
  49. package/dist/transforms/prefill.d.ts +89 -0
  50. package/dist/transforms/prefill.d.ts.map +1 -0
  51. package/dist/transforms/prefill.js +401 -0
  52. package/dist/transforms/prefill.js.map +1 -0
  53. package/dist/types/config.d.ts +103 -0
  54. package/dist/types/config.d.ts.map +1 -0
  55. package/dist/types/config.js +21 -0
  56. package/dist/types/config.js.map +1 -0
  57. package/dist/types/content.d.ts +81 -0
  58. package/dist/types/content.d.ts.map +1 -0
  59. package/dist/types/content.js +40 -0
  60. package/dist/types/content.js.map +1 -0
  61. package/dist/types/errors.d.ts +42 -0
  62. package/dist/types/errors.d.ts.map +1 -0
  63. package/dist/types/errors.js +208 -0
  64. package/dist/types/errors.js.map +1 -0
  65. package/dist/types/index.d.ts +18 -0
  66. package/dist/types/index.d.ts.map +1 -0
  67. package/dist/types/index.js +9 -0
  68. package/dist/types/index.js.map +1 -0
  69. package/dist/types/message.d.ts +46 -0
  70. package/dist/types/message.d.ts.map +1 -0
  71. package/dist/types/message.js +38 -0
  72. package/dist/types/message.js.map +1 -0
  73. package/dist/types/provider.d.ts +155 -0
  74. package/dist/types/provider.d.ts.map +1 -0
  75. package/dist/types/provider.js +5 -0
  76. package/dist/types/provider.js.map +1 -0
  77. package/dist/types/request.d.ts +78 -0
  78. package/dist/types/request.d.ts.map +1 -0
  79. package/dist/types/request.js +5 -0
  80. package/dist/types/request.js.map +1 -0
  81. package/dist/types/response.d.ts +131 -0
  82. package/dist/types/response.d.ts.map +1 -0
  83. package/dist/types/response.js +7 -0
  84. package/dist/types/response.js.map +1 -0
  85. package/dist/types/streaming.d.ts +164 -0
  86. package/dist/types/streaming.d.ts.map +1 -0
  87. package/dist/types/streaming.js +5 -0
  88. package/dist/types/streaming.js.map +1 -0
  89. package/dist/types/tools.d.ts +71 -0
  90. package/dist/types/tools.d.ts.map +1 -0
  91. package/dist/types/tools.js +5 -0
  92. package/dist/types/tools.js.map +1 -0
  93. package/dist/utils/index.d.ts +5 -0
  94. package/dist/utils/index.d.ts.map +1 -0
  95. package/dist/utils/index.js +5 -0
  96. package/dist/utils/index.js.map +1 -0
  97. package/dist/utils/stream-parser.d.ts +53 -0
  98. package/dist/utils/stream-parser.d.ts.map +1 -0
  99. package/dist/utils/stream-parser.js +359 -0
  100. package/dist/utils/stream-parser.js.map +1 -0
  101. package/dist/utils/tool-parser.d.ts +130 -0
  102. package/dist/utils/tool-parser.d.ts.map +1 -0
  103. package/dist/utils/tool-parser.js +571 -0
  104. package/dist/utils/tool-parser.js.map +1 -0
  105. package/package.json +37 -0
  106. package/src/context/index.ts +24 -0
  107. package/src/context/process.ts +520 -0
  108. package/src/context/types.ts +231 -0
  109. package/src/index.ts +23 -0
  110. package/src/membrane.ts +1174 -0
  111. package/src/providers/anthropic.ts +340 -0
  112. package/src/providers/index.ts +31 -0
  113. package/src/providers/openai-compatible.ts +570 -0
  114. package/src/providers/openai.ts +625 -0
  115. package/src/providers/openrouter.ts +662 -0
  116. package/src/transforms/chat.ts +212 -0
  117. package/src/transforms/index.ts +22 -0
  118. package/src/transforms/prefill.ts +585 -0
  119. package/src/types/config.ts +172 -0
  120. package/src/types/content.ts +181 -0
  121. package/src/types/errors.ts +277 -0
  122. package/src/types/index.ts +154 -0
  123. package/src/types/message.ts +89 -0
  124. package/src/types/provider.ts +249 -0
  125. package/src/types/request.ts +131 -0
  126. package/src/types/response.ts +223 -0
  127. package/src/types/streaming.ts +231 -0
  128. package/src/types/tools.ts +92 -0
  129. package/src/utils/index.ts +15 -0
  130. package/src/utils/stream-parser.ts +440 -0
  131. package/src/utils/tool-parser.ts +715 -0
@@ -0,0 +1,570 @@
1
+ /**
2
+ * OpenAI-Compatible provider adapter
3
+ *
4
+ * Generic adapter for any OpenAI-compatible API endpoint:
5
+ * - Ollama (http://localhost:11434/v1)
6
+ * - vLLM
7
+ * - Together AI
8
+ * - Groq
9
+ * - Local inference servers
10
+ * - Any other OpenAI-compatible endpoint
11
+ *
12
+ * Uses the standard OpenAI chat completions format with tool_calls support.
13
+ */
14
+
15
+ import type {
16
+ ProviderAdapter,
17
+ ProviderRequest,
18
+ ProviderRequestOptions,
19
+ ProviderResponse,
20
+ StreamCallbacks,
21
+ ContentBlock,
22
+ ToolDefinition,
23
+ } from '../types/index.js';
24
+ import {
25
+ MembraneError,
26
+ rateLimitError,
27
+ contextLengthError,
28
+ authError,
29
+ serverError,
30
+ abortError,
31
+ networkError,
32
+ } from '../types/index.js';
33
+
34
+ // ============================================================================
35
+ // Types
36
+ // ============================================================================
37
+
38
/** One message in the OpenAI chat-completions wire format. */
interface OpenAIMessage {
  role: 'user' | 'assistant' | 'system' | 'tool';
  /** Text body; null on assistant messages that carry only tool calls. */
  content?: string | null;
  /** Present on assistant messages that invoke tools. */
  tool_calls?: OpenAIToolCall[];
  /** Present on 'tool' role messages; references the originating call's id. */
  tool_call_id?: string;
}
44
+
45
/** A single function invocation emitted by the model (OpenAI wire format). */
interface OpenAIToolCall {
  id: string;
  type: 'function';
  function: {
    name: string;
    /** Argument object serialized as a JSON string by the API. */
    arguments: string;
  };
}
53
+
54
/** Tool definition in the OpenAI wire format, sent with a request. */
interface OpenAITool {
  type: 'function';
  function: {
    name: string;
    description: string;
    /** JSON Schema describing the tool's input parameters. */
    parameters: Record<string, unknown>;
  };
}
62
+
63
/** Shape of a non-streaming chat-completions response body. */
interface OpenAIResponse {
  id: string;
  model: string;
  choices: {
    index: number;
    message: OpenAIMessage;
    finish_reason: string;
  }[];
  /** Token accounting; optional because some compatible servers omit it. */
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}
77
+
78
+ // ============================================================================
79
+ // Adapter Configuration
80
+ // ============================================================================
81
+
82
/** Configuration for the generic OpenAI-compatible adapter. */
export interface OpenAICompatibleAdapterConfig {
  /** Base URL for the API (required, e.g., 'http://localhost:11434/v1') */
  baseURL: string;

  /** API key (optional for local servers) */
  apiKey?: string;

  /** Provider name for logging/identification (default: 'openai-compatible') */
  providerName?: string;

  /** Default max tokens */
  defaultMaxTokens?: number;

  /** Additional headers to include with requests */
  extraHeaders?: Record<string, string>;
}
98
+
99
+ // ============================================================================
100
+ // OpenAI-Compatible Adapter
101
+ // ============================================================================
102
+
103
+ export class OpenAICompatibleAdapter implements ProviderAdapter {
104
+ readonly name: string;
105
+ private baseURL: string;
106
+ private apiKey: string;
107
+ private defaultMaxTokens: number;
108
+ private extraHeaders: Record<string, string>;
109
+
110
+ constructor(config: OpenAICompatibleAdapterConfig) {
111
+ if (!config.baseURL) {
112
+ throw new Error('OpenAI-compatible adapter requires baseURL');
113
+ }
114
+
115
+ this.name = config.providerName ?? 'openai-compatible';
116
+ this.baseURL = config.baseURL.replace(/\/$/, ''); // Remove trailing slash
117
+ this.apiKey = config.apiKey ?? '';
118
+ this.defaultMaxTokens = config.defaultMaxTokens ?? 4096;
119
+ this.extraHeaders = config.extraHeaders ?? {};
120
+ }
121
+
122
+ supportsModel(_modelId: string): boolean {
123
+ // This is a generic adapter - it supports whatever the endpoint supports
124
+ // Model routing should be handled at a higher level
125
+ return true;
126
+ }
127
+
128
+ async complete(
129
+ request: ProviderRequest,
130
+ options?: ProviderRequestOptions
131
+ ): Promise<ProviderResponse> {
132
+ const openAIRequest = this.buildRequest(request);
133
+
134
+ try {
135
+ const response = await this.makeRequest(openAIRequest, options);
136
+ return this.parseResponse(response, request.model);
137
+ } catch (error) {
138
+ throw this.handleError(error);
139
+ }
140
+ }
141
+
142
+ async stream(
143
+ request: ProviderRequest,
144
+ callbacks: StreamCallbacks,
145
+ options?: ProviderRequestOptions
146
+ ): Promise<ProviderResponse> {
147
+ const openAIRequest = this.buildRequest(request);
148
+ openAIRequest.stream = true;
149
+
150
+ try {
151
+ const response = await fetch(`${this.baseURL}/chat/completions`, {
152
+ method: 'POST',
153
+ headers: this.getHeaders(),
154
+ body: JSON.stringify(openAIRequest),
155
+ signal: options?.signal,
156
+ });
157
+
158
+ if (!response.ok) {
159
+ const errorText = await response.text();
160
+ throw new Error(`API error: ${response.status} ${errorText}`);
161
+ }
162
+
163
+ const reader = response.body?.getReader();
164
+ if (!reader) {
165
+ throw new Error('No response body');
166
+ }
167
+
168
+ const decoder = new TextDecoder();
169
+ let accumulated = '';
170
+ let finishReason = 'stop';
171
+ let toolCalls: OpenAIToolCall[] = [];
172
+
173
+ while (true) {
174
+ const { done, value } = await reader.read();
175
+ if (done) break;
176
+
177
+ const chunk = decoder.decode(value, { stream: true });
178
+ const lines = chunk.split('\n').filter(line => line.startsWith('data: '));
179
+
180
+ for (const line of lines) {
181
+ const data = line.slice(6);
182
+ if (data === '[DONE]') continue;
183
+
184
+ try {
185
+ const parsed = JSON.parse(data);
186
+ const delta = parsed.choices?.[0]?.delta;
187
+
188
+ if (delta?.content) {
189
+ accumulated += delta.content;
190
+ callbacks.onChunk(delta.content);
191
+ }
192
+
193
+ // Handle streaming tool calls
194
+ if (delta?.tool_calls) {
195
+ for (const tc of delta.tool_calls) {
196
+ const index = tc.index ?? 0;
197
+ if (!toolCalls[index]) {
198
+ toolCalls[index] = {
199
+ id: tc.id ?? '',
200
+ type: 'function',
201
+ function: { name: '', arguments: '' },
202
+ };
203
+ }
204
+ if (tc.id) toolCalls[index].id = tc.id;
205
+ if (tc.function?.name) toolCalls[index].function.name = tc.function.name;
206
+ if (tc.function?.arguments) {
207
+ toolCalls[index].function.arguments += tc.function.arguments;
208
+ }
209
+ }
210
+ }
211
+
212
+ if (parsed.choices?.[0]?.finish_reason) {
213
+ finishReason = parsed.choices[0].finish_reason;
214
+ }
215
+ } catch {
216
+ // Ignore parse errors in stream
217
+ }
218
+ }
219
+ }
220
+
221
+ // Build response with accumulated data
222
+ const message: OpenAIMessage = {
223
+ role: 'assistant',
224
+ content: accumulated || null,
225
+ };
226
+
227
+ if (toolCalls.length > 0) {
228
+ message.tool_calls = toolCalls;
229
+ }
230
+
231
+ return this.parseStreamedResponse(message, finishReason, request.model);
232
+
233
+ } catch (error) {
234
+ throw this.handleError(error);
235
+ }
236
+ }
237
+
238
+ private getHeaders(): Record<string, string> {
239
+ const headers: Record<string, string> = {
240
+ 'Content-Type': 'application/json',
241
+ ...this.extraHeaders,
242
+ };
243
+
244
+ // Only add Authorization header if we have an API key
245
+ if (this.apiKey) {
246
+ headers['Authorization'] = `Bearer ${this.apiKey}`;
247
+ }
248
+
249
+ return headers;
250
+ }
251
+
252
+ private buildRequest(request: ProviderRequest): any {
253
+ const messages = this.convertMessages(request.messages as any[]);
254
+
255
+ const params: any = {
256
+ model: request.model,
257
+ messages,
258
+ max_tokens: request.maxTokens || this.defaultMaxTokens,
259
+ };
260
+
261
+ if (request.temperature !== undefined) {
262
+ params.temperature = request.temperature;
263
+ }
264
+
265
+ if (request.stopSequences && request.stopSequences.length > 0) {
266
+ params.stop = request.stopSequences;
267
+ }
268
+
269
+ if (request.tools && request.tools.length > 0) {
270
+ params.tools = this.convertTools(request.tools as any[]);
271
+ }
272
+
273
+ // Apply extra params
274
+ if (request.extra) {
275
+ Object.assign(params, request.extra);
276
+ }
277
+
278
+ return params;
279
+ }
280
+
281
+ private convertMessages(messages: any[]): OpenAIMessage[] {
282
+ // Use flatMap to handle one-to-many expansion (multiple tool_results → multiple messages)
283
+ return messages.flatMap(msg => {
284
+ // If it's already in OpenAI format, pass through
285
+ if (msg.role && (typeof msg.content === 'string' || msg.content === null || msg.tool_calls)) {
286
+ return [msg as OpenAIMessage];
287
+ }
288
+
289
+ // Convert from Anthropic-style format
290
+ if (Array.isArray(msg.content)) {
291
+ const textParts: string[] = [];
292
+ const toolCalls: OpenAIToolCall[] = [];
293
+ const toolResults: OpenAIMessage[] = [];
294
+
295
+ for (const block of msg.content) {
296
+ if (block.type === 'text') {
297
+ textParts.push(block.text);
298
+ } else if (block.type === 'tool_use') {
299
+ toolCalls.push({
300
+ id: block.id,
301
+ type: 'function',
302
+ function: {
303
+ name: block.name,
304
+ arguments: JSON.stringify(block.input),
305
+ },
306
+ });
307
+ } else if (block.type === 'tool_result') {
308
+ // Collect ALL tool results - each becomes a separate message
309
+ toolResults.push({
310
+ role: 'tool' as const,
311
+ tool_call_id: block.tool_use_id || block.toolUseId,
312
+ content: typeof block.content === 'string' ? block.content : JSON.stringify(block.content),
313
+ });
314
+ }
315
+ }
316
+
317
+ // If we have tool results, return them (possibly multiple)
318
+ if (toolResults.length > 0) {
319
+ return toolResults;
320
+ }
321
+
322
+ // Otherwise build normal message
323
+ const result: OpenAIMessage = {
324
+ role: msg.role,
325
+ content: textParts.join('\n') || null,
326
+ };
327
+
328
+ if (toolCalls.length > 0) {
329
+ result.tool_calls = toolCalls;
330
+ }
331
+
332
+ return [result];
333
+ }
334
+
335
+ return [{
336
+ role: msg.role,
337
+ content: msg.content,
338
+ }];
339
+ });
340
+ }
341
+
342
+ private convertTools(tools: any[]): OpenAITool[] {
343
+ return tools.map(tool => {
344
+ // Handle different input formats
345
+ const inputSchema = tool.inputSchema || tool.input_schema || { type: 'object', properties: {} };
346
+
347
+ return {
348
+ type: 'function',
349
+ function: {
350
+ name: tool.name,
351
+ description: tool.description,
352
+ parameters: inputSchema,
353
+ },
354
+ };
355
+ });
356
+ }
357
+
358
+ private async makeRequest(request: any, options?: ProviderRequestOptions): Promise<OpenAIResponse> {
359
+ const response = await fetch(`${this.baseURL}/chat/completions`, {
360
+ method: 'POST',
361
+ headers: this.getHeaders(),
362
+ body: JSON.stringify(request),
363
+ signal: options?.signal,
364
+ });
365
+
366
+ if (!response.ok) {
367
+ const errorText = await response.text();
368
+ throw new Error(`API error: ${response.status} ${errorText}`);
369
+ }
370
+
371
+ return response.json() as Promise<OpenAIResponse>;
372
+ }
373
+
374
+ private parseResponse(response: OpenAIResponse, requestedModel: string): ProviderResponse {
375
+ const choice = response.choices[0];
376
+ const message = choice?.message;
377
+
378
+ return {
379
+ content: this.messageToContent(message),
380
+ stopReason: this.mapFinishReason(choice?.finish_reason),
381
+ stopSequence: undefined,
382
+ usage: {
383
+ inputTokens: response.usage?.prompt_tokens ?? 0,
384
+ outputTokens: response.usage?.completion_tokens ?? 0,
385
+ },
386
+ model: response.model ?? requestedModel,
387
+ raw: response,
388
+ };
389
+ }
390
+
391
+ private parseStreamedResponse(
392
+ message: OpenAIMessage,
393
+ finishReason: string,
394
+ requestedModel: string
395
+ ): ProviderResponse {
396
+ return {
397
+ content: this.messageToContent(message),
398
+ stopReason: this.mapFinishReason(finishReason),
399
+ stopSequence: undefined,
400
+ usage: {
401
+ inputTokens: 0, // Not available in streaming
402
+ outputTokens: 0,
403
+ },
404
+ model: requestedModel,
405
+ raw: { message, finish_reason: finishReason },
406
+ };
407
+ }
408
+
409
+ private messageToContent(message: OpenAIMessage | undefined): ContentBlock[] {
410
+ if (!message) return [];
411
+
412
+ const content: ContentBlock[] = [];
413
+
414
+ if (message.content) {
415
+ content.push({ type: 'text', text: message.content });
416
+ }
417
+
418
+ if (message.tool_calls) {
419
+ for (const tc of message.tool_calls) {
420
+ content.push({
421
+ type: 'tool_use',
422
+ id: tc.id,
423
+ name: tc.function.name,
424
+ input: JSON.parse(tc.function.arguments || '{}'),
425
+ });
426
+ }
427
+ }
428
+
429
+ return content;
430
+ }
431
+
432
+ private mapFinishReason(reason: string | undefined): string {
433
+ switch (reason) {
434
+ case 'stop':
435
+ return 'end_turn';
436
+ case 'length':
437
+ return 'max_tokens';
438
+ case 'tool_calls':
439
+ return 'tool_use';
440
+ case 'content_filter':
441
+ return 'refusal';
442
+ default:
443
+ return 'end_turn';
444
+ }
445
+ }
446
+
447
+ private handleError(error: unknown): MembraneError {
448
+ if (error instanceof Error) {
449
+ const message = error.message;
450
+
451
+ if (message.includes('429') || message.includes('rate')) {
452
+ return rateLimitError(message, undefined, error);
453
+ }
454
+
455
+ if (message.includes('401') || message.includes('auth') || message.includes('Unauthorized')) {
456
+ return authError(message, error);
457
+ }
458
+
459
+ if (message.includes('context') || message.includes('too long') || message.includes('maximum context')) {
460
+ return contextLengthError(message, error);
461
+ }
462
+
463
+ if (message.includes('500') || message.includes('502') || message.includes('503')) {
464
+ return serverError(message, undefined, error);
465
+ }
466
+
467
+ if (error.name === 'AbortError') {
468
+ return abortError();
469
+ }
470
+
471
+ if (message.includes('network') || message.includes('fetch') || message.includes('ECONNREFUSED')) {
472
+ return networkError(message, error);
473
+ }
474
+ }
475
+
476
+ return new MembraneError({
477
+ type: 'unknown',
478
+ message: error instanceof Error ? error.message : String(error),
479
+ retryable: false,
480
+ rawError: error,
481
+ });
482
+ }
483
+ }
484
+
485
+ // ============================================================================
486
+ // Content Conversion Utilities
487
+ // ============================================================================
488
+
489
+ /**
490
+ * Convert normalized content blocks to OpenAI message format
491
+ */
492
+ export function toOpenAIMessages(
493
+ messages: { role: string; content: ContentBlock[] }[]
494
+ ): OpenAIMessage[] {
495
+ const result: OpenAIMessage[] = [];
496
+
497
+ for (const msg of messages) {
498
+ const textParts: string[] = [];
499
+ const toolCalls: OpenAIToolCall[] = [];
500
+ const toolResults: { id: string; content: string }[] = [];
501
+
502
+ for (const block of msg.content) {
503
+ if (block.type === 'text') {
504
+ textParts.push(block.text);
505
+ } else if (block.type === 'tool_use') {
506
+ toolCalls.push({
507
+ id: block.id,
508
+ type: 'function',
509
+ function: {
510
+ name: block.name,
511
+ arguments: JSON.stringify(block.input),
512
+ },
513
+ });
514
+ } else if (block.type === 'tool_result') {
515
+ toolResults.push({
516
+ id: block.toolUseId,
517
+ content: typeof block.content === 'string' ? block.content : JSON.stringify(block.content),
518
+ });
519
+ }
520
+ }
521
+
522
+ // Add main message
523
+ if (textParts.length > 0 || toolCalls.length > 0) {
524
+ const message: OpenAIMessage = {
525
+ role: msg.role as 'user' | 'assistant',
526
+ content: textParts.join('\n') || null,
527
+ };
528
+ if (toolCalls.length > 0) {
529
+ message.tool_calls = toolCalls;
530
+ }
531
+ result.push(message);
532
+ }
533
+
534
+ // Add tool results as separate messages
535
+ for (const tr of toolResults) {
536
+ result.push({
537
+ role: 'tool',
538
+ tool_call_id: tr.id,
539
+ content: tr.content,
540
+ });
541
+ }
542
+ }
543
+
544
+ return result;
545
+ }
546
+
547
+ /**
548
+ * Convert OpenAI response message to normalized content blocks
549
+ */
550
+ export function fromOpenAIMessage(message: OpenAIMessage): ContentBlock[] {
551
+ const result: ContentBlock[] = [];
552
+
553
+ if (message.content) {
554
+ result.push({ type: 'text', text: message.content });
555
+ }
556
+
557
+ if (message.tool_calls) {
558
+ for (const tc of message.tool_calls) {
559
+ result.push({
560
+ type: 'tool_use',
561
+ id: tc.id,
562
+ name: tc.function.name,
563
+ input: JSON.parse(tc.function.arguments || '{}'),
564
+ });
565
+ }
566
+ }
567
+
568
+ return result;
569
+ }
570
+