@animalabs/membrane 0.5.29 → 0.5.31

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,17 +1,25 @@
 /**
- * OpenAI Responses API provider adapter
+ * OpenAI Images API provider adapter
  *
- * Adapter for OpenAI's `/v1/responses` endpoint, required for image generation
- * models like `gpt-image-1`. The Responses API differs from Chat Completions:
+ * Adapter for OpenAI's Images API endpoints, used for image generation
+ * models like `gpt-image-1`:
  *
- * - Uses `input` array instead of `messages`
- * - Content types: `input_text` / `input_image` (not `text` / `image_url`)
- * - Image generation is a tool: `{"type": "image_generation"}`
- * - Generated images come back as `image_generation_call` output items
- * - Streaming uses different event types
+ * - `/v1/images/generations`: text-to-image (no image input)
+ * - `/v1/images/edits`: image editing (accepts input images + prompt)
  *
- * This adapter converts membrane's ProviderRequest into the Responses API format,
- * sends the request, and converts the response back into membrane ContentBlocks.
+ * The adapter automatically selects the right endpoint:
+ * - If the conversation contains images → uses /edits, passing images as data URLs
+ * - If text-only → uses /generations
+ *
+ * Both endpoints:
+ * - Take a single `prompt` string (not conversation messages)
+ * - Return base64-encoded images in `data[].b64_json`
+ * - Have no streaming support (the complete image is returned)
+ * - Support `size`, `quality`, `n`, `background`, `output_format`
+ *
+ * Note: the file retains the name openai-responses.ts and the class name
+ * OpenAIResponsesAdapter for compatibility with existing factory
+ * routing and vendor configs (`openairesponses-*` prefix).
  */
 
 import type {
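
The header comment above summarizes the two endpoints. As a point of reference, a minimal standalone call to the text-to-image endpoint might look like the sketch below (hypothetical, not code from the package; assumes `OPENAI_API_KEY` is set and Node 18+ for global `fetch`):

```ts
// Hedged sketch of a direct /v1/images/generations call, mirroring the
// request fields the new adapter sends (model, prompt, n, size, quality).
const res = await fetch('https://api.openai.com/v1/images/generations', {
  method: 'POST',
  headers: {
    Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
    'Content-Type': 'application/json',
  },
  body: JSON.stringify({
    model: 'gpt-image-1',
    prompt: 'A watercolor lighthouse at dusk',
    n: 1,
    size: '1024x1024',
  }),
});
const json = await res.json();
const base64Png: string | undefined = json.data?.[0]?.b64_json;
```
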
@@ -33,77 +41,53 @@ import {
 } from '../types/index.js';
 
 // ============================================================================
-// Responses API Types
+// Images API Types
 // ============================================================================
 
-interface ResponsesInputTextPart {
-  type: 'input_text';
-  text: string;
-}
-
-interface ResponsesInputImagePart {
-  type: 'input_image';
-  image_url: string; // data URI: "data:image/jpeg;base64,..."
-  detail?: 'auto' | 'low' | 'high';
-}
-
-type ResponsesInputPart = ResponsesInputTextPart | ResponsesInputImagePart;
-
-interface ResponsesInputMessage {
-  role: 'user' | 'assistant' | 'system' | 'developer';
-  content: ResponsesInputPart[] | string;
-}
-
-interface ResponsesRequest {
+interface ImagesGenerateRequest {
   model: string;
-  input: (ResponsesInputMessage | string)[];
-  instructions?: string;
-  tools?: { type: string; [key: string]: unknown }[];
-  temperature?: number;
-  top_p?: number;
-  max_output_tokens?: number;
-  stream?: boolean;
+  prompt: string;
+  n?: number;
+  size?: string;
+  quality?: string;
+  background?: string;
+  output_format?: string;
   [key: string]: unknown;
 }
 
-interface ResponsesOutputText {
-  type: 'output_text';
-  text: string;
-}
-
-interface ResponsesImageGenerationCall {
-  type: 'image_generation_call';
-  id: string;
-  result: string; // base64 image data
-  status?: string;
+interface ImagesEditRequest {
+  model: string;
+  prompt: string;
+  image: string[]; // base64 data URLs
+  n?: number;
+  size?: string;
+  quality?: string;
+  [key: string]: unknown;
 }
 
-type ResponsesOutputContent = ResponsesOutputText | ResponsesImageGenerationCall;
+type ImagesRequest = ImagesGenerateRequest | ImagesEditRequest;
 
-interface ResponsesOutputMessage {
-  type: 'message';
-  id: string;
-  role: 'assistant';
-  content: ResponsesOutputContent[];
+interface ImagesResponseData {
+  b64_json?: string;
+  url?: string;
+  revised_prompt?: string;
 }
 
-type ResponsesOutputItem = ResponsesOutputMessage | ResponsesImageGenerationCall;
-
-interface ResponsesAPIResponse {
-  id: string;
-  object: string;
-  model: string;
-  output: ResponsesOutputItem[];
+interface ImagesAPIResponse {
+  created: number;
+  data: ImagesResponseData[];
   usage?: {
     input_tokens: number;
     output_tokens: number;
     total_tokens: number;
     input_tokens_details?: {
-      cached_tokens?: number;
+      text_tokens?: number;
+      image_tokens?: number;
+    };
+    output_tokens_details?: {
+      image_tokens?: number;
     };
   };
-  status?: string;
-  error?: { code: string; message: string };
 }
 
 // ============================================================================
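
To make the new types concrete, here is an illustrative value conforming to `ImagesAPIResponse` as defined above (all field values are made up):

```ts
const exampleResponse: ImagesAPIResponse = {
  created: 1730000000, // unix timestamp
  data: [
    {
      b64_json: 'iVBORw0KGgoAAAANSUhEUg...', // truncated base64 PNG
      revised_prompt: 'A watercolor painting of a lighthouse at dusk',
    },
  ],
  usage: {
    input_tokens: 42,
    output_tokens: 1056,
    total_tokens: 1098,
    input_tokens_details: { text_tokens: 42, image_tokens: 0 },
    output_tokens_details: { image_tokens: 1056 },
  },
};
```
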
@@ -120,12 +104,20 @@ export interface OpenAIResponsesAdapterConfig {
   /** Organization ID (optional) */
   organization?: string;
 
-  /** Default max output tokens */
+  /** Default max output tokens (unused by the Images API; kept for interface compatibility) */
   defaultMaxTokens?: number;
+
+  /**
+   * Whether to allow image editing via /v1/images/edits when images
+   * are present in the conversation context. When true (default),
+   * the adapter auto-detects images and routes to the edits endpoint.
+   * When false, always uses /v1/images/generations (text-only).
+   */
+  allowImageEditing?: boolean;
 }
 
 // ============================================================================
-// OpenAI Responses Adapter
+// OpenAI Images Adapter
 // ============================================================================
 
 export class OpenAIResponsesAdapter implements ProviderAdapter {
@@ -133,13 +125,13 @@ export class OpenAIResponsesAdapter implements ProviderAdapter {
   private apiKey: string;
   private baseURL: string;
   private organization?: string;
-  private defaultMaxTokens: number;
+  private allowImageEditing: boolean;
 
   constructor(config: OpenAIResponsesAdapterConfig = {}) {
     this.apiKey = config.apiKey ?? process.env.OPENAI_API_KEY ?? '';
     this.baseURL = (config.baseURL ?? 'https://api.openai.com/v1').replace(/\/$/, '');
     this.organization = config.organization;
-    this.defaultMaxTokens = config.defaultMaxTokens ?? 4096;
+    this.allowImageEditing = config.allowImageEditing ?? true;
 
     if (!this.apiKey) {
       throw new Error('OpenAI API key not provided');
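
Taken together with the config change, constructing the adapter looks roughly like this (a sketch using only the options defined in the diff):

```ts
// Defaults: image editing enabled, key read from OPENAI_API_KEY.
const adapter = new OpenAIResponsesAdapter();

// Force text-only generation: images in the conversation are ignored
// and every request goes to /v1/images/generations.
const textOnly = new OpenAIResponsesAdapter({
  apiKey: 'sk-...',
  allowImageEditing: false,
});
```
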
@@ -154,31 +146,58 @@ export class OpenAIResponsesAdapter implements ProviderAdapter {
     request: ProviderRequest,
     options?: ProviderRequestOptions
   ): Promise<ProviderResponse> {
-    const responsesRequest = this.buildRequest(request);
-    options?.onRequest?.(responsesRequest);
+    const inputImages = this.allowImageEditing ? this.extractImages(request) : [];
+    const isEdit = inputImages.length > 0;
+    const endpoint = isEdit ? 'images/edits' : 'images/generations';
+    const imagesRequest = this.buildRequest(request, inputImages);
+    options?.onRequest?.(imagesRequest);
 
     try {
-      const response = await fetch(`${this.baseURL}/responses`, {
+      const fetchOptions: RequestInit = {
         method: 'POST',
-        headers: this.getHeaders(),
-        body: JSON.stringify(responsesRequest),
         signal: options?.signal,
-      });
+      };
 
-      if (!response.ok) {
-        const errorText = await response.text();
-        throw new Error(`OpenAI Responses API error: ${response.status} ${errorText}`);
+      if (isEdit) {
+        // /v1/images/edits requires multipart/form-data with file uploads
+        const formData = new FormData();
+        formData.append('model', imagesRequest.model);
+        formData.append('prompt', imagesRequest.prompt);
+        if (imagesRequest.n != null) formData.append('n', String(imagesRequest.n));
+        if (imagesRequest.quality) formData.append('quality', String(imagesRequest.quality));
+        if (imagesRequest.size) formData.append('size', String(imagesRequest.size));
+
+        // Convert base64 data URLs to Blobs and append as file uploads
+        for (const dataUrl of inputImages) {
+          const { buffer, mimeType } = this.dataUrlToBuffer(dataUrl);
+          const ext = mimeType.split('/')[1] || 'png';
+          const blob = new Blob([buffer], { type: mimeType });
+          formData.append('image[]', blob, `image.${ext}`);
+        }
+
+        // Auth header only — don't set Content-Type, let fetch set multipart boundary
+        fetchOptions.headers = {
+          Authorization: `Bearer ${this.apiKey}`,
+          ...(this.organization ? { 'OpenAI-Organization': this.organization } : {}),
+        };
+        fetchOptions.body = formData;
+      } else {
+        // /v1/images/generations accepts JSON
+        fetchOptions.headers = this.getHeaders();
+        fetchOptions.body = JSON.stringify(imagesRequest);
       }
 
-      const data = (await response.json()) as ResponsesAPIResponse;
+      const response = await fetch(`${this.baseURL}/${endpoint}`, fetchOptions);
 
-      if (data.error) {
-        throw new Error(`OpenAI Responses API error: ${data.error.code} ${data.error.message}`);
+      if (!response.ok) {
+        const errorText = await response.text();
+        throw new Error(`OpenAI Images API error: ${response.status} ${errorText}`);
       }
 
-      return this.parseResponse(data, request.model, responsesRequest);
+      const data = (await response.json()) as ImagesAPIResponse;
+      return this.parseResponse(data, request.model, imagesRequest);
     } catch (error) {
-      throw this.handleError(error, responsesRequest);
+      throw this.handleError(error, imagesRequest);
     }
   }
 
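
One subtlety in the multipart branch above: the code sets only the Authorization header and leaves `Content-Type` unset, because `fetch` must generate the multipart boundary itself. A standalone sketch of the same pattern (hypothetical values; `image[]` is the field name the adapter uses for multiple input images):

```ts
// If Content-Type were set by hand it would lack the boundary parameter
// and the upload would be rejected. With a FormData body, fetch emits
// "multipart/form-data; boundary=..." automatically.
const apiKey = process.env.OPENAI_API_KEY ?? '';
const pngBytes = new Uint8Array([/* ...raw image bytes... */]);

const form = new FormData();
form.append('model', 'gpt-image-1');
form.append('prompt', 'Make the sky purple');
form.append('image[]', new Blob([pngBytes], { type: 'image/png' }), 'image.png');

const res = await fetch('https://api.openai.com/v1/images/edits', {
  method: 'POST',
  headers: { Authorization: `Bearer ${apiKey}` }, // no Content-Type here
  body: form,
});
```
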
@@ -187,135 +206,18 @@ export class OpenAIResponsesAdapter implements ProviderAdapter {
     callbacks: StreamCallbacks,
     options?: ProviderRequestOptions
   ): Promise<ProviderResponse> {
-    const responsesRequest = this.buildRequest(request);
-    responsesRequest.stream = true;
-    options?.onRequest?.(responsesRequest);
-
-    try {
-      const response = await fetch(`${this.baseURL}/responses`, {
-        method: 'POST',
-        headers: this.getHeaders(),
-        body: JSON.stringify(responsesRequest),
-        signal: options?.signal,
-      });
-
-      if (!response.ok) {
-        const errorText = await response.text();
-        throw new Error(`OpenAI Responses API error: ${response.status} ${errorText}`);
-      }
-
-      const reader = response.body?.getReader();
-      if (!reader) {
-        throw new Error('No response body');
+    // Images API doesn't support streaming — do a full request
+    // and emit any text content as a single chunk
+    const response = await this.complete(request, options);
+
+    const blocks = response.content as ContentBlock[];
+    for (const block of blocks) {
+      if (block.type === 'text' && (block as any).text) {
+        callbacks.onChunk((block as any).text);
       }
-
-      const decoder = new TextDecoder();
-      let accumulated = '';
-      let images: { data: string; mimeType: string }[] = [];
-      let lastUsage: ResponsesAPIResponse['usage'] | undefined;
-      let buffer = '';
-
-      while (true) {
-        const { done, value } = await reader.read();
-        if (done) break;
-
-        buffer += decoder.decode(value, { stream: true });
-        const lines = buffer.split('\n');
-        buffer = lines.pop() ?? '';
-
-        for (const line of lines) {
-          if (!line.startsWith('data: ')) continue;
-          const data = line.slice(6).trim();
-          if (!data || data === '[DONE]') continue;
-
-          try {
-            const parsed = JSON.parse(data);
-
-            // Handle text deltas
-            if (parsed.type === 'response.output_text.delta') {
-              const delta = parsed.delta ?? '';
-              accumulated += delta;
-              callbacks.onChunk(delta);
-            }
-
-            // Handle completed text
-            if (parsed.type === 'response.output_text.done') {
-              // Text already accumulated via deltas
-            }
-
-            // Handle image generation results
-            if (parsed.type === 'response.image_generation_call.done') {
-              if (parsed.result) {
-                images.push({
-                  data: parsed.result,
-                  mimeType: 'image/png',
-                });
-              }
-            }
-
-            // Handle completed response (has usage)
-            if (parsed.type === 'response.completed' || parsed.type === 'response.done') {
-              const resp = parsed.response ?? parsed;
-              if (resp.usage) {
-                lastUsage = resp.usage;
-              }
-              // Extract any images from the completed response output
-              if (resp.output) {
-                for (const item of resp.output) {
-                  if (item.type === 'image_generation_call' && item.result) {
-                    // Only add if not already captured via streaming events
-                    const alreadyCaptured = images.some(img => img.data === item.result);
-                    if (!alreadyCaptured) {
-                      images.push({
-                        data: item.result,
-                        mimeType: 'image/png',
-                      });
-                    }
-                  }
-                }
-              }
-            }
-          } catch {
-            // Ignore parse errors in stream chunks
-          }
-        }
-      }
-
-      // Process remaining buffer
-      if (buffer.trim()) {
-        const remaining = buffer.trim();
-        const dataLine = remaining.startsWith('data: ') ? remaining.slice(6).trim() : remaining;
-        if (dataLine && dataLine !== '[DONE]') {
-          try {
-            const parsed = JSON.parse(dataLine);
-            if (parsed.type === 'response.completed' || parsed.type === 'response.done') {
-              const resp = parsed.response ?? parsed;
-              if (resp.usage) lastUsage = resp.usage;
-            }
-          } catch {
-            // Final buffer wasn't valid JSON
-          }
-        }
-      }
-
-      const cachedTokens = lastUsage?.input_tokens_details?.cached_tokens ?? 0;
-
-      return {
-        content: this.buildContentBlocks(accumulated, images),
-        stopReason: 'end_turn',
-        stopSequence: undefined,
-        usage: {
-          inputTokens: lastUsage?.input_tokens ?? 0,
-          outputTokens: lastUsage?.output_tokens ?? 0,
-          cacheReadTokens: cachedTokens > 0 ? cachedTokens : undefined,
-        },
-        model: request.model,
-        rawRequest: responsesRequest,
-        raw: { usage: lastUsage },
-      };
-    } catch (error) {
-      throw this.handleError(error, responsesRequest);
     }
+
+    return response;
   }
 
   // --------------------------------------------------------------------------
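
From a caller's perspective, the rewritten streaming path now resolves in one shot: `onChunk` fires at most once per text block instead of per token. A sketch of the observable behavior (the streaming method's name lies outside this hunk, so `stream` below is a stand-in; only `StreamCallbacks.onChunk` appears in the diff):

```ts
// Hypothetical harness around the adapter's streaming entry point.
declare const adapter: OpenAIResponsesAdapter;
declare const request: ProviderRequest;

const received: string[] = [];
const callbacks = {
  onChunk: (text: string) => received.push(text),
} as unknown as StreamCallbacks;

const response = await (adapter as any).stream(request, callbacks); // name is a stand-in

// For Images API responses, received.length is 0 or 1: any text block
// is replayed whole. Generated images never pass through onChunk; they
// arrive as image blocks in response.content.
```
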
@@ -335,115 +237,122 @@ export class OpenAIResponsesAdapter implements ProviderAdapter {
     return headers;
   }
 
-  private buildRequest(request: ProviderRequest): ResponsesRequest {
-    const input = this.convertMessages(request.messages as any[]);
-    const maxTokens = request.maxTokens || this.defaultMaxTokens;
+  /**
+   * Extract base64 images from conversation messages as data URLs.
+   * Used to determine whether to use /edits (with images) or /generations.
+   * Returns up to 16 images (OpenAI limit for /v1/images/edits).
+   */
+  private extractImages(request: ProviderRequest): string[] {
+    const dataUrls: string[] = [];
+    const MAX_IMAGES = 16;
 
-    const responsesRequest: ResponsesRequest = {
-      model: request.model,
-      input,
-      max_output_tokens: maxTokens,
-    };
+    if (!request.messages) return dataUrls;
 
-    // System prompt instructions
-    if (request.system) {
-      const systemText =
-        typeof request.system === 'string'
-          ? request.system
-          : (request.system as any[])
-              .filter((b: any) => b.type === 'text')
-              .map((b: any) => b.text)
-              .join('\n');
+    for (const msg of request.messages as any[]) {
+      if (!Array.isArray(msg.content)) continue;
 
-      if (systemText) {
-        responsesRequest.instructions = systemText;
+      for (const block of msg.content) {
+        if (dataUrls.length >= MAX_IMAGES) break;
+
+        if (block.type === 'image') {
+          const source = block.source;
+          if (source?.type === 'base64' && source.data) {
+            const mimeType = source.media_type ?? source.mediaType ?? 'image/png';
+            dataUrls.push(`data:${mimeType};base64,${source.data}`);
+          }
+        }
       }
     }
 
-    if (request.temperature !== undefined) {
-      responsesRequest.temperature = request.temperature;
-    }
+    return dataUrls;
+  }
 
-    if (request.topP !== undefined) {
-      responsesRequest.top_p = request.topP;
+  /**
+   * Convert a base64 data URL to a Buffer + mimeType for file upload.
+   */
+  private dataUrlToBuffer(dataUrl: string): { buffer: Buffer; mimeType: string } {
+    const match = dataUrl.match(/^data:([^;]+);base64,(.+)$/s);
+    if (!match || !match[1] || !match[2]) {
+      throw new Error('Invalid data URL format');
     }
+    return {
+      mimeType: match[1],
+      buffer: Buffer.from(match[2], 'base64'),
+    };
+  }
+
+  private buildRequest(request: ProviderRequest, inputImages: string[]): ImagesRequest {
+    const prompt = this.flattenToPrompt(request);
 
-    // Auto-include image_generation tool for image models
-    if (request.model?.includes('image')) {
-      responsesRequest.tools = [{ type: 'image_generation' }];
+    const imagesRequest: ImagesRequest = {
+      model: request.model,
+      prompt,
+      n: 1,
+      quality: 'auto',
+    };
+
+    // Include input images for the /edits endpoint
+    if (inputImages.length > 0) {
+      (imagesRequest as ImagesEditRequest).image = inputImages;
     }
 
-    // Apply extra params (filter out internal membrane fields)
+    // Apply extra params (allow overriding size, quality, n, etc.)
     if (request.extra) {
-      const { normalizedMessages, prompt, ...rest } = request.extra as Record<string, unknown>;
-      Object.assign(responsesRequest, rest);
+      const { normalizedMessages, prompt: _p, ...rest } = request.extra as Record<string, unknown>;
+      Object.assign(imagesRequest, rest);
     }
 
-    return responsesRequest;
+    return imagesRequest;
   }
 
-  private convertMessages(messages: any[]): ResponsesInputMessage[] {
-    const result: ResponsesInputMessage[] = [];
-
-    for (const msg of messages) {
-      const role = this.mapRole(msg.role);
+  /**
+   * Flatten conversation messages into a single prompt string.
+   *
+   * The Images API takes a single text prompt, not a conversation.
+   * We include the system prompt as context and concatenate all
+   * message text with role labels so the model understands the
+   * full conversation when deciding what image to generate.
+   */
+  private flattenToPrompt(request: ProviderRequest): string {
+    const parts: string[] = [];
+
+    // Include system prompt as context
+    if (request.system) {
+      const systemText =
+        typeof request.system === 'string'
+          ? request.system
+          : (request.system as any[])
+              .filter((b: any) => b.type === 'text')
+              .map((b: any) => b.text)
+              .join('\n');
 
-      // Simple string content
-      if (typeof msg.content === 'string') {
-        result.push({ role, content: msg.content });
-        continue;
+      if (systemText) {
+        parts.push(systemText);
       }
+    }
 
-      // Array content blocks (Anthropic-style)
-      if (Array.isArray(msg.content)) {
-        const parts: ResponsesInputPart[] = [];
-
-        for (const block of msg.content) {
-          if (block.type === 'text') {
-            if (block.text) {
-              parts.push({ type: 'input_text', text: block.text });
-            }
-          } else if (block.type === 'image') {
-            const source = block.source;
-            if (source?.type === 'base64' && source.data) {
-              const mimeType = source.media_type ?? source.mediaType ?? 'image/jpeg';
-              parts.push({
-                type: 'input_image',
-                image_url: `data:${mimeType};base64,${source.data}`,
-              });
-            }
-          }
-          // tool_use and tool_result are not supported in the Responses API input
-          // for image models — skip them silently
+    // Extract text from messages with role labels
+    if (request.messages) {
+      for (const msg of request.messages as any[]) {
+        const role = msg.role === 'assistant' ? 'Assistant' : 'User';
+        let text = '';
+
+        if (typeof msg.content === 'string') {
+          text = msg.content;
+        } else if (Array.isArray(msg.content)) {
+          text = msg.content
+            .filter((b: any) => b.type === 'text' && b.text)
+            .map((b: any) => b.text)
+            .join('\n');
         }
 
-        if (parts.length > 0) {
-          result.push({ role, content: parts });
+        if (text) {
+          parts.push(`${role}: ${text}`);
         }
-        continue;
       }
-
-      // Null/empty content — skip
-      if (msg.content === null || msg.content === undefined) continue;
-
-      // Fallback
-      result.push({ role, content: String(msg.content) });
     }
 
-    return result;
-  }
-
-  private mapRole(role: string): 'user' | 'assistant' | 'system' | 'developer' {
-    switch (role) {
-      case 'user':
-        return 'user';
-      case 'assistant':
-        return 'assistant';
-      case 'system':
-        return 'developer';
-      default:
-        return 'user';
-    }
+    return parts.join('\n\n');
   }
 
   // --------------------------------------------------------------------------
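
A worked example of what `flattenToPrompt` produces for a short conversation (the input is hypothetical; the output format follows the code above: system text first, then role-labelled turns joined by blank lines):

```ts
const request = {
  model: 'gpt-image-1',
  system: 'You generate product mockups.',
  messages: [
    { role: 'user', content: 'Draw a red bicycle.' },
    { role: 'assistant', content: [{ type: 'text', text: 'Here it is.' }] },
    { role: 'user', content: 'Make the frame blue instead.' },
  ],
} as unknown as ProviderRequest;

// flattenToPrompt(request) returns:
//
//   You generate product mockups.
//
//   User: Draw a red bicycle.
//
//   Assistant: Here it is.
//
//   User: Make the frame blue instead.
```
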
@@ -451,52 +360,41 @@ export class OpenAIResponsesAdapter implements ProviderAdapter {
   // --------------------------------------------------------------------------
 
   private parseResponse(
-    response: ResponsesAPIResponse,
+    response: ImagesAPIResponse,
     requestedModel: string,
     rawRequest: unknown
   ): ProviderResponse {
-    let text = '';
     const images: { data: string; mimeType: string }[] = [];
+    let revisedPrompt: string | undefined;
 
-    for (const item of response.output) {
-      if (item.type === 'message') {
-        for (const content of item.content) {
-          if (content.type === 'output_text') {
-            text += content.text;
-          } else if (content.type === 'image_generation_call') {
-            images.push({
-              data: content.result,
-              mimeType: 'image/png',
-            });
-          }
-        }
-      } else if (item.type === 'image_generation_call') {
+    for (const item of response.data) {
+      if (item.b64_json) {
         images.push({
-          data: item.result,
+          data: item.b64_json,
           mimeType: 'image/png',
         });
       }
+      if (item.revised_prompt) {
+        revisedPrompt = item.revised_prompt;
+      }
     }
 
-    const cachedTokens = response.usage?.input_tokens_details?.cached_tokens ?? 0;
-
     return {
-      content: this.buildContentBlocks(text, images),
+      content: this.buildContentBlocks(revisedPrompt, images),
       stopReason: 'end_turn',
       stopSequence: undefined,
       usage: {
         inputTokens: response.usage?.input_tokens ?? 0,
         outputTokens: response.usage?.output_tokens ?? 0,
-        cacheReadTokens: cachedTokens > 0 ? cachedTokens : undefined,
       },
-      model: response.model ?? requestedModel,
+      model: requestedModel,
       rawRequest,
       raw: response,
     };
   }
 
   private buildContentBlocks(
-    text: string,
+    text: string | undefined,
     images: { data: string; mimeType: string }[] = []
   ): ContentBlock[] {
     const content: ContentBlock[] = [];
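
The net effect of the new `parseResponse`: the revised prompt (if any) becomes a text block and each `b64_json` payload an image block. An illustrative result (image-block field names are assumed from the Anthropic-style blocks the adapter reads in `extractImages`, not shown verbatim in this hunk):

```ts
// Hypothetical shape of the content array for one image plus a revised prompt.
const blocks = [
  { type: 'text', text: 'A watercolor painting of a lighthouse at dusk' },
  {
    type: 'image',
    source: {
      type: 'base64',
      media_type: 'image/png', // this adapter labels Images API output as PNG
      data: 'iVBORw0KGgo...',  // b64_json from the response
    },
  },
] as unknown as ContentBlock[];
```
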
@@ -548,6 +446,20 @@ export class OpenAIResponsesAdapter implements ProviderAdapter {
       return contextLengthError(message, error, rawRequest);
     }
 
+    if (
+      message.includes('content_policy') ||
+      message.includes('safety_system') ||
+      message.includes('moderation')
+    ) {
+      return new MembraneError({
+        type: 'unknown',
+        message: `Content policy violation: ${message}`,
+        retryable: false,
+        rawError: error,
+        rawRequest,
+      });
+    }
+
     if (
       message.includes('500') ||
       message.includes('502') ||
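
The new moderation branch matches on substrings of the error message. An illustrative message that would trigger it (the JSON body is made up; status codes and wording vary):

```ts
// Example of a message handleError might receive after the !response.ok
// throw above; any of the three substrings routes it to the new branch.
const message =
  'OpenAI Images API error: 400 ' +
  '{"error":{"code":"moderation_blocked","message":"Rejected by the safety_system."}}';

const isPolicyViolation =
  message.includes('content_policy') ||
  message.includes('safety_system') ||
  message.includes('moderation');
// -> true: mapped to a non-retryable MembraneError
```
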