@ai-sdk/open-responses 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,613 @@
1
// src/version.ts
// The bundler constant-folded the test-build switch into `true ? "1.0.0" : "0.0.0-test"`;
// the false branch is dead code in the published artifact, so use the literal directly.
var VERSION = "1.0.0";
3
+
4
+ // src/open-responses-provider.ts
5
+ import {
6
+ NoSuchModelError
7
+ } from "@ai-sdk/provider";
8
+ import {
9
+ generateId,
10
+ withUserAgentSuffix
11
+ } from "@ai-sdk/provider-utils";
12
+
13
+ // src/responses/open-responses-language-model.ts
14
+ import {
15
+ combineHeaders,
16
+ createEventSourceResponseHandler,
17
+ createJsonErrorResponseHandler,
18
+ createJsonResponseHandler,
19
+ jsonSchema,
20
+ postJsonToApi
21
+ } from "@ai-sdk/provider-utils";
22
+ import { z as z2 } from "zod/v4";
23
+
24
+ // src/responses/convert-to-open-responses-input.ts
25
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
26
+ async function convertToOpenResponsesInput({
27
+ prompt
28
+ }) {
29
+ var _a, _b;
30
+ const input = [];
31
+ const warnings = [];
32
+ const systemMessages = [];
33
+ for (const { role, content } of prompt) {
34
+ switch (role) {
35
+ case "system": {
36
+ systemMessages.push(content);
37
+ break;
38
+ }
39
+ case "user": {
40
+ const userContent = [];
41
+ for (const part of content) {
42
+ switch (part.type) {
43
+ case "text": {
44
+ userContent.push({ type: "input_text", text: part.text });
45
+ break;
46
+ }
47
+ case "file": {
48
+ if (!part.mediaType.startsWith("image/")) {
49
+ warnings.push({
50
+ type: "other",
51
+ message: `unsupported file content type: ${part.mediaType}`
52
+ });
53
+ break;
54
+ }
55
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
56
+ userContent.push({
57
+ type: "input_image",
58
+ ...part.data instanceof URL ? { image_url: part.data.toString() } : {
59
+ image_url: `data:${mediaType};base64,${convertToBase64(part.data)}`
60
+ }
61
+ });
62
+ break;
63
+ }
64
+ }
65
+ }
66
+ input.push({ type: "message", role: "user", content: userContent });
67
+ break;
68
+ }
69
+ case "assistant": {
70
+ const assistantContent = [];
71
+ const toolCalls = [];
72
+ for (const part of content) {
73
+ switch (part.type) {
74
+ case "text": {
75
+ assistantContent.push({ type: "output_text", text: part.text });
76
+ break;
77
+ }
78
+ case "tool-call": {
79
+ const argumentsValue = typeof part.input === "string" ? part.input : JSON.stringify(part.input);
80
+ toolCalls.push({
81
+ type: "function_call",
82
+ call_id: part.toolCallId,
83
+ name: part.toolName,
84
+ arguments: argumentsValue
85
+ });
86
+ break;
87
+ }
88
+ }
89
+ }
90
+ if (assistantContent.length > 0) {
91
+ input.push({
92
+ type: "message",
93
+ role: "assistant",
94
+ content: assistantContent
95
+ });
96
+ }
97
+ for (const toolCall of toolCalls) {
98
+ input.push(toolCall);
99
+ }
100
+ break;
101
+ }
102
+ case "tool": {
103
+ for (const part of content) {
104
+ if (part.type === "tool-result") {
105
+ const output = part.output;
106
+ let contentValue;
107
+ switch (output.type) {
108
+ case "text":
109
+ case "error-text":
110
+ contentValue = output.value;
111
+ break;
112
+ case "execution-denied":
113
+ contentValue = (_a = output.reason) != null ? _a : "Tool execution denied.";
114
+ break;
115
+ case "json":
116
+ case "error-json":
117
+ contentValue = JSON.stringify(output.value);
118
+ break;
119
+ case "content": {
120
+ const contentParts = [];
121
+ for (const item of output.value) {
122
+ switch (item.type) {
123
+ case "text": {
124
+ contentParts.push({
125
+ type: "input_text",
126
+ text: item.text
127
+ });
128
+ break;
129
+ }
130
+ case "image-data": {
131
+ contentParts.push({
132
+ type: "input_image",
133
+ image_url: `data:${item.mediaType};base64,${item.data}`
134
+ });
135
+ break;
136
+ }
137
+ case "image-url": {
138
+ contentParts.push({
139
+ type: "input_image",
140
+ image_url: item.url
141
+ });
142
+ break;
143
+ }
144
+ case "file-data": {
145
+ contentParts.push({
146
+ type: "input_file",
147
+ filename: (_b = item.filename) != null ? _b : "data",
148
+ file_data: `data:${item.mediaType};base64,${item.data}`
149
+ });
150
+ break;
151
+ }
152
+ default: {
153
+ warnings.push({
154
+ type: "other",
155
+ message: `unsupported tool content part type: ${item.type}`
156
+ });
157
+ break;
158
+ }
159
+ }
160
+ }
161
+ contentValue = contentParts;
162
+ break;
163
+ }
164
+ }
165
+ input.push({
166
+ type: "function_call_output",
167
+ call_id: part.toolCallId,
168
+ output: contentValue
169
+ });
170
+ }
171
+ }
172
+ break;
173
+ }
174
+ }
175
+ }
176
+ return {
177
+ input,
178
+ instructions: systemMessages.length > 0 ? systemMessages.join("\n") : void 0,
179
+ warnings
180
+ };
181
+ }
182
+
183
+ // src/responses/open-responses-api.ts
184
+ import { lazySchema } from "@ai-sdk/provider-utils";
185
+ import { z } from "zod/v4";
186
+ import { zodSchema } from "@ai-sdk/provider-utils";
187
// Schema for the provider's error envelope, consumed by
// createJsonErrorResponseHandler to surface `error.message` to callers.
var openResponsesErrorSchema = lazySchema(
  () => zodSchema(
    z.object({
      error: z.object({
        message: z.string(),
        type: z.string(),
        // `param` and `code` are nullable in OpenAI-style error payloads
        // (e.g. { "param": null, "code": null }); requiring plain strings
        // here would make error parsing itself fail and mask the actual
        // error message, so accept null/undefined.
        param: z.string().nullish(),
        code: z.string().nullish()
      })
    })
  )
);
199
+
200
+ // src/responses/map-open-responses-finish-reason.ts
201
/**
 * Maps the Open Responses `incomplete_details.reason` to the AI SDK's
 * unified finish reason.
 *
 * An absent reason (null/undefined) means the response completed normally:
 * report "tool-calls" when the model invoked tools, otherwise "stop".
 * Unrecognized reasons fall back to "other" (or "tool-calls" when tools ran).
 */
function mapOpenResponsesFinishReason({
  finishReason,
  hasToolCalls
}) {
  if (finishReason == null) {
    return hasToolCalls ? "tool-calls" : "stop";
  }
  if (finishReason === "max_output_tokens") {
    return "length";
  }
  if (finishReason === "content_filter") {
    return "content-filter";
  }
  return hasToolCalls ? "tool-calls" : "other";
}
217
+
218
+ // src/responses/open-responses-language-model.ts
219
// Language model implementation (specification v3) for Open Responses-style
// endpoints. This is compiled bundler output: `_a`/`_b`... temporaries and
// `x == null ? void 0 : x.y` chains correspond to optional chaining /
// nullish coalescing in the original TypeScript.
var OpenResponsesLanguageModel = class {
  // `config` supplies provider name, request URL, a headers() factory,
  // an optional fetch implementation, and a generateId function.
  constructor(modelId, config) {
    this.specificationVersion = "v3";
    // http(s) image URLs are passed through to the API without downloading.
    this.supportedUrls = {
      "image/*": [/^https?:\/\/.*$/]
    };
    this.modelId = modelId;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the JSON request body from AI SDK call options. Returns
  // { body, warnings }; unsupported settings (stopSequences, topK, seed)
  // are reported as warnings rather than rejected.
  // NOTE(review): `providerOptions` is destructured but unused here.
  async getArgs({
    maxOutputTokens,
    temperature,
    stopSequences,
    topP,
    topK,
    presencePenalty,
    frequencyPenalty,
    seed,
    prompt,
    providerOptions,
    tools,
    toolChoice,
    responseFormat
  }) {
    var _a;
    const warnings = [];
    if (stopSequences != null) {
      warnings.push({ type: "unsupported", feature: "stopSequences" });
    }
    if (topK != null) {
      warnings.push({ type: "unsupported", feature: "topK" });
    }
    if (seed != null) {
      warnings.push({ type: "unsupported", feature: "seed" });
    }
    const {
      input,
      instructions,
      warnings: inputWarnings
    } = await convertToOpenResponsesInput({
      prompt
    });
    warnings.push(...inputWarnings);
    // Only function tools are forwarded; other tool types are dropped silently.
    const functionTools = tools == null ? void 0 : tools.filter((tool) => tool.type === "function").map((tool) => ({
      type: "function",
      name: tool.name,
      description: tool.description,
      parameters: tool.inputSchema,
      ...tool.strict != null ? { strict: tool.strict } : {}
    }));
    // "tool" choice pins a specific function; other choices ("auto", "none",
    // "required") are passed through as their type string.
    const convertedToolChoice = toolChoice == null ? void 0 : toolChoice.type === "tool" ? { type: "function", name: toolChoice.toolName } : toolChoice.type;
    // NOTE(review): when responseFormat.type === "json" but no schema is given,
    // this sends { type: "json_schema" } with no schema attached — confirm the
    // target API accepts that (vs. a plain json_object mode).
    const textFormat = (responseFormat == null ? void 0 : responseFormat.type) === "json" ? {
      type: "json_schema",
      ...responseFormat.schema != null ? {
        name: (_a = responseFormat.name) != null ? _a : "response",
        description: responseFormat.description,
        schema: responseFormat.schema,
        strict: true
      } : {}
    } : void 0;
    return {
      body: {
        model: this.modelId,
        input,
        instructions,
        max_output_tokens: maxOutputTokens,
        temperature,
        top_p: topP,
        presence_penalty: presencePenalty,
        frequency_penalty: frequencyPenalty,
        // An empty tools array is omitted entirely rather than sent as [].
        tools: (functionTools == null ? void 0 : functionTools.length) ? functionTools : void 0,
        tool_choice: convertedToolChoice,
        ...textFormat != null && { text: { format: textFormat } }
      },
      warnings
    };
  }
  // Non-streaming generation: POSTs the request and maps the response's
  // output items (reasoning / message / function_call) to AI SDK content.
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f;
    const { body, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await postJsonToApi({
      url: this.config.url,
      headers: combineHeaders(this.config.headers(), options.headers),
      body,
      failedResponseHandler: createJsonErrorResponseHandler({
        errorSchema: openResponsesErrorSchema,
        errorToMessage: (error) => error.error.message
      }),
      successfulResponseHandler: createJsonResponseHandler(
        // do not validate the response body, only apply types to the response body
        // NOTE(review): this schema thrower relies on the success handler never
        // actually invoking validation — if it ever does, doGenerate fails.
        jsonSchema(() => {
          throw new Error("json schema not implemented");
        })
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const content = [];
    let hasToolCalls = false;
    // Unknown output item types fall through the switch and are ignored.
    for (const part of response.output) {
      switch (part.type) {
        // TODO AI SDK 7 adjust reasoning in the specification to better support the reasoning structure from open responses.
        case "reasoning": {
          // A reasoning item may carry no content; treat that as empty.
          for (const contentPart of (_a = part.content) != null ? _a : []) {
            content.push({
              type: "reasoning",
              text: contentPart.text
            });
          }
          break;
        }
        case "message": {
          for (const contentPart of part.content) {
            content.push({
              type: "text",
              text: contentPart.text
            });
          }
          break;
        }
        case "function_call": {
          hasToolCalls = true;
          content.push({
            type: "tool-call",
            toolCallId: part.call_id,
            toolName: part.name,
            input: part.arguments
          });
          break;
        }
      }
    }
    // Usage fields are all optional on the wire; derived fields treat
    // missing counts as 0 when subtracting.
    const usage = response.usage;
    const inputTokens = usage == null ? void 0 : usage.input_tokens;
    const cachedInputTokens = (_b = usage == null ? void 0 : usage.input_tokens_details) == null ? void 0 : _b.cached_tokens;
    const outputTokens = usage == null ? void 0 : usage.output_tokens;
    const reasoningTokens = (_c = usage == null ? void 0 : usage.output_tokens_details) == null ? void 0 : _c.reasoning_tokens;
    return {
      content,
      finishReason: {
        unified: mapOpenResponsesFinishReason({
          finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
          hasToolCalls
        }),
        raw: (_f = (_e = response.incomplete_details) == null ? void 0 : _e.reason) != null ? _f : void 0
      },
      usage: {
        inputTokens: {
          total: inputTokens,
          noCache: (inputTokens != null ? inputTokens : 0) - (cachedInputTokens != null ? cachedInputTokens : 0),
          cacheRead: cachedInputTokens,
          cacheWrite: void 0
        },
        outputTokens: {
          total: outputTokens,
          text: (outputTokens != null ? outputTokens : 0) - (reasoningTokens != null ? reasoningTokens : 0),
          reasoning: reasoningTokens
        },
        raw: response.usage
      },
      request: { body },
      response: {
        id: response.id,
        // created_at is epoch seconds on the wire; Date wants milliseconds.
        timestamp: new Date(response.created_at * 1e3),
        modelId: response.model,
        headers: responseHeaders,
        body: rawResponse
      },
      providerMetadata: void 0,
      warnings
    };
  }
  // Streaming generation: POSTs with stream:true and transforms the SSE
  // event stream into AI SDK stream parts.
  async doStream(options) {
    const { body, warnings } = await this.getArgs(options);
    const { responseHeaders, value: response } = await postJsonToApi({
      url: this.config.url,
      headers: combineHeaders(this.config.headers(), options.headers),
      body: {
        ...body,
        stream: true
      },
      failedResponseHandler: createJsonErrorResponseHandler({
        errorSchema: openResponsesErrorSchema,
        errorToMessage: (error) => error.error.message
      }),
      // TODO consider validation
      successfulResponseHandler: createEventSourceResponseHandler(z2.any()),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Mutable usage accumulator, filled in by updateUsage when a terminal
    // event (completed/incomplete/failed) carries usage data.
    const usage = {
      inputTokens: {
        total: void 0,
        noCache: void 0,
        cacheRead: void 0,
        cacheWrite: void 0
      },
      outputTokens: {
        total: void 0,
        text: void 0,
        reasoning: void 0
      }
    };
    const updateUsage = (responseUsage) => {
      var _a, _b;
      if (!responseUsage) {
        return;
      }
      const inputTokens = responseUsage.input_tokens;
      const cachedInputTokens = (_a = responseUsage.input_tokens_details) == null ? void 0 : _a.cached_tokens;
      const outputTokens = responseUsage.output_tokens;
      const reasoningTokens = (_b = responseUsage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens;
      usage.inputTokens = {
        total: inputTokens,
        noCache: (inputTokens != null ? inputTokens : 0) - (cachedInputTokens != null ? cachedInputTokens : 0),
        cacheRead: cachedInputTokens,
        cacheWrite: void 0
      };
      usage.outputTokens = {
        total: outputTokens,
        text: (outputTokens != null ? outputTokens : 0) - (reasoningTokens != null ? reasoningTokens : 0),
        reasoning: reasoningTokens
      };
      usage.raw = responseUsage;
    };
    let isActiveReasoning = false;
    let hasToolCalls = false;
    let finishReason = {
      unified: "other",
      raw: void 0
    };
    // Buffers partial function-call arguments, keyed by output item id,
    // until the item's `done` event emits the final tool-call part.
    const toolCallsByItemId = {};
    return {
      stream: response.pipeThrough(
        new TransformStream({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(parseResult, controller) {
            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
            if (options.includeRawChunks) {
              controller.enqueue({
                type: "raw",
                rawValue: parseResult.rawValue
              });
            }
            if (!parseResult.success) {
              controller.enqueue({ type: "error", error: parseResult.error });
              return;
            }
            const chunk = parseResult.value;
            if (chunk.type === "response.output_item.added" && chunk.item.type === "function_call") {
              // Start of a tool call; arguments may still stream in as deltas.
              toolCallsByItemId[chunk.item.id] = {
                toolName: chunk.item.name,
                toolCallId: chunk.item.call_id,
                arguments: chunk.item.arguments
              };
            } else if (chunk.type === "response.function_call_arguments.delta") {
              const functionCallChunk = chunk;
              // Compiled `??=`: creates the buffer entry if the `added`
              // event was missed, then appends the delta.
              const toolCall = (_a = toolCallsByItemId[functionCallChunk.item_id]) != null ? _a : toolCallsByItemId[functionCallChunk.item_id] = {};
              toolCall.arguments = ((_b = toolCall.arguments) != null ? _b : "") + functionCallChunk.delta;
            } else if (chunk.type === "response.function_call_arguments.done") {
              const functionCallChunk = chunk;
              // The `done` event carries the full argument string; it
              // overrides whatever was accumulated from deltas.
              const toolCall = (_c = toolCallsByItemId[functionCallChunk.item_id]) != null ? _c : toolCallsByItemId[functionCallChunk.item_id] = {};
              toolCall.arguments = functionCallChunk.arguments;
            } else if (chunk.type === "response.output_item.done" && chunk.item.type === "function_call") {
              // Emit the completed tool call, preferring buffered values and
              // falling back to the fields on the done event itself.
              const toolCall = toolCallsByItemId[chunk.item.id];
              const toolName = (_d = toolCall == null ? void 0 : toolCall.toolName) != null ? _d : chunk.item.name;
              const toolCallId = (_e = toolCall == null ? void 0 : toolCall.toolCallId) != null ? _e : chunk.item.call_id;
              const input = (_g = (_f = toolCall == null ? void 0 : toolCall.arguments) != null ? _f : chunk.item.arguments) != null ? _g : "";
              controller.enqueue({
                type: "tool-call",
                toolCallId,
                toolName,
                input
              });
              hasToolCalls = true;
              delete toolCallsByItemId[chunk.item.id];
            } else if (chunk.type === "response.output_item.added" && chunk.item.type === "reasoning") {
              controller.enqueue({
                type: "reasoning-start",
                id: chunk.item.id
              });
              isActiveReasoning = true;
            } else if (chunk.type === "response.reasoning_text.delta") {
              const reasoningChunk = chunk;
              controller.enqueue({
                type: "reasoning-delta",
                id: reasoningChunk.item_id,
                delta: reasoningChunk.delta
              });
            } else if (chunk.type === "response.output_item.done" && chunk.item.type === "reasoning") {
              controller.enqueue({ type: "reasoning-end", id: chunk.item.id });
              isActiveReasoning = false;
            } else if (chunk.type === "response.output_item.added" && chunk.item.type === "message") {
              controller.enqueue({ type: "text-start", id: chunk.item.id });
            } else if (chunk.type === "response.output_text.delta") {
              controller.enqueue({
                type: "text-delta",
                id: chunk.item_id,
                delta: chunk.delta
              });
            } else if (chunk.type === "response.output_item.done" && chunk.item.type === "message") {
              controller.enqueue({ type: "text-end", id: chunk.item.id });
            } else if (chunk.type === "response.completed" || chunk.type === "response.incomplete") {
              // Terminal event: record the finish reason and final usage.
              const reason = (_h = chunk.response.incomplete_details) == null ? void 0 : _h.reason;
              finishReason = {
                unified: mapOpenResponsesFinishReason({
                  finishReason: reason,
                  hasToolCalls
                }),
                raw: reason != null ? reason : void 0
              };
              updateUsage(chunk.response.usage);
            } else if (chunk.type === "response.failed") {
              finishReason = {
                unified: "error",
                raw: (_j = (_i = chunk.response.error) == null ? void 0 : _i.code) != null ? _j : chunk.response.status
              };
              updateUsage(chunk.response.usage);
            }
          },
          flush(controller) {
            // NOTE(review): this fallback closes an unterminated reasoning
            // block with the hardcoded id "reasoning-0", which may not match
            // the item id used in the corresponding reasoning-start — verify.
            if (isActiveReasoning) {
              controller.enqueue({ type: "reasoning-end", id: "reasoning-0" });
            }
            controller.enqueue({
              type: "finish",
              finishReason,
              usage,
              providerMetadata: void 0
            });
          }
        })
      ),
      request: { body },
      response: { headers: responseHeaders }
    };
  }
};
566
+
567
+ // src/open-responses-provider.ts
568
+ function createOpenResponses(options) {
569
+ const providerName = options.name;
570
+ const getHeaders = () => withUserAgentSuffix(
571
+ {
572
+ ...options.apiKey ? {
573
+ Authorization: `Bearer ${options.apiKey}`
574
+ } : {},
575
+ ...options.headers
576
+ },
577
+ `ai-sdk/open-responses/${VERSION}`
578
+ );
579
+ const createResponsesModel = (modelId) => {
580
+ return new OpenResponsesLanguageModel(modelId, {
581
+ provider: `${providerName}.responses`,
582
+ headers: getHeaders,
583
+ url: options.url,
584
+ fetch: options.fetch,
585
+ generateId: () => generateId()
586
+ });
587
+ };
588
+ const createLanguageModel = (modelId) => {
589
+ if (new.target) {
590
+ throw new Error(
591
+ "The OpenAI model function cannot be called with the new keyword."
592
+ );
593
+ }
594
+ return createResponsesModel(modelId);
595
+ };
596
+ const provider = function(modelId) {
597
+ return createLanguageModel(modelId);
598
+ };
599
+ provider.specificationVersion = "v3";
600
+ provider.languageModel = createLanguageModel;
601
+ provider.embeddingModel = (modelId) => {
602
+ throw new NoSuchModelError({ modelId, modelType: "embeddingModel" });
603
+ };
604
+ provider.imageModel = (modelId) => {
605
+ throw new NoSuchModelError({ modelId, modelType: "imageModel" });
606
+ };
607
+ return provider;
608
+ }
609
// Public module surface: the package version constant and the provider factory.
export {
  VERSION,
  createOpenResponses
};
613
+ //# sourceMappingURL=index.mjs.map