@ai-sdk/open-responses 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,629 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// src/index.ts
// Public export table: each name is registered as a live getter via __export
// and then copied onto module.exports. The literal `{ Name: () => Name }`
// shape is what cjs-module-lexer pattern-matches to expose named ESM imports,
// so it must not be restructured.
var src_exports = {};
__export(src_exports, {
  VERSION: () => VERSION,
  createOpenResponses: () => createOpenResponses
});
module.exports = __toCommonJS(src_exports);
27
+
28
// src/version.ts
// The bundler's define step left a dead ternary (`true ? "1.0.0" : "0.0.0-test"`);
// the test branch is unreachable in the published build, so it is collapsed.
var VERSION = "1.0.0";
30
+
31
+ // src/open-responses-provider.ts
32
+ var import_provider = require("@ai-sdk/provider");
33
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
34
+
35
+ // src/responses/open-responses-language-model.ts
36
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
37
+ var import_v42 = require("zod/v4");
38
+
39
+ // src/responses/convert-to-open-responses-input.ts
40
+ var import_provider_utils = require("@ai-sdk/provider-utils");
41
/**
 * Converts an AI SDK prompt into the Open Responses API `input` item array.
 *
 * - "system" messages are collected and joined into one `instructions` string
 * - "user" content maps to a `message` item with `input_text` / `input_image`
 *   parts (non-image files are skipped with a warning)
 * - "assistant" content maps to a `message` item plus trailing
 *   `function_call` items for tool calls
 * - "tool" results map to `function_call_output` items
 *
 * Unsupported parts produce entries in `warnings` instead of throwing.
 * NOTE(review): declared async although no await occurs inside; callers
 * treat the result as a promise, so the signature is kept.
 *
 * @returns {{input: Array, instructions: string|undefined, warnings: Array}}
 */
async function convertToOpenResponsesInput({
  prompt
}) {
  var _a, _b;
  const input = [];
  const warnings = [];
  const systemMessages = [];
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        // Deferred: all system messages are merged into `instructions` below.
        systemMessages.push(content);
        break;
      }
      case "user": {
        const userContent = [];
        for (const part of content) {
          switch (part.type) {
            case "text": {
              userContent.push({ type: "input_text", text: part.text });
              break;
            }
            case "file": {
              // Only image files are supported as user input.
              if (!part.mediaType.startsWith("image/")) {
                warnings.push({
                  type: "other",
                  message: `unsupported file content type: ${part.mediaType}`
                });
                break;
              }
              // The wildcard media type needs a concrete subtype for the data URL.
              const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
              // URLs pass through untouched; binary data becomes a base64 data URL.
              userContent.push({
                type: "input_image",
                ...part.data instanceof URL ? { image_url: part.data.toString() } : {
                  image_url: `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`
                }
              });
              break;
            }
          }
        }
        input.push({ type: "message", role: "user", content: userContent });
        break;
      }
      case "assistant": {
        const assistantContent = [];
        const toolCalls = [];
        for (const part of content) {
          switch (part.type) {
            case "text": {
              assistantContent.push({ type: "output_text", text: part.text });
              break;
            }
            case "tool-call": {
              // Tool input may already be serialized; otherwise stringify it.
              const argumentsValue = typeof part.input === "string" ? part.input : JSON.stringify(part.input);
              toolCalls.push({
                type: "function_call",
                call_id: part.toolCallId,
                name: part.toolName,
                arguments: argumentsValue
              });
              break;
            }
          }
        }
        // Emit the assistant message first (if non-empty), then its tool calls
        // as separate top-level items, mirroring the Responses item order.
        if (assistantContent.length > 0) {
          input.push({
            type: "message",
            role: "assistant",
            content: assistantContent
          });
        }
        for (const toolCall of toolCalls) {
          input.push(toolCall);
        }
        break;
      }
      case "tool": {
        for (const part of content) {
          if (part.type === "tool-result") {
            const output = part.output;
            // `contentValue` ends up either a plain string or an array of
            // structured parts, depending on the tool-result output type.
            let contentValue;
            switch (output.type) {
              case "text":
              case "error-text":
                contentValue = output.value;
                break;
              case "execution-denied":
                contentValue = (_a = output.reason) != null ? _a : "Tool execution denied.";
                break;
              case "json":
              case "error-json":
                contentValue = JSON.stringify(output.value);
                break;
              case "content": {
                // Structured tool output: map each item to an input_* part.
                const contentParts = [];
                for (const item of output.value) {
                  switch (item.type) {
                    case "text": {
                      contentParts.push({
                        type: "input_text",
                        text: item.text
                      });
                      break;
                    }
                    case "image-data": {
                      // `item.data` is assumed to already be base64 — TODO confirm.
                      contentParts.push({
                        type: "input_image",
                        image_url: `data:${item.mediaType};base64,${item.data}`
                      });
                      break;
                    }
                    case "image-url": {
                      contentParts.push({
                        type: "input_image",
                        image_url: item.url
                      });
                      break;
                    }
                    case "file-data": {
                      contentParts.push({
                        type: "input_file",
                        filename: (_b = item.filename) != null ? _b : "data",
                        file_data: `data:${item.mediaType};base64,${item.data}`
                      });
                      break;
                    }
                    default: {
                      warnings.push({
                        type: "other",
                        message: `unsupported tool content part type: ${item.type}`
                      });
                      break;
                    }
                  }
                }
                contentValue = contentParts;
                break;
              }
            }
            input.push({
              type: "function_call_output",
              call_id: part.toolCallId,
              output: contentValue
            });
          }
        }
        break;
      }
    }
  }
  return {
    input,
    // Multiple system messages collapse into one newline-joined instruction block.
    instructions: systemMessages.length > 0 ? systemMessages.join("\n") : void 0,
    warnings
  };
}
197
+
198
+ // src/responses/open-responses-api.ts
199
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
200
+ var import_v4 = require("zod/v4");
201
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
202
// Schema for the Open Responses error envelope `{ error: { message, type,
// param, code } }`. Built lazily so the zod schema is only constructed the
// first time a failed response needs to be parsed.
var openResponsesErrorSchema = (0, import_provider_utils2.lazySchema)(() => {
  const errorShape = import_v4.z.object({
    message: import_v4.z.string(),
    type: import_v4.z.string(),
    param: import_v4.z.string(),
    code: import_v4.z.string()
  });
  return (0, import_provider_utils3.zodSchema)(
    import_v4.z.object({ error: errorShape })
  );
});
214
+
215
// src/responses/map-open-responses-finish-reason.ts
/**
 * Maps the Open Responses `incomplete_details.reason` onto the unified
 * AI SDK finish reason.
 *
 * A missing reason means the response completed normally ("stop", or
 * "tool-calls" when tool calls were emitted). Unknown reasons fall back to
 * "other" / "tool-calls".
 */
function mapOpenResponsesFinishReason({
  finishReason,
  hasToolCalls
}) {
  // Loose equality deliberately matches both undefined and null.
  if (finishReason == null) {
    return hasToolCalls ? "tool-calls" : "stop";
  }
  if (finishReason === "max_output_tokens") {
    return "length";
  }
  if (finishReason === "content_filter") {
    return "content-filter";
  }
  return hasToolCalls ? "tool-calls" : "other";
}
232
+
233
// src/responses/open-responses-language-model.ts
// Language model implementation (specificationVersion "v3") that talks to an
// Open Responses-compatible endpoint. Supports one-shot generation
// (doGenerate) and SSE streaming (doStream).
var OpenResponsesLanguageModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v3";
    // http(s) image URLs are passed through to the API instead of downloaded.
    this.supportedUrls = {
      "image/*": [/^https?:\/\/.*$/]
    };
    this.modelId = modelId;
    // config supplies provider name, url, headers(), fetch and generateId.
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  /**
   * Builds the request body shared by doGenerate and doStream, and collects
   * warnings for unsupported call options (stopSequences, topK, seed).
   */
  async getArgs({
    maxOutputTokens,
    temperature,
    stopSequences,
    topP,
    topK,
    presencePenalty,
    frequencyPenalty,
    seed,
    prompt,
    providerOptions,
    tools,
    toolChoice,
    responseFormat
  }) {
    var _a;
    const warnings = [];
    if (stopSequences != null) {
      warnings.push({ type: "unsupported", feature: "stopSequences" });
    }
    if (topK != null) {
      warnings.push({ type: "unsupported", feature: "topK" });
    }
    if (seed != null) {
      warnings.push({ type: "unsupported", feature: "seed" });
    }
    const {
      input,
      instructions,
      warnings: inputWarnings
    } = await convertToOpenResponsesInput({
      prompt
    });
    warnings.push(...inputWarnings);
    // Only function tools are forwarded; other tool types are silently dropped
    // by the filter. `strict` is forwarded only when explicitly set.
    const functionTools = tools == null ? void 0 : tools.filter((tool) => tool.type === "function").map((tool) => ({
      type: "function",
      name: tool.name,
      description: tool.description,
      parameters: tool.inputSchema,
      ...tool.strict != null ? { strict: tool.strict } : {}
    }));
    // "tool" choice becomes {type:"function", name}; "auto"/"none"/"required"
    // pass through as bare strings.
    const convertedToolChoice = toolChoice == null ? void 0 : toolChoice.type === "tool" ? { type: "function", name: toolChoice.toolName } : toolChoice.type;
    // JSON response format maps to the API's json_schema text format; without
    // a schema only the bare type is sent.
    const textFormat = (responseFormat == null ? void 0 : responseFormat.type) === "json" ? {
      type: "json_schema",
      ...responseFormat.schema != null ? {
        name: (_a = responseFormat.name) != null ? _a : "response",
        description: responseFormat.description,
        schema: responseFormat.schema,
        strict: true
      } : {}
    } : void 0;
    return {
      body: {
        model: this.modelId,
        input,
        instructions,
        max_output_tokens: maxOutputTokens,
        temperature,
        top_p: topP,
        presence_penalty: presencePenalty,
        frequency_penalty: frequencyPenalty,
        // Empty tool arrays are normalized to undefined.
        tools: (functionTools == null ? void 0 : functionTools.length) ? functionTools : void 0,
        tool_choice: convertedToolChoice,
        ...textFormat != null && { text: { format: textFormat } }
      },
      warnings
    };
  }
  /**
   * Non-streaming generation: POSTs the request and maps the response items
   * (reasoning, message, function_call) onto unified content parts, plus
   * finish reason and token usage.
   */
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f;
    const { body, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await (0, import_provider_utils4.postJsonToApi)({
      url: this.config.url,
      headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
      body,
      failedResponseHandler: (0, import_provider_utils4.createJsonErrorResponseHandler)({
        errorSchema: openResponsesErrorSchema,
        errorToMessage: (error) => error.error.message
      }),
      successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
        // do not validate the response body, only apply types to the response body
        (0, import_provider_utils4.jsonSchema)(() => {
          throw new Error("json schema not implemented");
        })
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const content = [];
    let hasToolCalls = false;
    for (const part of response.output) {
      switch (part.type) {
        // TODO AI SDK 7 adjust reasoning in the specification to better support the reasoning structure from open responses.
        case "reasoning": {
          // Reasoning items may have no content array; default to empty.
          for (const contentPart of (_a = part.content) != null ? _a : []) {
            content.push({
              type: "reasoning",
              text: contentPart.text
            });
          }
          break;
        }
        case "message": {
          for (const contentPart of part.content) {
            content.push({
              type: "text",
              text: contentPart.text
            });
          }
          break;
        }
        case "function_call": {
          hasToolCalls = true;
          content.push({
            type: "tool-call",
            toolCallId: part.call_id,
            toolName: part.name,
            input: part.arguments
          });
          break;
        }
      }
    }
    // Usage fields are all optional; derived values treat missing as 0.
    const usage = response.usage;
    const inputTokens = usage == null ? void 0 : usage.input_tokens;
    const cachedInputTokens = (_b = usage == null ? void 0 : usage.input_tokens_details) == null ? void 0 : _b.cached_tokens;
    const outputTokens = usage == null ? void 0 : usage.output_tokens;
    const reasoningTokens = (_c = usage == null ? void 0 : usage.output_tokens_details) == null ? void 0 : _c.reasoning_tokens;
    return {
      content,
      finishReason: {
        unified: mapOpenResponsesFinishReason({
          finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
          hasToolCalls
        }),
        raw: (_f = (_e = response.incomplete_details) == null ? void 0 : _e.reason) != null ? _f : void 0
      },
      usage: {
        inputTokens: {
          total: inputTokens,
          // noCache = total minus cache reads (missing counts treated as 0).
          noCache: (inputTokens != null ? inputTokens : 0) - (cachedInputTokens != null ? cachedInputTokens : 0),
          cacheRead: cachedInputTokens,
          cacheWrite: void 0
        },
        outputTokens: {
          total: outputTokens,
          // text = total output minus reasoning tokens.
          text: (outputTokens != null ? outputTokens : 0) - (reasoningTokens != null ? reasoningTokens : 0),
          reasoning: reasoningTokens
        },
        raw: response.usage
      },
      request: { body },
      response: {
        id: response.id,
        // created_at is epoch seconds; Date wants milliseconds.
        timestamp: new Date(response.created_at * 1e3),
        modelId: response.model,
        headers: responseHeaders,
        body: rawResponse
      },
      providerMetadata: void 0,
      warnings
    };
  }
  /**
   * Streaming generation: POSTs with `stream: true` and transforms the SSE
   * event stream into unified stream parts. Tool-call arguments are
   * accumulated per item id until the item completes.
   */
  async doStream(options) {
    const { body, warnings } = await this.getArgs(options);
    const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
      url: this.config.url,
      headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
      body: {
        ...body,
        stream: true
      },
      failedResponseHandler: (0, import_provider_utils4.createJsonErrorResponseHandler)({
        errorSchema: openResponsesErrorSchema,
        errorToMessage: (error) => error.error.message
      }),
      // TODO consider validation
      successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(import_v42.z.any()),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Usage accumulator, mutated by updateUsage when completion events arrive
    // and emitted with the final "finish" part.
    const usage = {
      inputTokens: {
        total: void 0,
        noCache: void 0,
        cacheRead: void 0,
        cacheWrite: void 0
      },
      outputTokens: {
        total: void 0,
        text: void 0,
        reasoning: void 0
      }
    };
    const updateUsage = (responseUsage) => {
      var _a, _b;
      if (!responseUsage) {
        return;
      }
      const inputTokens = responseUsage.input_tokens;
      const cachedInputTokens = (_a = responseUsage.input_tokens_details) == null ? void 0 : _a.cached_tokens;
      const outputTokens = responseUsage.output_tokens;
      const reasoningTokens = (_b = responseUsage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens;
      usage.inputTokens = {
        total: inputTokens,
        noCache: (inputTokens != null ? inputTokens : 0) - (cachedInputTokens != null ? cachedInputTokens : 0),
        cacheRead: cachedInputTokens,
        cacheWrite: void 0
      };
      usage.outputTokens = {
        total: outputTokens,
        text: (outputTokens != null ? outputTokens : 0) - (reasoningTokens != null ? reasoningTokens : 0),
        reasoning: reasoningTokens
      };
      usage.raw = responseUsage;
    };
    let isActiveReasoning = false;
    let hasToolCalls = false;
    let finishReason = {
      unified: "other",
      raw: void 0
    };
    // In-flight tool calls keyed by the server's item id.
    const toolCallsByItemId = {};
    return {
      stream: response.pipeThrough(
        new TransformStream({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(parseResult, controller) {
            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
            if (options.includeRawChunks) {
              controller.enqueue({
                type: "raw",
                rawValue: parseResult.rawValue
              });
            }
            // Parse failures become error parts; the stream keeps going.
            if (!parseResult.success) {
              controller.enqueue({ type: "error", error: parseResult.error });
              return;
            }
            const chunk = parseResult.value;
            if (chunk.type === "response.output_item.added" && chunk.item.type === "function_call") {
              // New tool call: remember name/id and any initial arguments.
              toolCallsByItemId[chunk.item.id] = {
                toolName: chunk.item.name,
                toolCallId: chunk.item.call_id,
                arguments: chunk.item.arguments
              };
            } else if (chunk.type === "response.function_call_arguments.delta") {
              // Append an arguments fragment, creating the record if the
              // "added" event was never seen.
              const functionCallChunk = chunk;
              const toolCall = (_a = toolCallsByItemId[functionCallChunk.item_id]) != null ? _a : toolCallsByItemId[functionCallChunk.item_id] = {};
              toolCall.arguments = ((_b = toolCall.arguments) != null ? _b : "") + functionCallChunk.delta;
            } else if (chunk.type === "response.function_call_arguments.done") {
              // The "done" event carries the full arguments string and
              // overrides whatever was accumulated.
              const functionCallChunk = chunk;
              const toolCall = (_c = toolCallsByItemId[functionCallChunk.item_id]) != null ? _c : toolCallsByItemId[functionCallChunk.item_id] = {};
              toolCall.arguments = functionCallChunk.arguments;
            } else if (chunk.type === "response.output_item.done" && chunk.item.type === "function_call") {
              // Tool call complete: emit it, preferring accumulated state over
              // the values embedded in the done event.
              const toolCall = toolCallsByItemId[chunk.item.id];
              const toolName = (_d = toolCall == null ? void 0 : toolCall.toolName) != null ? _d : chunk.item.name;
              const toolCallId = (_e = toolCall == null ? void 0 : toolCall.toolCallId) != null ? _e : chunk.item.call_id;
              const input = (_g = (_f = toolCall == null ? void 0 : toolCall.arguments) != null ? _f : chunk.item.arguments) != null ? _g : "";
              controller.enqueue({
                type: "tool-call",
                toolCallId,
                toolName,
                input
              });
              hasToolCalls = true;
              delete toolCallsByItemId[chunk.item.id];
            } else if (chunk.type === "response.output_item.added" && chunk.item.type === "reasoning") {
              controller.enqueue({
                type: "reasoning-start",
                id: chunk.item.id
              });
              isActiveReasoning = true;
            } else if (chunk.type === "response.reasoning_text.delta") {
              const reasoningChunk = chunk;
              controller.enqueue({
                type: "reasoning-delta",
                id: reasoningChunk.item_id,
                delta: reasoningChunk.delta
              });
            } else if (chunk.type === "response.output_item.done" && chunk.item.type === "reasoning") {
              controller.enqueue({ type: "reasoning-end", id: chunk.item.id });
              isActiveReasoning = false;
            } else if (chunk.type === "response.output_item.added" && chunk.item.type === "message") {
              controller.enqueue({ type: "text-start", id: chunk.item.id });
            } else if (chunk.type === "response.output_text.delta") {
              controller.enqueue({
                type: "text-delta",
                id: chunk.item_id,
                delta: chunk.delta
              });
            } else if (chunk.type === "response.output_item.done" && chunk.item.type === "message") {
              controller.enqueue({ type: "text-end", id: chunk.item.id });
            } else if (chunk.type === "response.completed" || chunk.type === "response.incomplete") {
              // Terminal success events: capture finish reason and usage.
              const reason = (_h = chunk.response.incomplete_details) == null ? void 0 : _h.reason;
              finishReason = {
                unified: mapOpenResponsesFinishReason({
                  finishReason: reason,
                  hasToolCalls
                }),
                raw: reason != null ? reason : void 0
              };
              updateUsage(chunk.response.usage);
            } else if (chunk.type === "response.failed") {
              finishReason = {
                unified: "error",
                raw: (_j = (_i = chunk.response.error) == null ? void 0 : _i.code) != null ? _j : chunk.response.status
              };
              updateUsage(chunk.response.usage);
            }
          },
          flush(controller) {
            // Close any reasoning block left open by a truncated stream.
            // NOTE(review): uses a fixed "reasoning-0" id rather than the
            // last seen item id — verify against consumers.
            if (isActiveReasoning) {
              controller.enqueue({ type: "reasoning-end", id: "reasoning-0" });
            }
            controller.enqueue({
              type: "finish",
              finishReason,
              usage,
              providerMetadata: void 0
            });
          }
        })
      ),
      request: { body },
      response: { headers: responseHeaders }
    };
  }
};
581
+
582
// src/open-responses-provider.ts
/**
 * Creates an Open Responses provider instance.
 *
 * @param options - provider settings: `name`, `url`, optional `apiKey`,
 *   optional extra `headers`, optional custom `fetch`.
 * @returns a callable provider: `provider(modelId)` /
 *   `provider.languageModel(modelId)` return language models;
 *   `embeddingModel` / `imageModel` always throw NoSuchModelError.
 */
function createOpenResponses(options) {
  const providerName = options.name;
  // Bearer auth is only attached when an API key was configured; caller
  // headers can still override it.
  const getHeaders = () => (0, import_provider_utils5.withUserAgentSuffix)(
    {
      ...options.apiKey ? {
        Authorization: `Bearer ${options.apiKey}`
      } : {},
      ...options.headers
    },
    `ai-sdk/open-responses/${VERSION}`
  );
  const createResponsesModel = (modelId) => {
    return new OpenResponsesLanguageModel(modelId, {
      provider: `${providerName}.responses`,
      headers: getHeaders,
      url: options.url,
      fetch: options.fetch,
      generateId: () => (0, import_provider_utils5.generateId)()
    });
  };
  const createLanguageModel = (modelId) => {
    return createResponsesModel(modelId);
  };
  // FIX: the `new.target` guard previously sat inside the arrow function
  // `createLanguageModel`. Arrows inherit `new.target` lexically from the
  // enclosing non-arrow function (here: createOpenResponses), so the guard
  // could never fire for `new provider(...)` and constructing the provider
  // silently succeeded. The guard belongs in the non-arrow `provider`
  // function itself. The error message also said "OpenAI" (copy-paste from
  // another provider); corrected to name this provider.
  const provider = function(modelId) {
    if (new.target) {
      throw new Error(
        "The Open Responses model function cannot be called with the new keyword."
      );
    }
    return createLanguageModel(modelId);
  };
  provider.specificationVersion = "v3";
  provider.languageModel = createLanguageModel;
  provider.embeddingModel = (modelId) => {
    throw new import_provider.NoSuchModelError({ modelId, modelType: "embeddingModel" });
  };
  provider.imageModel = (modelId) => {
    throw new import_provider.NoSuchModelError({ modelId, modelType: "imageModel" });
  };
  return provider;
}
624
// Annotate the CommonJS export names for ESM import in node:
// (dead `0 && (...)` expression, never evaluated at runtime; kept in this
// exact shape so cjs-module-lexer can statically detect the named exports)
0 && (module.exports = {
  VERSION,
  createOpenResponses
});
//# sourceMappingURL=index.js.map