@ai-sdk/open-responses 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,9 @@
1
import { FetchFunction } from '@ai-sdk/provider-utils';

/**
 * Configuration shared by Open Responses language model instances.
 */
export type OpenResponsesConfig = {
  /** Provider name exposed through the model's `provider` getter. */
  provider: string;
  /** Fully-qualified endpoint URL that request bodies are posted to. */
  url: string;
  /** Produces the headers for each request; `undefined` values are dropped when merged with per-call headers. */
  headers: () => Record<string, string | undefined>;
  /** Optional custom fetch implementation (e.g. for proxies or testing). */
  fetch?: FetchFunction;
  /** Id generator. NOTE(review): not referenced by the visible model code — presumably used elsewhere in the package; confirm. */
  generateId: () => string;
};
@@ -0,0 +1,500 @@
1
+ import {
2
+ LanguageModelV3,
3
+ LanguageModelV3CallOptions,
4
+ LanguageModelV3Content,
5
+ LanguageModelV3FinishReason,
6
+ LanguageModelV3GenerateResult,
7
+ LanguageModelV3StreamPart,
8
+ LanguageModelV3StreamResult,
9
+ LanguageModelV3Usage,
10
+ SharedV3Warning,
11
+ } from '@ai-sdk/provider';
12
+ import {
13
+ combineHeaders,
14
+ createEventSourceResponseHandler,
15
+ createJsonErrorResponseHandler,
16
+ createJsonResponseHandler,
17
+ jsonSchema,
18
+ ParseResult,
19
+ postJsonToApi,
20
+ } from '@ai-sdk/provider-utils';
21
+ import { z } from 'zod/v4';
22
+ import { convertToOpenResponsesInput } from './convert-to-open-responses-input';
23
+ import {
24
+ FunctionToolParam,
25
+ OpenResponsesRequestBody,
26
+ OpenResponsesResponseBody,
27
+ OpenResponsesChunk,
28
+ openResponsesErrorSchema,
29
+ ToolChoiceParam,
30
+ } from './open-responses-api';
31
+ import { mapOpenResponsesFinishReason } from './map-open-responses-finish-reason';
32
+ import { OpenResponsesConfig } from './open-responses-config';
33
+
34
+ export class OpenResponsesLanguageModel implements LanguageModelV3 {
35
+ readonly specificationVersion = 'v3';
36
+
37
+ readonly modelId: string;
38
+
39
+ private readonly config: OpenResponsesConfig;
40
+
41
+ constructor(modelId: string, config: OpenResponsesConfig) {
42
+ this.modelId = modelId;
43
+ this.config = config;
44
+ }
45
+
46
+ readonly supportedUrls: Record<string, RegExp[]> = {
47
+ 'image/*': [/^https?:\/\/.*$/],
48
+ };
49
+
50
+ get provider(): string {
51
+ return this.config.provider;
52
+ }
53
+
54
+ private async getArgs({
55
+ maxOutputTokens,
56
+ temperature,
57
+ stopSequences,
58
+ topP,
59
+ topK,
60
+ presencePenalty,
61
+ frequencyPenalty,
62
+ seed,
63
+ prompt,
64
+ providerOptions,
65
+ tools,
66
+ toolChoice,
67
+ responseFormat,
68
+ }: LanguageModelV3CallOptions): Promise<{
69
+ body: Omit<OpenResponsesRequestBody, 'stream' | 'stream_options'>;
70
+ warnings: SharedV3Warning[];
71
+ }> {
72
+ const warnings: SharedV3Warning[] = [];
73
+
74
+ if (stopSequences != null) {
75
+ warnings.push({ type: 'unsupported', feature: 'stopSequences' });
76
+ }
77
+
78
+ if (topK != null) {
79
+ warnings.push({ type: 'unsupported', feature: 'topK' });
80
+ }
81
+
82
+ if (seed != null) {
83
+ warnings.push({ type: 'unsupported', feature: 'seed' });
84
+ }
85
+
86
+ const {
87
+ input,
88
+ instructions,
89
+ warnings: inputWarnings,
90
+ } = await convertToOpenResponsesInput({
91
+ prompt,
92
+ });
93
+
94
+ warnings.push(...inputWarnings);
95
+
96
+ // Convert function tools to the Open Responses format
97
+ const functionTools: FunctionToolParam[] | undefined = tools
98
+ ?.filter(tool => tool.type === 'function')
99
+ .map(tool => ({
100
+ type: 'function' as const,
101
+ name: tool.name,
102
+ description: tool.description,
103
+ parameters: tool.inputSchema,
104
+ ...(tool.strict != null ? { strict: tool.strict } : {}),
105
+ }));
106
+
107
+ // Convert tool choice to the Open Responses format
108
+ const convertedToolChoice: ToolChoiceParam | undefined =
109
+ toolChoice == null
110
+ ? undefined
111
+ : toolChoice.type === 'tool'
112
+ ? { type: 'function', name: toolChoice.toolName }
113
+ : toolChoice.type; // 'auto' | 'none' | 'required'
114
+
115
+ const textFormat =
116
+ responseFormat?.type === 'json'
117
+ ? {
118
+ type: 'json_schema' as const,
119
+ ...(responseFormat.schema != null
120
+ ? {
121
+ name: responseFormat.name ?? 'response',
122
+ description: responseFormat.description,
123
+ schema: responseFormat.schema,
124
+ strict: true,
125
+ }
126
+ : {}),
127
+ }
128
+ : undefined;
129
+
130
+ return {
131
+ body: {
132
+ model: this.modelId,
133
+ input,
134
+ instructions,
135
+ max_output_tokens: maxOutputTokens,
136
+ temperature,
137
+ top_p: topP,
138
+ presence_penalty: presencePenalty,
139
+ frequency_penalty: frequencyPenalty,
140
+ tools: functionTools?.length ? functionTools : undefined,
141
+ tool_choice: convertedToolChoice,
142
+ ...(textFormat != null && { text: { format: textFormat } }),
143
+ },
144
+ warnings,
145
+ };
146
+ }
147
+
148
+ async doGenerate(
149
+ options: LanguageModelV3CallOptions,
150
+ ): Promise<LanguageModelV3GenerateResult> {
151
+ const { body, warnings } = await this.getArgs(options);
152
+
153
+ const {
154
+ responseHeaders,
155
+ value: response,
156
+ rawValue: rawResponse,
157
+ } = await postJsonToApi({
158
+ url: this.config.url,
159
+ headers: combineHeaders(this.config.headers(), options.headers),
160
+ body,
161
+ failedResponseHandler: createJsonErrorResponseHandler({
162
+ errorSchema: openResponsesErrorSchema,
163
+ errorToMessage: error => error.error.message,
164
+ }),
165
+ successfulResponseHandler: createJsonResponseHandler(
166
+ // do not validate the response body, only apply types to the response body
167
+ jsonSchema<OpenResponsesResponseBody>(() => {
168
+ throw new Error('json schema not implemented');
169
+ }),
170
+ ),
171
+ abortSignal: options.abortSignal,
172
+ fetch: this.config.fetch,
173
+ });
174
+
175
+ const content: Array<LanguageModelV3Content> = [];
176
+ let hasToolCalls = false;
177
+
178
+ for (const part of response.output!) {
179
+ switch (part.type) {
180
+ // TODO AI SDK 7 adjust reasoning in the specification to better support the reasoning structure from open responses.
181
+ case 'reasoning': {
182
+ for (const contentPart of part.content ?? []) {
183
+ content.push({
184
+ type: 'reasoning',
185
+ text: contentPart.text,
186
+ });
187
+ }
188
+ break;
189
+ }
190
+
191
+ case 'message': {
192
+ for (const contentPart of part.content) {
193
+ content.push({
194
+ type: 'text',
195
+ text: contentPart.text,
196
+ });
197
+ }
198
+
199
+ break;
200
+ }
201
+
202
+ case 'function_call': {
203
+ hasToolCalls = true;
204
+ content.push({
205
+ type: 'tool-call',
206
+ toolCallId: part.call_id,
207
+ toolName: part.name,
208
+ input: part.arguments,
209
+ });
210
+ break;
211
+ }
212
+ }
213
+ }
214
+
215
+ const usage = response.usage;
216
+ const inputTokens = usage?.input_tokens;
217
+ const cachedInputTokens = usage?.input_tokens_details?.cached_tokens;
218
+ const outputTokens = usage?.output_tokens;
219
+ const reasoningTokens = usage?.output_tokens_details?.reasoning_tokens;
220
+
221
+ return {
222
+ content,
223
+ finishReason: {
224
+ unified: mapOpenResponsesFinishReason({
225
+ finishReason: response.incomplete_details?.reason,
226
+ hasToolCalls,
227
+ }),
228
+ raw: response.incomplete_details?.reason ?? undefined,
229
+ },
230
+ usage: {
231
+ inputTokens: {
232
+ total: inputTokens,
233
+ noCache: (inputTokens ?? 0) - (cachedInputTokens ?? 0),
234
+ cacheRead: cachedInputTokens,
235
+ cacheWrite: undefined,
236
+ },
237
+ outputTokens: {
238
+ total: outputTokens,
239
+ text: (outputTokens ?? 0) - (reasoningTokens ?? 0),
240
+ reasoning: reasoningTokens,
241
+ },
242
+ raw: response.usage,
243
+ },
244
+ request: { body },
245
+ response: {
246
+ id: response.id,
247
+ timestamp: new Date(response.created_at! * 1000),
248
+ modelId: response.model,
249
+ headers: responseHeaders,
250
+ body: rawResponse,
251
+ },
252
+ providerMetadata: undefined,
253
+ warnings,
254
+ };
255
+ }
256
+
257
+ async doStream(
258
+ options: LanguageModelV3CallOptions,
259
+ ): Promise<LanguageModelV3StreamResult> {
260
+ const { body, warnings } = await this.getArgs(options);
261
+
262
+ const { responseHeaders, value: response } = await postJsonToApi({
263
+ url: this.config.url,
264
+ headers: combineHeaders(this.config.headers(), options.headers),
265
+ body: {
266
+ ...body,
267
+ stream: true,
268
+ } satisfies OpenResponsesRequestBody,
269
+ failedResponseHandler: createJsonErrorResponseHandler({
270
+ errorSchema: openResponsesErrorSchema,
271
+ errorToMessage: error => error.error.message,
272
+ }),
273
+ // TODO consider validation
274
+ successfulResponseHandler: createEventSourceResponseHandler(z.any()),
275
+ abortSignal: options.abortSignal,
276
+ fetch: this.config.fetch,
277
+ });
278
+
279
+ const usage: LanguageModelV3Usage = {
280
+ inputTokens: {
281
+ total: undefined,
282
+ noCache: undefined,
283
+ cacheRead: undefined,
284
+ cacheWrite: undefined,
285
+ },
286
+ outputTokens: {
287
+ total: undefined,
288
+ text: undefined,
289
+ reasoning: undefined,
290
+ },
291
+ };
292
+
293
+ const updateUsage = (
294
+ responseUsage?: OpenResponsesResponseBody['usage'],
295
+ ) => {
296
+ if (!responseUsage) {
297
+ return;
298
+ }
299
+
300
+ const inputTokens = responseUsage.input_tokens;
301
+ const cachedInputTokens =
302
+ responseUsage.input_tokens_details?.cached_tokens;
303
+ const outputTokens = responseUsage.output_tokens;
304
+ const reasoningTokens =
305
+ responseUsage.output_tokens_details?.reasoning_tokens;
306
+
307
+ usage.inputTokens = {
308
+ total: inputTokens,
309
+ noCache: (inputTokens ?? 0) - (cachedInputTokens ?? 0),
310
+ cacheRead: cachedInputTokens,
311
+ cacheWrite: undefined,
312
+ };
313
+ usage.outputTokens = {
314
+ total: outputTokens,
315
+ text: (outputTokens ?? 0) - (reasoningTokens ?? 0),
316
+ reasoning: reasoningTokens,
317
+ };
318
+ usage.raw = responseUsage;
319
+ };
320
+
321
+ let isActiveReasoning = false;
322
+ let hasToolCalls = false;
323
+ let finishReason: LanguageModelV3FinishReason = {
324
+ unified: 'other',
325
+ raw: undefined,
326
+ };
327
+ const toolCallsByItemId: Record<
328
+ string,
329
+ { toolName?: string; toolCallId?: string; arguments?: string }
330
+ > = {};
331
+
332
+ return {
333
+ stream: response.pipeThrough(
334
+ new TransformStream<
335
+ ParseResult<OpenResponsesChunk>,
336
+ LanguageModelV3StreamPart
337
+ >({
338
+ start(controller) {
339
+ controller.enqueue({ type: 'stream-start', warnings });
340
+ },
341
+
342
+ transform(parseResult, controller) {
343
+ if (options.includeRawChunks) {
344
+ controller.enqueue({
345
+ type: 'raw',
346
+ rawValue: parseResult.rawValue,
347
+ });
348
+ }
349
+
350
+ if (!parseResult.success) {
351
+ controller.enqueue({ type: 'error', error: parseResult.error });
352
+ return;
353
+ }
354
+
355
+ const chunk = parseResult.value;
356
+
357
+ // Tool call events (single-shot tool-call when complete)
358
+ if (
359
+ chunk.type === 'response.output_item.added' &&
360
+ chunk.item.type === 'function_call'
361
+ ) {
362
+ toolCallsByItemId[chunk.item.id] = {
363
+ toolName: chunk.item.name,
364
+ toolCallId: chunk.item.call_id,
365
+ arguments: chunk.item.arguments,
366
+ };
367
+ } else if (
368
+ (chunk as { type: string }).type ===
369
+ 'response.function_call_arguments.delta'
370
+ ) {
371
+ const functionCallChunk = chunk as {
372
+ item_id: string;
373
+ delta: string;
374
+ };
375
+ const toolCall =
376
+ toolCallsByItemId[functionCallChunk.item_id] ??
377
+ (toolCallsByItemId[functionCallChunk.item_id] = {});
378
+ toolCall.arguments =
379
+ (toolCall.arguments ?? '') + functionCallChunk.delta;
380
+ } else if (
381
+ (chunk as { type: string }).type ===
382
+ 'response.function_call_arguments.done'
383
+ ) {
384
+ const functionCallChunk = chunk as {
385
+ item_id: string;
386
+ arguments: string;
387
+ };
388
+ const toolCall =
389
+ toolCallsByItemId[functionCallChunk.item_id] ??
390
+ (toolCallsByItemId[functionCallChunk.item_id] = {});
391
+ toolCall.arguments = functionCallChunk.arguments;
392
+ } else if (
393
+ chunk.type === 'response.output_item.done' &&
394
+ chunk.item.type === 'function_call'
395
+ ) {
396
+ const toolCall = toolCallsByItemId[chunk.item.id];
397
+ const toolName = toolCall?.toolName ?? chunk.item.name;
398
+ const toolCallId = toolCall?.toolCallId ?? chunk.item.call_id;
399
+ const input = toolCall?.arguments ?? chunk.item.arguments ?? '';
400
+
401
+ controller.enqueue({
402
+ type: 'tool-call',
403
+ toolCallId,
404
+ toolName,
405
+ input,
406
+ });
407
+ hasToolCalls = true;
408
+
409
+ delete toolCallsByItemId[chunk.item.id];
410
+ }
411
+
412
+ // Reasoning events (note: response.reasoning_text.delta is an LM Studio extension, not in official spec)
413
+ else if (
414
+ chunk.type === 'response.output_item.added' &&
415
+ chunk.item.type === 'reasoning'
416
+ ) {
417
+ controller.enqueue({
418
+ type: 'reasoning-start',
419
+ id: chunk.item.id,
420
+ });
421
+ isActiveReasoning = true;
422
+ } else if (
423
+ (chunk as { type: string }).type ===
424
+ 'response.reasoning_text.delta'
425
+ ) {
426
+ const reasoningChunk = chunk as {
427
+ item_id: string;
428
+ delta: string;
429
+ };
430
+ controller.enqueue({
431
+ type: 'reasoning-delta',
432
+ id: reasoningChunk.item_id,
433
+ delta: reasoningChunk.delta,
434
+ });
435
+ } else if (
436
+ chunk.type === 'response.output_item.done' &&
437
+ chunk.item.type === 'reasoning'
438
+ ) {
439
+ controller.enqueue({ type: 'reasoning-end', id: chunk.item.id });
440
+ isActiveReasoning = false;
441
+ }
442
+
443
+ // Text events
444
+ else if (
445
+ chunk.type === 'response.output_item.added' &&
446
+ chunk.item.type === 'message'
447
+ ) {
448
+ controller.enqueue({ type: 'text-start', id: chunk.item.id });
449
+ } else if (chunk.type === 'response.output_text.delta') {
450
+ controller.enqueue({
451
+ type: 'text-delta',
452
+ id: chunk.item_id,
453
+ delta: chunk.delta,
454
+ });
455
+ } else if (
456
+ chunk.type === 'response.output_item.done' &&
457
+ chunk.item.type === 'message'
458
+ ) {
459
+ controller.enqueue({ type: 'text-end', id: chunk.item.id });
460
+ } else if (
461
+ chunk.type === 'response.completed' ||
462
+ chunk.type === 'response.incomplete'
463
+ ) {
464
+ const reason = chunk.response.incomplete_details?.reason;
465
+ finishReason = {
466
+ unified: mapOpenResponsesFinishReason({
467
+ finishReason: reason,
468
+ hasToolCalls,
469
+ }),
470
+ raw: reason ?? undefined,
471
+ };
472
+ updateUsage(chunk.response.usage);
473
+ } else if (chunk.type === 'response.failed') {
474
+ finishReason = {
475
+ unified: 'error',
476
+ raw: chunk.response.error?.code ?? chunk.response.status,
477
+ };
478
+ updateUsage(chunk.response.usage);
479
+ }
480
+ },
481
+
482
+ flush(controller) {
483
+ if (isActiveReasoning) {
484
+ controller.enqueue({ type: 'reasoning-end', id: 'reasoning-0' });
485
+ }
486
+
487
+ controller.enqueue({
488
+ type: 'finish',
489
+ finishReason,
490
+ usage,
491
+ providerMetadata: undefined,
492
+ });
493
+ },
494
+ }),
495
+ ),
496
+ request: { body },
497
+ response: { headers: responseHeaders },
498
+ };
499
+ }
500
+ }
package/src/version.ts ADDED
@@ -0,0 +1,6 @@
1
+ // Version string of this package injected at build time.
2
+ declare const __PACKAGE_VERSION__: string | undefined;
3
+ export const VERSION: string =
4
+ typeof __PACKAGE_VERSION__ !== 'undefined'
5
+ ? __PACKAGE_VERSION__
6
+ : '0.0.0-test';