ai 3.0.21 → 3.0.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. package/dist/index.d.mts +42 -1
  2. package/dist/index.d.ts +42 -1
  3. package/dist/index.js +104 -177
  4. package/dist/index.js.map +1 -1
  5. package/dist/index.mjs +65 -138
  6. package/dist/index.mjs.map +1 -1
  7. package/package.json +4 -33
  8. package/react/dist/index.d.mts +6 -2
  9. package/react/dist/index.d.ts +6 -2
  10. package/react/dist/index.js +107 -24
  11. package/react/dist/index.js.map +1 -1
  12. package/react/dist/index.mjs +107 -24
  13. package/react/dist/index.mjs.map +1 -1
  14. package/rsc/dist/rsc-server.mjs +3 -3
  15. package/rsc/dist/rsc-server.mjs.map +1 -1
  16. package/solid/dist/index.d.mts +6 -2
  17. package/solid/dist/index.d.ts +6 -2
  18. package/solid/dist/index.js +105 -23
  19. package/solid/dist/index.js.map +1 -1
  20. package/solid/dist/index.mjs +105 -23
  21. package/solid/dist/index.mjs.map +1 -1
  22. package/svelte/dist/index.d.mts +6 -2
  23. package/svelte/dist/index.d.ts +6 -2
  24. package/svelte/dist/index.js +107 -24
  25. package/svelte/dist/index.js.map +1 -1
  26. package/svelte/dist/index.mjs +107 -24
  27. package/svelte/dist/index.mjs.map +1 -1
  28. package/vue/dist/index.d.mts +6 -2
  29. package/vue/dist/index.d.ts +6 -2
  30. package/vue/dist/index.js +105 -23
  31. package/vue/dist/index.js.map +1 -1
  32. package/vue/dist/index.mjs +105 -23
  33. package/vue/dist/index.mjs.map +1 -1
  34. package/anthropic/dist/index.d.mts +0 -51
  35. package/anthropic/dist/index.d.ts +0 -51
  36. package/anthropic/dist/index.js +0 -792
  37. package/anthropic/dist/index.js.map +0 -1
  38. package/anthropic/dist/index.mjs +0 -760
  39. package/anthropic/dist/index.mjs.map +0 -1
  40. package/google/dist/index.d.mts +0 -47
  41. package/google/dist/index.d.ts +0 -47
  42. package/google/dist/index.js +0 -796
  43. package/google/dist/index.js.map +0 -1
  44. package/google/dist/index.mjs +0 -764
  45. package/google/dist/index.mjs.map +0 -1
  46. package/mistral/dist/index.d.mts +0 -52
  47. package/mistral/dist/index.d.ts +0 -52
  48. package/mistral/dist/index.js +0 -763
  49. package/mistral/dist/index.js.map +0 -1
  50. package/mistral/dist/index.mjs +0 -731
  51. package/mistral/dist/index.mjs.map +0 -1
  52. package/openai/dist/index.d.mts +0 -116
  53. package/openai/dist/index.d.ts +0 -116
  54. package/openai/dist/index.js +0 -1143
  55. package/openai/dist/index.js.map +0 -1
  56. package/openai/dist/index.mjs +0 -1115
  57. package/openai/dist/index.mjs.map +0 -1
@@ -1,1115 +0,0 @@
1
- // spec/util/generate-id.ts
2
- import { customAlphabet } from "nanoid/non-secure";
3
- var generateId = customAlphabet(
4
- "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
5
- 7
6
- );
7
-
8
- // spec/util/load-api-key.ts
9
- import { LoadAPIKeyError } from "@ai-sdk/provider";
10
- function loadApiKey({
11
- apiKey,
12
- environmentVariableName,
13
- apiKeyParameterName = "apiKey",
14
- description
15
- }) {
16
- if (typeof apiKey === "string") {
17
- return apiKey;
18
- }
19
- if (apiKey != null) {
20
- throw new LoadAPIKeyError({
21
- message: `${description} API key must be a string.`
22
- });
23
- }
24
- if (typeof process === "undefined") {
25
- throw new LoadAPIKeyError({
26
- message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter. Environment variables is not supported in this environment.`
27
- });
28
- }
29
- apiKey = process.env[environmentVariableName];
30
- if (apiKey == null) {
31
- throw new LoadAPIKeyError({
32
- message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter or the ${environmentVariableName} environment variable.`
33
- });
34
- }
35
- if (typeof apiKey !== "string") {
36
- throw new LoadAPIKeyError({
37
- message: `${description} API key must be a string. The value of the ${environmentVariableName} environment variable is not a string.`
38
- });
39
- }
40
- return apiKey;
41
- }
42
-
43
- // spec/util/parse-json.ts
44
- import { JSONParseError, TypeValidationError as TypeValidationError2 } from "@ai-sdk/provider";
45
- import SecureJSON from "secure-json-parse";
46
-
47
- // spec/util/validate-types.ts
48
- import { TypeValidationError } from "@ai-sdk/provider";
49
- function validateTypes({
50
- value,
51
- schema
52
- }) {
53
- try {
54
- return schema.parse(value);
55
- } catch (error) {
56
- throw new TypeValidationError({ value, cause: error });
57
- }
58
- }
59
- function safeValidateTypes({
60
- value,
61
- schema
62
- }) {
63
- try {
64
- const validationResult = schema.safeParse(value);
65
- if (validationResult.success) {
66
- return {
67
- success: true,
68
- value: validationResult.data
69
- };
70
- }
71
- return {
72
- success: false,
73
- error: new TypeValidationError({
74
- value,
75
- cause: validationResult.error
76
- })
77
- };
78
- } catch (error) {
79
- return {
80
- success: false,
81
- error: TypeValidationError.isTypeValidationError(error) ? error : new TypeValidationError({ value, cause: error })
82
- };
83
- }
84
- }
85
-
86
- // spec/util/parse-json.ts
87
- function parseJSON({
88
- text,
89
- schema
90
- }) {
91
- try {
92
- const value = SecureJSON.parse(text);
93
- if (schema == null) {
94
- return value;
95
- }
96
- return validateTypes({ value, schema });
97
- } catch (error) {
98
- if (JSONParseError.isJSONParseError(error) || TypeValidationError2.isTypeValidationError(error)) {
99
- throw error;
100
- }
101
- throw new JSONParseError({ text, cause: error });
102
- }
103
- }
104
- function safeParseJSON({
105
- text,
106
- schema
107
- }) {
108
- try {
109
- const value = SecureJSON.parse(text);
110
- if (schema == null) {
111
- return {
112
- success: true,
113
- value
114
- };
115
- }
116
- return safeValidateTypes({ value, schema });
117
- } catch (error) {
118
- return {
119
- success: false,
120
- error: JSONParseError.isJSONParseError(error) ? error : new JSONParseError({ text, cause: error })
121
- };
122
- }
123
- }
124
- function isParseableJson(input) {
125
- try {
126
- SecureJSON.parse(input);
127
- return true;
128
- } catch (e) {
129
- return false;
130
- }
131
- }
132
-
133
- // spec/util/post-to-api.ts
134
- import { APICallError } from "@ai-sdk/provider";
135
- var postJsonToApi = async ({
136
- url,
137
- headers,
138
- body,
139
- failedResponseHandler,
140
- successfulResponseHandler,
141
- abortSignal
142
- }) => postToApi({
143
- url,
144
- headers: {
145
- ...headers,
146
- "Content-Type": "application/json"
147
- },
148
- body: {
149
- content: JSON.stringify(body),
150
- values: body
151
- },
152
- failedResponseHandler,
153
- successfulResponseHandler,
154
- abortSignal
155
- });
156
- var postToApi = async ({
157
- url,
158
- headers = {},
159
- body,
160
- successfulResponseHandler,
161
- failedResponseHandler,
162
- abortSignal
163
- }) => {
164
- try {
165
- const definedHeaders = Object.fromEntries(
166
- Object.entries(headers).filter(([_key, value]) => value != null)
167
- );
168
- const response = await fetch(url, {
169
- method: "POST",
170
- headers: definedHeaders,
171
- body: body.content,
172
- signal: abortSignal
173
- });
174
- if (!response.ok) {
175
- try {
176
- throw await failedResponseHandler({
177
- response,
178
- url,
179
- requestBodyValues: body.values
180
- });
181
- } catch (error) {
182
- if (error instanceof Error) {
183
- if (error.name === "AbortError" || APICallError.isAPICallError(error)) {
184
- throw error;
185
- }
186
- }
187
- throw new APICallError({
188
- message: "Failed to process error response",
189
- cause: error,
190
- statusCode: response.status,
191
- url,
192
- requestBodyValues: body.values
193
- });
194
- }
195
- }
196
- try {
197
- return await successfulResponseHandler({
198
- response,
199
- url,
200
- requestBodyValues: body.values
201
- });
202
- } catch (error) {
203
- if (error instanceof Error) {
204
- if (error.name === "AbortError" || APICallError.isAPICallError(error)) {
205
- throw error;
206
- }
207
- }
208
- throw new APICallError({
209
- message: "Failed to process successful response",
210
- cause: error,
211
- statusCode: response.status,
212
- url,
213
- requestBodyValues: body.values
214
- });
215
- }
216
- } catch (error) {
217
- if (error instanceof Error) {
218
- if (error.name === "AbortError") {
219
- throw error;
220
- }
221
- }
222
- if (error instanceof TypeError && error.message === "fetch failed") {
223
- const cause = error.cause;
224
- if (cause != null) {
225
- throw new APICallError({
226
- message: `Cannot connect to API: ${cause.message}`,
227
- cause,
228
- url,
229
- requestBodyValues: body.values,
230
- isRetryable: true
231
- // retry when network error
232
- });
233
- }
234
- }
235
- throw error;
236
- }
237
- };
238
-
239
- // spec/util/response-handler.ts
240
- import { APICallError as APICallError2, NoResponseBodyError } from "@ai-sdk/provider";
241
- import {
242
- EventSourceParserStream
243
- } from "eventsource-parser/stream";
244
- var createJsonErrorResponseHandler = ({
245
- errorSchema,
246
- errorToMessage,
247
- isRetryable
248
- }) => async ({ response, url, requestBodyValues }) => {
249
- const responseBody = await response.text();
250
- if (responseBody.trim() === "") {
251
- return new APICallError2({
252
- message: response.statusText,
253
- url,
254
- requestBodyValues,
255
- statusCode: response.status,
256
- responseBody,
257
- isRetryable: isRetryable == null ? void 0 : isRetryable(response)
258
- });
259
- }
260
- try {
261
- const parsedError = parseJSON({
262
- text: responseBody,
263
- schema: errorSchema
264
- });
265
- return new APICallError2({
266
- message: errorToMessage(parsedError),
267
- url,
268
- requestBodyValues,
269
- statusCode: response.status,
270
- responseBody,
271
- data: parsedError,
272
- isRetryable: isRetryable == null ? void 0 : isRetryable(response, parsedError)
273
- });
274
- } catch (parseError) {
275
- return new APICallError2({
276
- message: response.statusText,
277
- url,
278
- requestBodyValues,
279
- statusCode: response.status,
280
- responseBody,
281
- isRetryable: isRetryable == null ? void 0 : isRetryable(response)
282
- });
283
- }
284
- };
285
- var createEventSourceResponseHandler = (chunkSchema) => async ({ response }) => {
286
- if (response.body == null) {
287
- throw new NoResponseBodyError();
288
- }
289
- return response.body.pipeThrough(new TextDecoderStream()).pipeThrough(new EventSourceParserStream()).pipeThrough(
290
- new TransformStream({
291
- transform({ data }, controller) {
292
- if (data === "[DONE]") {
293
- return;
294
- }
295
- controller.enqueue(
296
- safeParseJSON({
297
- text: data,
298
- schema: chunkSchema
299
- })
300
- );
301
- }
302
- })
303
- );
304
- };
305
- var createJsonResponseHandler = (responseSchema) => async ({ response, url, requestBodyValues }) => {
306
- const responseBody = await response.text();
307
- const parsedResult = safeParseJSON({
308
- text: responseBody,
309
- schema: responseSchema
310
- });
311
- if (!parsedResult.success) {
312
- throw new APICallError2({
313
- message: "Invalid JSON response",
314
- cause: parsedResult.error,
315
- statusCode: response.status,
316
- responseBody,
317
- url,
318
- requestBodyValues
319
- });
320
- }
321
- return parsedResult.value;
322
- };
323
-
324
- // spec/util/scale.ts
325
- function scale({
326
- inputMin = 0,
327
- inputMax = 1,
328
- outputMin,
329
- outputMax,
330
- value
331
- }) {
332
- if (value === void 0) {
333
- return void 0;
334
- }
335
- const inputRange = inputMax - inputMin;
336
- const outputRange = outputMax - outputMin;
337
- return (value - inputMin) * outputRange / inputRange + outputMin;
338
- }
339
-
340
- // spec/util/uint8-utils.ts
341
- function convertUint8ArrayToBase64(array) {
342
- let latin1string = "";
343
- for (let i = 0; i < array.length; i++) {
344
- latin1string += String.fromCodePoint(array[i]);
345
- }
346
- return globalThis.btoa(latin1string);
347
- }
348
-
349
- // openai/openai-chat-language-model.ts
350
- import {
351
- InvalidResponseDataError,
352
- UnsupportedFunctionalityError
353
- } from "@ai-sdk/provider";
354
- import { z as z2 } from "zod";
355
-
356
- // openai/convert-to-openai-chat-messages.ts
357
- function convertToOpenAIChatMessages(prompt) {
358
- const messages = [];
359
- for (const { role, content } of prompt) {
360
- switch (role) {
361
- case "system": {
362
- messages.push({ role: "system", content });
363
- break;
364
- }
365
- case "user": {
366
- messages.push({
367
- role: "user",
368
- content: content.map((part) => {
369
- var _a;
370
- switch (part.type) {
371
- case "text": {
372
- return { type: "text", text: part.text };
373
- }
374
- case "image": {
375
- return {
376
- type: "image_url",
377
- image_url: {
378
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`
379
- }
380
- };
381
- }
382
- }
383
- })
384
- });
385
- break;
386
- }
387
- case "assistant": {
388
- let text = "";
389
- const toolCalls = [];
390
- for (const part of content) {
391
- switch (part.type) {
392
- case "text": {
393
- text += part.text;
394
- break;
395
- }
396
- case "tool-call": {
397
- toolCalls.push({
398
- id: part.toolCallId,
399
- type: "function",
400
- function: {
401
- name: part.toolName,
402
- arguments: JSON.stringify(part.args)
403
- }
404
- });
405
- break;
406
- }
407
- default: {
408
- const _exhaustiveCheck = part;
409
- throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
410
- }
411
- }
412
- }
413
- messages.push({
414
- role: "assistant",
415
- content: text,
416
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
417
- });
418
- break;
419
- }
420
- case "tool": {
421
- for (const toolResponse of content) {
422
- messages.push({
423
- role: "tool",
424
- tool_call_id: toolResponse.toolCallId,
425
- content: JSON.stringify(toolResponse.result)
426
- });
427
- }
428
- break;
429
- }
430
- default: {
431
- const _exhaustiveCheck = role;
432
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
433
- }
434
- }
435
- }
436
- return messages;
437
- }
438
-
439
- // openai/map-openai-finish-reason.ts
440
- function mapOpenAIFinishReason(finishReason) {
441
- switch (finishReason) {
442
- case "stop":
443
- return "stop";
444
- case "length":
445
- return "length";
446
- case "content_filter":
447
- return "content-filter";
448
- case "function_call":
449
- case "tool_calls":
450
- return "tool-calls";
451
- default:
452
- return "other";
453
- }
454
- }
455
-
456
- // openai/openai-error.ts
457
- import { z } from "zod";
458
- var openAIErrorDataSchema = z.object({
459
- error: z.object({
460
- message: z.string(),
461
- type: z.string(),
462
- param: z.any().nullable(),
463
- code: z.string().nullable()
464
- })
465
- });
466
- var openaiFailedResponseHandler = createJsonErrorResponseHandler({
467
- errorSchema: openAIErrorDataSchema,
468
- errorToMessage: (data) => data.error.message
469
- });
470
-
471
- // openai/openai-chat-language-model.ts
472
- var OpenAIChatLanguageModel = class {
473
- constructor(modelId, settings, config) {
474
- this.specificationVersion = "v1";
475
- this.defaultObjectGenerationMode = "tool";
476
- this.modelId = modelId;
477
- this.settings = settings;
478
- this.config = config;
479
- }
480
- get provider() {
481
- return this.config.provider;
482
- }
483
- getArgs({
484
- mode,
485
- prompt,
486
- maxTokens,
487
- temperature,
488
- topP,
489
- frequencyPenalty,
490
- presencePenalty,
491
- seed
492
- }) {
493
- var _a;
494
- const type = mode.type;
495
- const baseArgs = {
496
- // model id:
497
- model: this.modelId,
498
- // model specific settings:
499
- logit_bias: this.settings.logitBias,
500
- user: this.settings.user,
501
- // standardized settings:
502
- max_tokens: maxTokens,
503
- temperature: scale({
504
- value: temperature,
505
- outputMin: 0,
506
- outputMax: 2
507
- }),
508
- top_p: topP,
509
- frequency_penalty: scale({
510
- value: frequencyPenalty,
511
- inputMin: -1,
512
- inputMax: 1,
513
- outputMin: -2,
514
- outputMax: 2
515
- }),
516
- presence_penalty: scale({
517
- value: presencePenalty,
518
- inputMin: -1,
519
- inputMax: 1,
520
- outputMin: -2,
521
- outputMax: 2
522
- }),
523
- seed,
524
- // messages:
525
- messages: convertToOpenAIChatMessages(prompt)
526
- };
527
- switch (type) {
528
- case "regular": {
529
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
530
- return {
531
- ...baseArgs,
532
- tools: tools == null ? void 0 : tools.map((tool) => ({
533
- type: "function",
534
- function: {
535
- name: tool.name,
536
- description: tool.description,
537
- parameters: tool.parameters
538
- }
539
- }))
540
- };
541
- }
542
- case "object-json": {
543
- return {
544
- ...baseArgs,
545
- response_format: { type: "json_object" }
546
- };
547
- }
548
- case "object-tool": {
549
- return {
550
- ...baseArgs,
551
- tool_choice: { type: "function", function: { name: mode.tool.name } },
552
- tools: [
553
- {
554
- type: "function",
555
- function: {
556
- name: mode.tool.name,
557
- description: mode.tool.description,
558
- parameters: mode.tool.parameters
559
- }
560
- }
561
- ]
562
- };
563
- }
564
- case "object-grammar": {
565
- throw new UnsupportedFunctionalityError({
566
- functionality: "object-grammar mode"
567
- });
568
- }
569
- default: {
570
- const _exhaustiveCheck = type;
571
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
572
- }
573
- }
574
- }
575
- async doGenerate(options) {
576
- var _a, _b;
577
- const args = this.getArgs(options);
578
- const response = await postJsonToApi({
579
- url: `${this.config.baseUrl}/chat/completions`,
580
- headers: this.config.headers(),
581
- body: args,
582
- failedResponseHandler: openaiFailedResponseHandler,
583
- successfulResponseHandler: createJsonResponseHandler(
584
- openAIChatResponseSchema
585
- ),
586
- abortSignal: options.abortSignal
587
- });
588
- const { messages: rawPrompt, ...rawSettings } = args;
589
- const choice = response.choices[0];
590
- return {
591
- text: (_a = choice.message.content) != null ? _a : void 0,
592
- toolCalls: (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => ({
593
- toolCallType: "function",
594
- toolCallId: toolCall.id,
595
- toolName: toolCall.function.name,
596
- args: toolCall.function.arguments
597
- })),
598
- finishReason: mapOpenAIFinishReason(choice.finish_reason),
599
- usage: {
600
- promptTokens: response.usage.prompt_tokens,
601
- completionTokens: response.usage.completion_tokens
602
- },
603
- rawCall: { rawPrompt, rawSettings },
604
- warnings: []
605
- };
606
- }
607
- async doStream(options) {
608
- const args = this.getArgs(options);
609
- const response = await postJsonToApi({
610
- url: `${this.config.baseUrl}/chat/completions`,
611
- headers: this.config.headers(),
612
- body: {
613
- ...args,
614
- stream: true
615
- },
616
- failedResponseHandler: openaiFailedResponseHandler,
617
- successfulResponseHandler: createEventSourceResponseHandler(
618
- openaiChatChunkSchema
619
- ),
620
- abortSignal: options.abortSignal
621
- });
622
- const { messages: rawPrompt, ...rawSettings } = args;
623
- const toolCalls = [];
624
- let finishReason = "other";
625
- let usage = {
626
- promptTokens: Number.NaN,
627
- completionTokens: Number.NaN
628
- };
629
- return {
630
- stream: response.pipeThrough(
631
- new TransformStream({
632
- transform(chunk, controller) {
633
- var _a, _b, _c, _d, _e, _f, _g, _h, _i;
634
- if (!chunk.success) {
635
- controller.enqueue({ type: "error", error: chunk.error });
636
- return;
637
- }
638
- const value = chunk.value;
639
- if (value.usage != null) {
640
- usage = {
641
- promptTokens: value.usage.prompt_tokens,
642
- completionTokens: value.usage.completion_tokens
643
- };
644
- }
645
- const choice = value.choices[0];
646
- if ((choice == null ? void 0 : choice.finish_reason) != null) {
647
- finishReason = mapOpenAIFinishReason(choice.finish_reason);
648
- }
649
- if ((choice == null ? void 0 : choice.delta) == null) {
650
- return;
651
- }
652
- const delta = choice.delta;
653
- if (delta.content != null) {
654
- controller.enqueue({
655
- type: "text-delta",
656
- textDelta: delta.content
657
- });
658
- }
659
- if (delta.tool_calls != null) {
660
- for (const toolCallDelta of delta.tool_calls) {
661
- const index = toolCallDelta.index;
662
- if (toolCalls[index] == null) {
663
- if (toolCallDelta.type !== "function") {
664
- throw new InvalidResponseDataError({
665
- data: toolCallDelta,
666
- message: `Expected 'function' type.`
667
- });
668
- }
669
- if (toolCallDelta.id == null) {
670
- throw new InvalidResponseDataError({
671
- data: toolCallDelta,
672
- message: `Expected 'id' to be a string.`
673
- });
674
- }
675
- if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
676
- throw new InvalidResponseDataError({
677
- data: toolCallDelta,
678
- message: `Expected 'function.name' to be a string.`
679
- });
680
- }
681
- toolCalls[index] = {
682
- id: toolCallDelta.id,
683
- type: "function",
684
- function: {
685
- name: toolCallDelta.function.name,
686
- arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
687
- }
688
- };
689
- continue;
690
- }
691
- const toolCall = toolCalls[index];
692
- if (((_c = toolCallDelta.function) == null ? void 0 : _c.arguments) != null) {
693
- toolCall.function.arguments += (_e = (_d = toolCallDelta.function) == null ? void 0 : _d.arguments) != null ? _e : "";
694
- }
695
- controller.enqueue({
696
- type: "tool-call-delta",
697
- toolCallType: "function",
698
- toolCallId: toolCall.id,
699
- toolName: toolCall.function.name,
700
- argsTextDelta: (_f = toolCallDelta.function.arguments) != null ? _f : ""
701
- });
702
- if (((_g = toolCall.function) == null ? void 0 : _g.name) == null || ((_h = toolCall.function) == null ? void 0 : _h.arguments) == null || !isParseableJson(toolCall.function.arguments)) {
703
- continue;
704
- }
705
- controller.enqueue({
706
- type: "tool-call",
707
- toolCallType: "function",
708
- toolCallId: (_i = toolCall.id) != null ? _i : generateId(),
709
- toolName: toolCall.function.name,
710
- args: toolCall.function.arguments
711
- });
712
- }
713
- }
714
- },
715
- flush(controller) {
716
- controller.enqueue({ type: "finish", finishReason, usage });
717
- }
718
- })
719
- ),
720
- rawCall: { rawPrompt, rawSettings },
721
- warnings: []
722
- };
723
- }
724
- };
725
- var openAIChatResponseSchema = z2.object({
726
- choices: z2.array(
727
- z2.object({
728
- message: z2.object({
729
- role: z2.literal("assistant"),
730
- content: z2.string().nullable(),
731
- tool_calls: z2.array(
732
- z2.object({
733
- id: z2.string(),
734
- type: z2.literal("function"),
735
- function: z2.object({
736
- name: z2.string(),
737
- arguments: z2.string()
738
- })
739
- })
740
- ).optional()
741
- }),
742
- index: z2.number(),
743
- finish_reason: z2.string().optional().nullable()
744
- })
745
- ),
746
- object: z2.literal("chat.completion"),
747
- usage: z2.object({
748
- prompt_tokens: z2.number(),
749
- completion_tokens: z2.number()
750
- })
751
- });
752
- var openaiChatChunkSchema = z2.object({
753
- object: z2.literal("chat.completion.chunk"),
754
- choices: z2.array(
755
- z2.object({
756
- delta: z2.object({
757
- role: z2.enum(["assistant"]).optional(),
758
- content: z2.string().nullable().optional(),
759
- tool_calls: z2.array(
760
- z2.object({
761
- index: z2.number(),
762
- id: z2.string().optional(),
763
- type: z2.literal("function").optional(),
764
- function: z2.object({
765
- name: z2.string().optional(),
766
- arguments: z2.string().optional()
767
- })
768
- })
769
- ).optional()
770
- }),
771
- finish_reason: z2.string().nullable().optional(),
772
- index: z2.number()
773
- })
774
- ),
775
- usage: z2.object({
776
- prompt_tokens: z2.number(),
777
- completion_tokens: z2.number()
778
- }).optional().nullable()
779
- });
780
-
781
- // openai/openai-completion-language-model.ts
782
- import {
783
- UnsupportedFunctionalityError as UnsupportedFunctionalityError3
784
- } from "@ai-sdk/provider";
785
- import { z as z3 } from "zod";
786
-
787
- // openai/convert-to-openai-completion-prompt.ts
788
- import {
789
- InvalidPromptError,
790
- UnsupportedFunctionalityError as UnsupportedFunctionalityError2
791
- } from "@ai-sdk/provider";
792
- function convertToOpenAICompletionPrompt({
793
- prompt,
794
- inputFormat,
795
- user = "user",
796
- assistant = "assistant"
797
- }) {
798
- if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
799
- return { prompt: prompt[0].content[0].text };
800
- }
801
- let text = "";
802
- if (prompt[0].role === "system") {
803
- text += `${prompt[0].content}
804
-
805
- `;
806
- prompt = prompt.slice(1);
807
- }
808
- for (const { role, content } of prompt) {
809
- switch (role) {
810
- case "system": {
811
- throw new InvalidPromptError({
812
- message: "Unexpected system message in prompt: ${content}",
813
- prompt
814
- });
815
- }
816
- case "user": {
817
- const userMessage = content.map((part) => {
818
- switch (part.type) {
819
- case "text": {
820
- return part.text;
821
- }
822
- case "image": {
823
- throw new UnsupportedFunctionalityError2({
824
- functionality: "images"
825
- });
826
- }
827
- }
828
- }).join("");
829
- text += `${user}:
830
- ${userMessage}
831
-
832
- `;
833
- break;
834
- }
835
- case "assistant": {
836
- const assistantMessage = content.map((part) => {
837
- switch (part.type) {
838
- case "text": {
839
- return part.text;
840
- }
841
- case "tool-call": {
842
- throw new UnsupportedFunctionalityError2({
843
- functionality: "tool-call messages"
844
- });
845
- }
846
- }
847
- }).join("");
848
- text += `${assistant}:
849
- ${assistantMessage}
850
-
851
- `;
852
- break;
853
- }
854
- case "tool": {
855
- throw new UnsupportedFunctionalityError2({
856
- functionality: "tool messages"
857
- });
858
- }
859
- default: {
860
- const _exhaustiveCheck = role;
861
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
862
- }
863
- }
864
- }
865
- text += `${assistant}:
866
- `;
867
- return {
868
- prompt: text,
869
- stopSequences: [`
870
- ${user}:`]
871
- };
872
- }
873
-
874
- // openai/openai-completion-language-model.ts
875
- var OpenAICompletionLanguageModel = class {
876
- constructor(modelId, settings, config) {
877
- this.specificationVersion = "v1";
878
- this.defaultObjectGenerationMode = void 0;
879
- this.modelId = modelId;
880
- this.settings = settings;
881
- this.config = config;
882
- }
883
- get provider() {
884
- return this.config.provider;
885
- }
886
- getArgs({
887
- mode,
888
- inputFormat,
889
- prompt,
890
- maxTokens,
891
- temperature,
892
- topP,
893
- frequencyPenalty,
894
- presencePenalty,
895
- seed
896
- }) {
897
- var _a;
898
- const type = mode.type;
899
- const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
900
- const baseArgs = {
901
- // model id:
902
- model: this.modelId,
903
- // model specific settings:
904
- echo: this.settings.echo,
905
- logit_bias: this.settings.logitBias,
906
- suffix: this.settings.suffix,
907
- user: this.settings.user,
908
- // standardized settings:
909
- max_tokens: maxTokens,
910
- temperature: scale({
911
- value: temperature,
912
- outputMin: 0,
913
- outputMax: 2
914
- }),
915
- top_p: topP,
916
- frequency_penalty: scale({
917
- value: frequencyPenalty,
918
- inputMin: -1,
919
- inputMax: 1,
920
- outputMin: -2,
921
- outputMax: 2
922
- }),
923
- presence_penalty: scale({
924
- value: presencePenalty,
925
- inputMin: -1,
926
- inputMax: 1,
927
- outputMin: -2,
928
- outputMax: 2
929
- }),
930
- seed,
931
- // prompt:
932
- prompt: completionPrompt,
933
- // stop sequences:
934
- stop: stopSequences
935
- };
936
- switch (type) {
937
- case "regular": {
938
- if ((_a = mode.tools) == null ? void 0 : _a.length) {
939
- throw new UnsupportedFunctionalityError3({
940
- functionality: "tools"
941
- });
942
- }
943
- return baseArgs;
944
- }
945
- case "object-json": {
946
- throw new UnsupportedFunctionalityError3({
947
- functionality: "object-json mode"
948
- });
949
- }
950
- case "object-tool": {
951
- throw new UnsupportedFunctionalityError3({
952
- functionality: "object-tool mode"
953
- });
954
- }
955
- case "object-grammar": {
956
- throw new UnsupportedFunctionalityError3({
957
- functionality: "object-grammar mode"
958
- });
959
- }
960
- default: {
961
- const _exhaustiveCheck = type;
962
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
963
- }
964
- }
965
- }
966
- async doGenerate(options) {
967
- const args = this.getArgs(options);
968
- const response = await postJsonToApi({
969
- url: `${this.config.baseUrl}/completions`,
970
- headers: this.config.headers(),
971
- body: args,
972
- failedResponseHandler: openaiFailedResponseHandler,
973
- successfulResponseHandler: createJsonResponseHandler(
974
- openAICompletionResponseSchema
975
- ),
976
- abortSignal: options.abortSignal
977
- });
978
- const { prompt: rawPrompt, ...rawSettings } = args;
979
- const choice = response.choices[0];
980
- return {
981
- text: choice.text,
982
- usage: {
983
- promptTokens: response.usage.prompt_tokens,
984
- completionTokens: response.usage.completion_tokens
985
- },
986
- finishReason: mapOpenAIFinishReason(choice.finish_reason),
987
- rawCall: { rawPrompt, rawSettings },
988
- warnings: []
989
- };
990
- }
991
- async doStream(options) {
992
- const args = this.getArgs(options);
993
- const response = await postJsonToApi({
994
- url: `${this.config.baseUrl}/completions`,
995
- headers: this.config.headers(),
996
- body: {
997
- ...this.getArgs(options),
998
- stream: true
999
- },
1000
- failedResponseHandler: openaiFailedResponseHandler,
1001
- successfulResponseHandler: createEventSourceResponseHandler(
1002
- openaiCompletionChunkSchema
1003
- ),
1004
- abortSignal: options.abortSignal
1005
- });
1006
- const { prompt: rawPrompt, ...rawSettings } = args;
1007
- let finishReason = "other";
1008
- let usage = {
1009
- promptTokens: Number.NaN,
1010
- completionTokens: Number.NaN
1011
- };
1012
- return {
1013
- stream: response.pipeThrough(
1014
- new TransformStream({
1015
- transform(chunk, controller) {
1016
- if (!chunk.success) {
1017
- controller.enqueue({ type: "error", error: chunk.error });
1018
- return;
1019
- }
1020
- const value = chunk.value;
1021
- if (value.usage != null) {
1022
- usage = {
1023
- promptTokens: value.usage.prompt_tokens,
1024
- completionTokens: value.usage.completion_tokens
1025
- };
1026
- }
1027
- const choice = value.choices[0];
1028
- if ((choice == null ? void 0 : choice.finish_reason) != null) {
1029
- finishReason = mapOpenAIFinishReason(choice.finish_reason);
1030
- }
1031
- if ((choice == null ? void 0 : choice.text) != null) {
1032
- controller.enqueue({
1033
- type: "text-delta",
1034
- textDelta: choice.text
1035
- });
1036
- }
1037
- },
1038
- flush(controller) {
1039
- controller.enqueue({ type: "finish", finishReason, usage });
1040
- }
1041
- })
1042
- ),
1043
- rawCall: { rawPrompt, rawSettings },
1044
- warnings: []
1045
- };
1046
- }
1047
- };
1048
- var openAICompletionResponseSchema = z3.object({
1049
- choices: z3.array(
1050
- z3.object({
1051
- text: z3.string(),
1052
- finish_reason: z3.string()
1053
- })
1054
- ),
1055
- usage: z3.object({
1056
- prompt_tokens: z3.number(),
1057
- completion_tokens: z3.number()
1058
- })
1059
- });
1060
- var openaiCompletionChunkSchema = z3.object({
1061
- object: z3.literal("text_completion"),
1062
- choices: z3.array(
1063
- z3.object({
1064
- text: z3.string(),
1065
- finish_reason: z3.enum(["stop", "length", "content_filter"]).optional().nullable(),
1066
- index: z3.number()
1067
- })
1068
- ),
1069
- usage: z3.object({
1070
- prompt_tokens: z3.number(),
1071
- completion_tokens: z3.number()
1072
- }).optional().nullable()
1073
- });
1074
-
1075
- // openai/openai-facade.ts
1076
- var OpenAI = class {
1077
- constructor(options = {}) {
1078
- this.baseUrl = options.baseUrl;
1079
- this.apiKey = options.apiKey;
1080
- this.organization = options.organization;
1081
- }
1082
- get baseConfig() {
1083
- var _a;
1084
- return {
1085
- organization: this.organization,
1086
- baseUrl: (_a = this.baseUrl) != null ? _a : "https://api.openai.com/v1",
1087
- headers: () => ({
1088
- Authorization: `Bearer ${loadApiKey({
1089
- apiKey: this.apiKey,
1090
- environmentVariableName: "OPENAI_API_KEY",
1091
- description: "OpenAI"
1092
- })}`,
1093
- "OpenAI-Organization": this.organization
1094
- })
1095
- };
1096
- }
1097
- chat(modelId, settings = {}) {
1098
- return new OpenAIChatLanguageModel(modelId, settings, {
1099
- provider: "openai.chat",
1100
- ...this.baseConfig
1101
- });
1102
- }
1103
- completion(modelId, settings = {}) {
1104
- return new OpenAICompletionLanguageModel(modelId, settings, {
1105
- provider: "openai.completion",
1106
- ...this.baseConfig
1107
- });
1108
- }
1109
- };
1110
- var openai = new OpenAI();
1111
- export {
1112
- OpenAI,
1113
- openai
1114
- };
1115
- //# sourceMappingURL=index.mjs.map