ai 3.1.0-canary.3 → 3.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/README.md +1 -1
  2. package/dist/index.d.mts +982 -24
  3. package/dist/index.d.ts +982 -24
  4. package/dist/index.js +1748 -175
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +1723 -174
  7. package/dist/index.mjs.map +1 -1
  8. package/package.json +14 -31
  9. package/prompts/dist/index.d.mts +13 -1
  10. package/prompts/dist/index.d.ts +13 -1
  11. package/prompts/dist/index.js +13 -0
  12. package/prompts/dist/index.js.map +1 -1
  13. package/prompts/dist/index.mjs +12 -0
  14. package/prompts/dist/index.mjs.map +1 -1
  15. package/react/dist/index.d.mts +27 -6
  16. package/react/dist/index.d.ts +31 -8
  17. package/react/dist/index.js +155 -141
  18. package/react/dist/index.js.map +1 -1
  19. package/react/dist/index.mjs +154 -141
  20. package/react/dist/index.mjs.map +1 -1
  21. package/react/dist/index.server.d.mts +4 -2
  22. package/react/dist/index.server.d.ts +4 -2
  23. package/react/dist/index.server.js.map +1 -1
  24. package/react/dist/index.server.mjs.map +1 -1
  25. package/rsc/dist/index.d.ts +385 -20
  26. package/rsc/dist/rsc-client.d.mts +1 -1
  27. package/rsc/dist/rsc-client.mjs +2 -0
  28. package/rsc/dist/rsc-client.mjs.map +1 -1
  29. package/rsc/dist/rsc-server.d.mts +367 -20
  30. package/rsc/dist/rsc-server.mjs +676 -35
  31. package/rsc/dist/rsc-server.mjs.map +1 -1
  32. package/rsc/dist/rsc-shared.d.mts +24 -9
  33. package/rsc/dist/rsc-shared.mjs +98 -4
  34. package/rsc/dist/rsc-shared.mjs.map +1 -1
  35. package/solid/dist/index.d.mts +7 -3
  36. package/solid/dist/index.d.ts +7 -3
  37. package/solid/dist/index.js +106 -107
  38. package/solid/dist/index.js.map +1 -1
  39. package/solid/dist/index.mjs +106 -107
  40. package/solid/dist/index.mjs.map +1 -1
  41. package/svelte/dist/index.d.mts +7 -3
  42. package/svelte/dist/index.d.ts +7 -3
  43. package/svelte/dist/index.js +109 -109
  44. package/svelte/dist/index.js.map +1 -1
  45. package/svelte/dist/index.mjs +109 -109
  46. package/svelte/dist/index.mjs.map +1 -1
  47. package/vue/dist/index.d.mts +7 -3
  48. package/vue/dist/index.d.ts +7 -3
  49. package/vue/dist/index.js +106 -107
  50. package/vue/dist/index.js.map +1 -1
  51. package/vue/dist/index.mjs +106 -107
  52. package/vue/dist/index.mjs.map +1 -1
  53. package/ai-model-specification/dist/index.d.mts +0 -606
  54. package/ai-model-specification/dist/index.d.ts +0 -606
  55. package/ai-model-specification/dist/index.js +0 -617
  56. package/ai-model-specification/dist/index.js.map +0 -1
  57. package/ai-model-specification/dist/index.mjs +0 -560
  58. package/ai-model-specification/dist/index.mjs.map +0 -1
  59. package/core/dist/index.d.mts +0 -590
  60. package/core/dist/index.d.ts +0 -590
  61. package/core/dist/index.js +0 -1528
  62. package/core/dist/index.js.map +0 -1
  63. package/core/dist/index.mjs +0 -1481
  64. package/core/dist/index.mjs.map +0 -1
  65. package/provider/dist/index.d.mts +0 -429
  66. package/provider/dist/index.d.ts +0 -429
  67. package/provider/dist/index.js +0 -1194
  68. package/provider/dist/index.js.map +0 -1
  69. package/provider/dist/index.mjs +0 -1158
  70. package/provider/dist/index.mjs.map +0 -1
package/provider/dist/index.js
@@ -1,1194 +0,0 @@
- "use strict";
- var __create = Object.create;
- var __defProp = Object.defineProperty;
- var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
- var __getOwnPropNames = Object.getOwnPropertyNames;
- var __getProtoOf = Object.getPrototypeOf;
- var __hasOwnProp = Object.prototype.hasOwnProperty;
- var __export = (target, all) => {
- for (var name in all)
- __defProp(target, name, { get: all[name], enumerable: true });
- };
- var __copyProps = (to, from, except, desc) => {
- if (from && typeof from === "object" || typeof from === "function") {
- for (let key of __getOwnPropNames(from))
- if (!__hasOwnProp.call(to, key) && key !== except)
- __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
- }
- return to;
- };
- var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
- // If the importer is in node compatibility mode or this is not an ESM
- // file that has been converted to a CommonJS file using a Babel-
- // compatible transform (i.e. "__esModule" has not been set), then set
- // "default" to the CommonJS "module.exports" for node compatibility.
- isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
- mod
- ));
- var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
-
- // provider/index.ts
- var provider_exports = {};
- __export(provider_exports, {
- OpenAI: () => OpenAI,
- openai: () => openai
- });
- module.exports = __toCommonJS(provider_exports);
-
- // ai-model-specification/errors/api-call-error.ts
- var APICallError = class extends Error {
- constructor({
- message,
- url,
- requestBodyValues,
- statusCode,
- responseBody,
- cause,
- isRetryable = statusCode != null && (statusCode === 408 || // request timeout
- statusCode === 409 || // conflict
- statusCode === 429 || // too many requests
- statusCode >= 500),
- // server error
- data
- }) {
- super(message);
- this.name = "ApiCallError";
- this.url = url;
- this.requestBodyValues = requestBodyValues;
- this.statusCode = statusCode;
- this.responseBody = responseBody;
- this.cause = cause;
- this.isRetryable = isRetryable;
- this.data = data;
- }
- toJSON() {
- return {
- name: this.name,
- message: this.message,
- url: this.url,
- requestBodyValues: this.requestBodyValues,
- statusCode: this.statusCode,
- responseBody: this.responseBody,
- cause: this.cause,
- isRetryable: this.isRetryable,
- data: this.data
- };
- }
- };
-
- // ai-model-specification/util/get-error-message.ts
- function getErrorMessage(error) {
- if (error == null) {
- return "unknown error";
- }
- if (typeof error === "string") {
- return error;
- }
- if (error instanceof Error) {
- return error.message;
- }
- return JSON.stringify(error);
- }
-
- // ai-model-specification/errors/json-parse-error.ts
- var JSONParseError = class extends Error {
- constructor({ text, cause }) {
- super(
- `JSON parsing failed: Text: ${text}.
- Error message: ${getErrorMessage(cause)}`
- );
- this.name = "JSONParseError";
- this.cause = cause;
- this.text = text;
- }
- toJSON() {
- return {
- name: this.name,
- message: this.message,
- cause: this.cause,
- stack: this.stack,
- valueText: this.text
- };
- }
- };
-
- // ai-model-specification/errors/load-api-key-error.ts
- var LoadAPIKeyError = class extends Error {
- constructor({ message }) {
- super(message);
- this.name = "LoadAPIKeyError";
- }
- toJSON() {
- return {
- name: this.name,
- message: this.message
- };
- }
- };
-
- // ai-model-specification/errors/type-validation-error.ts
- var TypeValidationError = class extends Error {
- constructor({ value, cause }) {
- super(
- `Type validation failed: Value: ${JSON.stringify(value)}.
- Error message: ${getErrorMessage(cause)}`
- );
- this.name = "TypeValidationError";
- this.cause = cause;
- this.value = value;
- }
- toJSON() {
- return {
- name: this.name,
- message: this.message,
- cause: this.cause,
- stack: this.stack,
- value: this.value
- };
- }
- };
-
- // ai-model-specification/errors/unsupported-functionality-error.ts
- var UnsupportedFunctionalityError = class extends Error {
- constructor({
- provider,
- functionality
- }) {
- super(
- `Functionality not supported by the provider. Provider: ${provider}.
- Functionality: ${functionality}`
- );
- this.name = "UnsupportedFunctionalityError";
- this.provider = provider;
- this.functionality = functionality;
- }
- toJSON() {
- return {
- name: this.name,
- message: this.message,
- stack: this.stack,
- provider: this.provider,
- functionality: this.functionality
- };
- }
- };
-
- // ai-model-specification/util/load-api-key.ts
- function loadApiKey({
- apiKey,
- environmentVariableName,
- apiKeyParameterName = "apiKey",
- description
- }) {
- if (apiKey != null) {
- return apiKey;
- }
- if (typeof process === "undefined") {
- throw new LoadAPIKeyError({
- message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter. Environment variables is not supported in this environment.`
- });
- }
- apiKey = process.env[environmentVariableName];
- if (apiKey == null) {
- throw new LoadAPIKeyError({
- message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter or the ${environmentVariableName} environment variable.`
- });
- }
- return apiKey;
- }
-
- // ai-model-specification/util/parse-json.ts
- var import_secure_json_parse = __toESM(require("secure-json-parse"));
-
- // ai-model-specification/util/validate-types.ts
- function validateTypes({
- value,
- schema
- }) {
- try {
- return schema.parse(value);
- } catch (error) {
- throw new TypeValidationError({ value, cause: error });
- }
- }
- function safeValidateTypes({
- value,
- schema
- }) {
- try {
- const validationResult = schema.safeParse(value);
- if (validationResult.success) {
- return {
- success: true,
- value: validationResult.data
- };
- }
- return {
- success: false,
- error: new TypeValidationError({
- value,
- cause: validationResult.error
- })
- };
- } catch (error) {
- return {
- success: false,
- error: error instanceof TypeValidationError ? error : new TypeValidationError({ value, cause: error })
- };
- }
- }
-
- // ai-model-specification/util/parse-json.ts
- function parseJSON({
- text,
- schema
- }) {
- try {
- const value = import_secure_json_parse.default.parse(text);
- if (schema == null) {
- return value;
- }
- return validateTypes({ value, schema });
- } catch (error) {
- if (error instanceof JSONParseError || error instanceof TypeValidationError) {
- throw error;
- }
- throw new JSONParseError({ text, cause: error });
- }
- }
- function safeParseJSON({
- text,
- schema
- }) {
- try {
- const value = import_secure_json_parse.default.parse(text);
- if (schema == null) {
- return {
- success: true,
- value
- };
- }
- return safeValidateTypes({ value, schema });
- } catch (error) {
- return {
- success: false,
- error: error instanceof JSONParseError ? error : new JSONParseError({ text, cause: error })
- };
- }
- }
- function isParseableJson(input) {
- try {
- import_secure_json_parse.default.parse(input);
- return true;
- } catch (e) {
- return false;
- }
- }
-
- // ai-model-specification/util/post-to-api.ts
- var postJsonToApi = async ({
- url,
- headers,
- body,
- failedResponseHandler,
- successfulResponseHandler,
- abortSignal
- }) => postToApi({
- url,
- headers: {
- ...headers,
- "Content-Type": "application/json"
- },
- body: {
- content: JSON.stringify(body),
- values: body
- },
- failedResponseHandler,
- successfulResponseHandler,
- abortSignal
- });
- var postToApi = async ({
- url,
- headers = {},
- body,
- successfulResponseHandler,
- failedResponseHandler,
- abortSignal
- }) => {
- try {
- const definedHeaders = Object.fromEntries(
- Object.entries(headers).filter(([_key, value]) => value != null)
- );
- const response = await fetch(url, {
- method: "POST",
- headers: definedHeaders,
- body: body.content,
- signal: abortSignal
- });
- if (!response.ok) {
- try {
- throw await failedResponseHandler({
- response,
- url,
- requestBodyValues: body.values
- });
- } catch (error) {
- if (error instanceof Error) {
- if (error.name === "AbortError" || error instanceof APICallError) {
- throw error;
- }
- }
- throw new APICallError({
- message: "Failed to process error response",
- cause: error,
- statusCode: response.status,
- url,
- requestBodyValues: body.values
- });
- }
- }
- try {
- return await successfulResponseHandler({
- response,
- url,
- requestBodyValues: body.values
- });
- } catch (error) {
- if (error instanceof Error) {
- if (error.name === "AbortError" || error instanceof APICallError) {
- throw error;
- }
- }
- throw new APICallError({
- message: "Failed to process successful response",
- cause: error,
- statusCode: response.status,
- url,
- requestBodyValues: body.values
- });
- }
- } catch (error) {
- if (error instanceof Error) {
- if (error.name === "AbortError") {
- throw error;
- }
- }
- if (error instanceof TypeError && error.message === "fetch failed") {
- const cause = error.cause;
- if (cause != null) {
- throw new APICallError({
- message: `Cannot connect to API: ${cause.message}`,
- cause,
- url,
- requestBodyValues: body.values,
- isRetryable: true
- // retry when network error
- });
- }
- }
- throw error;
- }
- };
-
- // ai-model-specification/util/response-handler.ts
- var import_stream = require("eventsource-parser/stream");
- var createJsonErrorResponseHandler = ({
- errorSchema,
- errorToMessage,
- isRetryable
- }) => async ({ response, url, requestBodyValues }) => {
- const responseBody = await response.text();
- if (responseBody.trim() === "") {
- return new APICallError({
- message: response.statusText,
- url,
- requestBodyValues,
- statusCode: response.status,
- responseBody,
- isRetryable: isRetryable == null ? void 0 : isRetryable(response)
- });
- }
- try {
- const parsedError = parseJSON({
- text: responseBody,
- schema: errorSchema
- });
- return new APICallError({
- message: errorToMessage(parsedError),
- url,
- requestBodyValues,
- statusCode: response.status,
- responseBody,
- data: parsedError,
- isRetryable: isRetryable == null ? void 0 : isRetryable(response, parsedError)
- });
- } catch (parseError) {
- return new APICallError({
- message: response.statusText,
- url,
- requestBodyValues,
- statusCode: response.status,
- responseBody,
- isRetryable: isRetryable == null ? void 0 : isRetryable(response)
- });
- }
- };
- var createEventSourceResponseHandler = (chunkSchema) => async ({ response }) => {
- if (response.body == null) {
- throw new Error("No response body");
- }
- return response.body.pipeThrough(new TextDecoderStream()).pipeThrough(new import_stream.EventSourceParserStream()).pipeThrough(
- new TransformStream({
- transform({ data }, controller) {
- if (data === "[DONE]") {
- return;
- }
- const parseResult = safeParseJSON({
- text: data,
- schema: chunkSchema
- });
- controller.enqueue(
- parseResult.success ? { type: "value", value: parseResult.value } : { type: "error", error: parseResult.error }
- );
- }
- })
- );
- };
- var createJsonResponseHandler = (responseSchema) => async ({ response, url, requestBodyValues }) => {
- const responseBody = await response.text();
- const parsedResult = safeParseJSON({
- text: responseBody,
- schema: responseSchema
- });
- if (!parsedResult.success) {
- throw new APICallError({
- message: "Invalid JSON response",
- cause: parsedResult.error,
- statusCode: response.status,
- responseBody,
- url,
- requestBodyValues
- });
- }
- return parsedResult.value;
- };
-
- // ai-model-specification/util/scale.ts
- function scale({
- inputMin = 0,
- inputMax = 1,
- outputMin,
- outputMax,
- value
- }) {
- if (value === void 0) {
- return void 0;
- }
- const inputRange = inputMax - inputMin;
- const outputRange = outputMax - outputMin;
- return (value - inputMin) * outputRange / inputRange + outputMin;
- }
-
- // ai-model-specification/util/uint8-utils.ts
- function convertUint8ArrayToBase64(array) {
- let latin1string = "";
- for (const value of array) {
- latin1string += String.fromCodePoint(value);
- }
- return globalThis.btoa(latin1string);
- }
-
- // provider/openai/openai-chat-language-model.ts
- var import_nanoid = require("nanoid");
- var import_zod2 = require("zod");
-
- // provider/openai/convert-to-openai-chat-messages.ts
- function convertToOpenAIChatMessages(prompt) {
- const messages = [];
- for (const { role, content } of prompt) {
- switch (role) {
- case "system": {
- messages.push({ role: "system", content });
- break;
- }
- case "user": {
- messages.push({
- role: "user",
- content: content.map((part) => {
- var _a;
- switch (part.type) {
- case "text": {
- return { type: "text", text: part.text };
- }
- case "image": {
- return {
- type: "image_url",
- image_url: {
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`
- }
- };
- }
- }
- })
- });
- break;
- }
- case "assistant": {
- let text = "";
- const toolCalls = [];
- for (const part of content) {
- switch (part.type) {
- case "text": {
- text += part.text;
- break;
- }
- case "tool-call": {
- toolCalls.push({
- id: part.toolCallId,
- type: "function",
- function: {
- name: part.toolName,
- arguments: JSON.stringify(part.args)
- }
- });
- break;
- }
- default: {
- const _exhaustiveCheck = part;
- throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
- }
- }
- }
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
- break;
- }
- case "tool": {
- for (const toolResponse of content) {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
- }
- break;
- }
- default: {
- const _exhaustiveCheck = role;
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
- }
- }
- }
- return messages;
- }
-
- // provider/openai/openai-error.ts
- var import_zod = require("zod");
- var openAIErrorDataSchema = import_zod.z.object({
- error: import_zod.z.object({
- message: import_zod.z.string(),
- type: import_zod.z.string(),
- param: import_zod.z.any().nullable(),
- code: import_zod.z.string().nullable()
- })
- });
- var openaiFailedResponseHandler = createJsonErrorResponseHandler({
- errorSchema: openAIErrorDataSchema,
- errorToMessage: (data) => data.error.message
- });
-
- // provider/openai/map-openai-finish-reason.ts
- function mapOpenAIFinishReason(finishReason) {
- switch (finishReason) {
- case "stop":
- return "stop";
- case "length":
- return "length";
- case "content-filter":
- return "content-filter";
- case "function_call":
- case "tool-calls":
- return "tool-calls";
- default:
- return "other";
- }
- }
-
- // provider/openai/openai-chat-language-model.ts
- var OpenAIChatLanguageModel = class {
- constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
- this.defaultObjectGenerationMode = "tool";
- this.modelId = modelId;
- this.settings = settings;
- this.config = config;
- }
- get provider() {
- return this.config.provider;
- }
- getArgs({
- mode,
- prompt,
- maxTokens,
- temperature,
- topP,
- frequencyPenalty,
- presencePenalty,
- seed
- }) {
- var _a;
- const type = mode.type;
- const baseArgs = {
- // model id:
- model: this.modelId,
- // model specific settings:
- logit_bias: this.settings.logitBias,
- user: this.settings.user,
- // standardized settings:
- max_tokens: maxTokens,
- temperature: scale({
- value: temperature,
- outputMin: 0,
- outputMax: 2
- }),
- top_p: topP,
- frequency_penalty: scale({
- value: frequencyPenalty,
- inputMin: -1,
- inputMax: 1,
- outputMin: -2,
- outputMax: 2
- }),
- presence_penalty: scale({
- value: presencePenalty,
- inputMin: -1,
- inputMax: 1,
- outputMin: -2,
- outputMax: 2
- }),
- seed,
- // messages:
- messages: convertToOpenAIChatMessages(prompt)
- };
- switch (type) {
- case "regular": {
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
- return {
- ...baseArgs,
- tools: tools == null ? void 0 : tools.map((tool) => ({
- type: "function",
- function: {
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- }
- }))
- };
- }
- case "object-json": {
- return {
- ...baseArgs,
- response_format: { type: "json_object" }
- };
- }
- case "object-tool": {
- return {
- ...baseArgs,
- tool_choice: { type: "function", function: { name: mode.tool.name } },
- tools: [{ type: "function", function: mode.tool }]
- };
- }
- case "object-grammar": {
- throw new UnsupportedFunctionalityError({
- functionality: "object-grammar mode",
- provider: this.provider
- });
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
- }
- async doGenerate(options) {
- var _a, _b;
- const args = this.getArgs(options);
- const response = await postJsonToApi({
- url: `${this.config.baseUrl}/chat/completions`,
- headers: this.config.headers(),
- body: args,
- failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: createJsonResponseHandler(
- openAIChatResponseSchema
- ),
- abortSignal: options.abortSignal
- });
- const { messages: rawPrompt, ...rawSettings } = args;
- const choice = response.choices[0];
- return {
- text: (_a = choice.message.content) != null ? _a : void 0,
- toolCalls: (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => ({
- toolCallType: "function",
- toolCallId: toolCall.id,
- toolName: toolCall.function.name,
- args: toolCall.function.arguments
- })),
- finishReason: mapOpenAIFinishReason(choice.finish_reason),
- usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens
- },
- rawCall: { rawPrompt, rawSettings },
- warnings: []
- };
- }
- async doStream(options) {
- const args = this.getArgs(options);
- const response = await postJsonToApi({
- url: `${this.config.baseUrl}/chat/completions`,
- headers: this.config.headers(),
- body: {
- ...args,
- stream: true
- },
- failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: createEventSourceResponseHandler(
- openaiChatChunkSchema
- ),
- abortSignal: options.abortSignal
- });
- const { messages: rawPrompt, ...rawSettings } = args;
- const toolCalls = [];
- return {
- stream: response.pipeThrough(
- new TransformStream({
- transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
- if (chunk.type === "error") {
- controller.enqueue(chunk);
- return;
- }
- const value = chunk.value;
- if (((_b = (_a = value.choices) == null ? void 0 : _a[0]) == null ? void 0 : _b.delta) == null) {
- return;
- }
- const delta = value.choices[0].delta;
- if (delta.content != null) {
- controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
- });
- }
- if (delta.tool_calls != null) {
- for (const toolCallDelta of delta.tool_calls) {
- const index = toolCallDelta.index;
- if (toolCalls[index] == null) {
- toolCalls[index] = toolCallDelta;
- continue;
- }
- const toolCall = toolCalls[index];
- if (((_c = toolCallDelta.function) == null ? void 0 : _c.arguments) != null) {
- toolCall.function.arguments += (_e = (_d = toolCallDelta.function) == null ? void 0 : _d.arguments) != null ? _e : "";
- }
- controller.enqueue({
- type: "tool-call-delta",
- toolCallId: (_f = toolCall.id) != null ? _f : "",
- // TODO empty?
- toolName: (_h = (_g = toolCall.function) == null ? void 0 : _g.name) != null ? _h : "",
- // TODO empty?
- argsTextDelta: (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : ""
- // TODO empty?
- });
- if (((_k = toolCall.function) == null ? void 0 : _k.name) == null || ((_l = toolCall.function) == null ? void 0 : _l.arguments) == null || !isParseableJson(toolCall.function.arguments)) {
- continue;
- }
- controller.enqueue({
- type: "tool-call",
- toolCallType: "function",
- toolCallId: (_m = toolCall.id) != null ? _m : (0, import_nanoid.nanoid)(),
- toolName: toolCall.function.name,
- args: toolCall.function.arguments
- });
- }
- }
- }
- })
- ),
- rawCall: { rawPrompt, rawSettings },
- warnings: []
- };
- }
- };
- var openAIChatResponseSchema = import_zod2.z.object({
- choices: import_zod2.z.array(
- import_zod2.z.object({
- message: import_zod2.z.object({
- role: import_zod2.z.literal("assistant"),
- content: import_zod2.z.string().nullable(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- id: import_zod2.z.string(),
- type: import_zod2.z.literal("function"),
- function: import_zod2.z.object({
- name: import_zod2.z.string(),
- arguments: import_zod2.z.string()
- })
- })
- ).optional()
- }),
- index: import_zod2.z.number(),
- finish_reason: import_zod2.z.string().optional().nullable()
- })
- ),
- object: import_zod2.z.literal("chat.completion"),
- usage: import_zod2.z.object({
- prompt_tokens: import_zod2.z.number(),
- completion_tokens: import_zod2.z.number()
- })
- });
- var openaiChatChunkSchema = import_zod2.z.object({
- object: import_zod2.z.literal("chat.completion.chunk"),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- delta: import_zod2.z.object({
- role: import_zod2.z.enum(["assistant"]).optional(),
- content: import_zod2.z.string().nullable().optional(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- index: import_zod2.z.number(),
- id: import_zod2.z.string().optional(),
- type: import_zod2.z.literal("function").optional(),
- function: import_zod2.z.object({
- name: import_zod2.z.string().optional(),
- arguments: import_zod2.z.string().optional()
- })
- })
- ).optional()
- }),
- finish_reason: import_zod2.z.string().nullable().optional(),
- index: import_zod2.z.number()
- })
- )
- });
-
- // provider/openai/openai-completion-language-model.ts
- var import_zod3 = require("zod");
-
- // provider/openai/convert-to-openai-completion-prompt.ts
- function convertToOpenAICompletionPrompt({
- prompt,
- inputFormat,
- provider,
- user = "user",
- assistant = "assistant"
- }) {
- if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
- return { prompt: prompt[0].content[0].text };
- }
- let text = "";
- if (prompt[0].role === "system") {
- text += `${prompt[0].content}
-
- `;
- prompt = prompt.slice(1);
- }
- for (const { role, content } of prompt) {
- switch (role) {
- case "system": {
- throw new Error(`Unexpected system message in prompt: ${content}`);
- break;
- }
- case "user": {
- const userMessage = content.map((part) => {
- switch (part.type) {
- case "text": {
- return part.text;
- }
- case "image": {
- throw new UnsupportedFunctionalityError({
- provider,
- functionality: "images"
- });
- }
- }
- }).join("");
- text += `${user}:
- ${userMessage}
-
- `;
- break;
- }
- case "assistant": {
- const assistantMessage = content.map((part) => {
- switch (part.type) {
- case "text": {
- return part.text;
- }
- case "tool-call": {
- throw new UnsupportedFunctionalityError({
- provider,
- functionality: "tool-call messages"
- });
- }
- }
- }).join("");
- text += `${assistant}:
- ${assistantMessage}
-
- `;
- break;
- }
- case "tool": {
- throw new UnsupportedFunctionalityError({
- provider,
- functionality: "tool messages"
- });
- }
- default: {
- const _exhaustiveCheck = role;
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
- }
- }
- }
- text += `${assistant}:
- `;
- return {
- prompt: text,
- stopSequences: [`
- ${user}:`]
- };
- }
-
- // provider/openai/openai-completion-language-model.ts
- var OpenAICompletionLanguageModel = class {
- constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
- this.defaultObjectGenerationMode = void 0;
- this.modelId = modelId;
- this.settings = settings;
- this.config = config;
- }
- get provider() {
- return this.config.provider;
- }
- getArgs({
- mode,
- inputFormat,
- prompt,
- maxTokens,
- temperature,
- topP,
- frequencyPenalty,
- presencePenalty,
- seed
- }) {
- var _a;
- const type = mode.type;
- const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({
- prompt,
- inputFormat,
- provider: this.provider
- });
- const baseArgs = {
- // model id:
- model: this.modelId,
- // model specific settings:
- echo: this.settings.echo,
- logit_bias: this.settings.logitBias,
- suffix: this.settings.suffix,
- user: this.settings.user,
- // standardized settings:
- max_tokens: maxTokens,
- temperature: scale({
- value: temperature,
- outputMin: 0,
- outputMax: 2
- }),
- top_p: topP,
- frequency_penalty: scale({
- value: frequencyPenalty,
- inputMin: -1,
- inputMax: 1,
- outputMin: -2,
- outputMax: 2
- }),
- presence_penalty: scale({
- value: presencePenalty,
- inputMin: -1,
- inputMax: 1,
- outputMin: -2,
- outputMax: 2
- }),
- seed,
- // prompt:
- prompt: completionPrompt,
- // stop sequences:
- stop: stopSequences
- };
- switch (type) {
- case "regular": {
- if ((_a = mode.tools) == null ? void 0 : _a.length) {
- throw new UnsupportedFunctionalityError({
- functionality: "tools",
- provider: this.provider
- });
- }
- return baseArgs;
- }
- case "object-json": {
- throw new UnsupportedFunctionalityError({
- functionality: "object-json mode",
- provider: this.provider
- });
- }
- case "object-tool": {
- throw new UnsupportedFunctionalityError({
- functionality: "object-tool mode",
- provider: this.provider
- });
- }
- case "object-grammar": {
- throw new UnsupportedFunctionalityError({
- functionality: "object-grammar mode",
- provider: this.provider
- });
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
- }
- async doGenerate(options) {
- const args = this.getArgs(options);
- const response = await postJsonToApi({
- url: `${this.config.baseUrl}/completions`,
- headers: this.config.headers(),
- body: args,
- failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: createJsonResponseHandler(
- openAICompletionResponseSchema
- ),
- abortSignal: options.abortSignal
- });
- const { prompt: rawPrompt, ...rawSettings } = args;
- const choice = response.choices[0];
- return {
- text: choice.text,
- usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens
- },
- finishReason: mapOpenAIFinishReason(choice.finish_reason),
- rawCall: { rawPrompt, rawSettings },
- warnings: []
- };
- }
- async doStream(options) {
- const args = this.getArgs(options);
- const response = await postJsonToApi({
- url: `${this.config.baseUrl}/completions`,
- headers: this.config.headers(),
- body: {
- ...this.getArgs(options),
- stream: true
- },
- failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: createEventSourceResponseHandler(
- openaiCompletionChunkSchema
- ),
- abortSignal: options.abortSignal
- });
- const { prompt: rawPrompt, ...rawSettings } = args;
- return {
- stream: response.pipeThrough(
- new TransformStream({
- transform(chunk, controller) {
- var _a, _b;
- if (chunk.type === "error") {
- controller.enqueue(chunk);
- return;
- }
- const value = chunk.value;
- if (((_b = (_a = value.choices) == null ? void 0 : _a[0]) == null ? void 0 : _b.text) != null) {
- controller.enqueue({
- type: "text-delta",
- textDelta: value.choices[0].text
- });
- }
- }
- })
- ),
- rawCall: { rawPrompt, rawSettings },
- warnings: []
- };
- }
- };
- var openAICompletionResponseSchema = import_zod3.z.object({
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string()
- })
- ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
- })
- });
- var openaiCompletionChunkSchema = import_zod3.z.object({
- object: import_zod3.z.literal("text_completion"),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.enum(["stop", "length", "content_filter"]).optional().nullable(),
- index: import_zod3.z.number()
- })
- )
- });
-
- // provider/openai/openai-facade.ts
- var OpenAI = class {
- constructor(options = {}) {
- this.baseUrl = options.baseUrl;
- this.apiKey = options.apiKey;
- this.organization = options.organization;
- }
- get baseConfig() {
- var _a;
- return {
- organization: this.organization,
- baseUrl: (_a = this.baseUrl) != null ? _a : "https://api.openai.com/v1",
- headers: () => ({
- Authorization: `Bearer ${loadApiKey({
- apiKey: this.apiKey,
- environmentVariableName: "OPENAI_API_KEY",
- description: "OpenAI"
- })}`,
- "OpenAI-Organization": this.organization
- })
- };
- }
- chat(modelId, settings = {}) {
- return new OpenAIChatLanguageModel(modelId, settings, {
- provider: "openai.chat",
- ...this.baseConfig
- });
- }
- completion(modelId, settings = {}) {
- return new OpenAICompletionLanguageModel(modelId, settings, {
- provider: "openai.completion",
- ...this.baseConfig
- });
- }
- };
- var openai = new OpenAI();
- // Annotate the CommonJS export names for ESM import in node:
- 0 && (module.exports = {
- OpenAI,
- openai
- });
- //# sourceMappingURL=index.js.map