ai 3.1.0-canary.4 → 3.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/dist/index.d.mts +982 -24
  2. package/dist/index.d.ts +982 -24
  3. package/dist/index.js +1748 -175
  4. package/dist/index.js.map +1 -1
  5. package/dist/index.mjs +1723 -174
  6. package/dist/index.mjs.map +1 -1
  7. package/package.json +11 -28
  8. package/prompts/dist/index.d.mts +13 -1
  9. package/prompts/dist/index.d.ts +13 -1
  10. package/prompts/dist/index.js +13 -0
  11. package/prompts/dist/index.js.map +1 -1
  12. package/prompts/dist/index.mjs +12 -0
  13. package/prompts/dist/index.mjs.map +1 -1
  14. package/react/dist/index.d.mts +23 -6
  15. package/react/dist/index.d.ts +27 -8
  16. package/react/dist/index.js +154 -141
  17. package/react/dist/index.js.map +1 -1
  18. package/react/dist/index.mjs +153 -141
  19. package/react/dist/index.mjs.map +1 -1
  20. package/react/dist/index.server.d.mts +4 -2
  21. package/react/dist/index.server.d.ts +4 -2
  22. package/react/dist/index.server.js.map +1 -1
  23. package/react/dist/index.server.mjs.map +1 -1
  24. package/rsc/dist/index.d.ts +388 -21
  25. package/rsc/dist/rsc-client.d.mts +1 -1
  26. package/rsc/dist/rsc-client.mjs +2 -0
  27. package/rsc/dist/rsc-client.mjs.map +1 -1
  28. package/rsc/dist/rsc-server.d.mts +370 -21
  29. package/rsc/dist/rsc-server.mjs +677 -36
  30. package/rsc/dist/rsc-server.mjs.map +1 -1
  31. package/rsc/dist/rsc-shared.d.mts +24 -9
  32. package/rsc/dist/rsc-shared.mjs +98 -4
  33. package/rsc/dist/rsc-shared.mjs.map +1 -1
  34. package/solid/dist/index.d.mts +7 -3
  35. package/solid/dist/index.d.ts +7 -3
  36. package/solid/dist/index.js +106 -107
  37. package/solid/dist/index.js.map +1 -1
  38. package/solid/dist/index.mjs +106 -107
  39. package/solid/dist/index.mjs.map +1 -1
  40. package/svelte/dist/index.d.mts +7 -3
  41. package/svelte/dist/index.d.ts +7 -3
  42. package/svelte/dist/index.js +109 -109
  43. package/svelte/dist/index.js.map +1 -1
  44. package/svelte/dist/index.mjs +109 -109
  45. package/svelte/dist/index.mjs.map +1 -1
  46. package/vue/dist/index.d.mts +7 -3
  47. package/vue/dist/index.d.ts +7 -3
  48. package/vue/dist/index.js +106 -107
  49. package/vue/dist/index.js.map +1 -1
  50. package/vue/dist/index.mjs +106 -107
  51. package/vue/dist/index.mjs.map +1 -1
  52. package/ai-model-specification/dist/index.d.mts +0 -665
  53. package/ai-model-specification/dist/index.d.ts +0 -665
  54. package/ai-model-specification/dist/index.js +0 -716
  55. package/ai-model-specification/dist/index.js.map +0 -1
  56. package/ai-model-specification/dist/index.mjs +0 -656
  57. package/ai-model-specification/dist/index.mjs.map +0 -1
  58. package/core/dist/index.d.mts +0 -626
  59. package/core/dist/index.d.ts +0 -626
  60. package/core/dist/index.js +0 -1918
  61. package/core/dist/index.js.map +0 -1
  62. package/core/dist/index.mjs +0 -1873
  63. package/core/dist/index.mjs.map +0 -1
  64. package/openai/dist/index.d.mts +0 -429
  65. package/openai/dist/index.d.ts +0 -429
  66. package/openai/dist/index.js +0 -1231
  67. package/openai/dist/index.js.map +0 -1
  68. package/openai/dist/index.mjs +0 -1195
  69. package/openai/dist/index.mjs.map +0 -1
@@ -1,1231 +0,0 @@
1
- "use strict";
2
- var __create = Object.create;
3
- var __defProp = Object.defineProperty;
4
- var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
5
- var __getOwnPropNames = Object.getOwnPropertyNames;
6
- var __getProtoOf = Object.getPrototypeOf;
7
- var __hasOwnProp = Object.prototype.hasOwnProperty;
8
- var __export = (target, all) => {
9
- for (var name in all)
10
- __defProp(target, name, { get: all[name], enumerable: true });
11
- };
12
- var __copyProps = (to, from, except, desc) => {
13
- if (from && typeof from === "object" || typeof from === "function") {
14
- for (let key of __getOwnPropNames(from))
15
- if (!__hasOwnProp.call(to, key) && key !== except)
16
- __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
17
- }
18
- return to;
19
- };
20
- var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
21
- // If the importer is in node compatibility mode or this is not an ESM
22
- // file that has been converted to a CommonJS file using a Babel-
23
- // compatible transform (i.e. "__esModule" has not been set), then set
24
- // "default" to the CommonJS "module.exports" for node compatibility.
25
- isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
26
- mod
27
- ));
28
- var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
29
-
30
- // openai/index.ts
31
- var openai_exports = {};
32
- __export(openai_exports, {
33
- OpenAI: () => OpenAI,
34
- openai: () => openai
35
- });
36
- module.exports = __toCommonJS(openai_exports);
37
-
38
- // ai-model-specification/errors/api-call-error.ts
39
- var APICallError = class extends Error {
40
- constructor({
41
- message,
42
- url,
43
- requestBodyValues,
44
- statusCode,
45
- responseBody,
46
- cause,
47
- isRetryable = statusCode != null && (statusCode === 408 || // request timeout
48
- statusCode === 409 || // conflict
49
- statusCode === 429 || // too many requests
50
- statusCode >= 500),
51
- // server error
52
- data
53
- }) {
54
- super(message);
55
- this.name = "AI_APICallError";
56
- this.url = url;
57
- this.requestBodyValues = requestBodyValues;
58
- this.statusCode = statusCode;
59
- this.responseBody = responseBody;
60
- this.cause = cause;
61
- this.isRetryable = isRetryable;
62
- this.data = data;
63
- }
64
- static isAPICallError(error) {
65
- return error instanceof Error && error.name === "AI_APICallError" && typeof error.url === "string" && typeof error.requestBodyValues === "object" && (error.statusCode == null || typeof error.statusCode === "number") && (error.responseBody == null || typeof error.responseBody === "string") && (error.cause == null || typeof error.cause === "object") && typeof error.isRetryable === "boolean" && (error.data == null || typeof error.data === "object");
66
- }
67
- toJSON() {
68
- return {
69
- name: this.name,
70
- message: this.message,
71
- url: this.url,
72
- requestBodyValues: this.requestBodyValues,
73
- statusCode: this.statusCode,
74
- responseBody: this.responseBody,
75
- cause: this.cause,
76
- isRetryable: this.isRetryable,
77
- data: this.data
78
- };
79
- }
80
- };
81
-
82
- // ai-model-specification/errors/invalid-prompt-error.ts
83
- var InvalidPromptError = class extends Error {
84
- constructor({ prompt: prompt2, message }) {
85
- super(`Invalid prompt: ${message}`);
86
- this.name = "AI_InvalidPromptError";
87
- this.prompt = prompt2;
88
- }
89
- static isInvalidPromptError(error) {
90
- return error instanceof Error && error.name === "AI_InvalidPromptError" && prompt != null;
91
- }
92
- toJSON() {
93
- return {
94
- name: this.name,
95
- message: this.message,
96
- stack: this.stack,
97
- prompt: this.prompt
98
- };
99
- }
100
- };
101
-
102
- // ai-model-specification/util/get-error-message.ts
103
- function getErrorMessage(error) {
104
- if (error == null) {
105
- return "unknown error";
106
- }
107
- if (typeof error === "string") {
108
- return error;
109
- }
110
- if (error instanceof Error) {
111
- return error.message;
112
- }
113
- return JSON.stringify(error);
114
- }
115
-
116
- // ai-model-specification/errors/load-api-key-error.ts
117
- var LoadAPIKeyError = class extends Error {
118
- constructor({ message }) {
119
- super(message);
120
- this.name = "AI_LoadAPIKeyError";
121
- }
122
- static isLoadAPIKeyError(error) {
123
- return error instanceof Error && error.name === "AI_LoadAPIKeyError";
124
- }
125
- toJSON() {
126
- return {
127
- name: this.name,
128
- message: this.message
129
- };
130
- }
131
- };
132
-
133
- // ai-model-specification/util/load-api-key.ts
134
- function loadApiKey({
135
- apiKey,
136
- environmentVariableName,
137
- apiKeyParameterName = "apiKey",
138
- description
139
- }) {
140
- if (apiKey != null) {
141
- return apiKey;
142
- }
143
- if (typeof process === "undefined") {
144
- throw new LoadAPIKeyError({
145
- message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter. Environment variables is not supported in this environment.`
146
- });
147
- }
148
- apiKey = process.env[environmentVariableName];
149
- if (apiKey == null) {
150
- throw new LoadAPIKeyError({
151
- message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter or the ${environmentVariableName} environment variable.`
152
- });
153
- }
154
- return apiKey;
155
- }
156
-
157
- // ai-model-specification/util/parse-json.ts
158
- var import_secure_json_parse = __toESM(require("secure-json-parse"));
159
-
160
- // ai-model-specification/errors/json-parse-error.ts
161
- var JSONParseError = class extends Error {
162
- constructor({ text, cause }) {
163
- super(
164
- `JSON parsing failed: Text: ${text}.
165
- Error message: ${getErrorMessage(cause)}`
166
- );
167
- this.name = "AI_JSONParseError";
168
- this.cause = cause;
169
- this.text = text;
170
- }
171
- static isJSONParseError(error) {
172
- return error instanceof Error && error.name === "AI_JSONParseError" && typeof error.text === "string" && typeof error.cause === "string";
173
- }
174
- toJSON() {
175
- return {
176
- name: this.name,
177
- message: this.message,
178
- cause: this.cause,
179
- stack: this.stack,
180
- valueText: this.text
181
- };
182
- }
183
- };
184
-
185
- // ai-model-specification/errors/type-validation-error.ts
186
- var TypeValidationError = class extends Error {
187
- constructor({ value, cause }) {
188
- super(
189
- `Type validation failed: Value: ${JSON.stringify(value)}.
190
- Error message: ${getErrorMessage(cause)}`
191
- );
192
- this.name = "AI_TypeValidationError";
193
- this.cause = cause;
194
- this.value = value;
195
- }
196
- static isTypeValidationError(error) {
197
- return error instanceof Error && error.name === "AI_TypeValidationError" && typeof error.value === "string" && typeof error.cause === "string";
198
- }
199
- toJSON() {
200
- return {
201
- name: this.name,
202
- message: this.message,
203
- cause: this.cause,
204
- stack: this.stack,
205
- value: this.value
206
- };
207
- }
208
- };
209
-
210
- // ai-model-specification/util/validate-types.ts
211
- function validateTypes({
212
- value,
213
- schema
214
- }) {
215
- try {
216
- return schema.parse(value);
217
- } catch (error) {
218
- throw new TypeValidationError({ value, cause: error });
219
- }
220
- }
221
- function safeValidateTypes({
222
- value,
223
- schema
224
- }) {
225
- try {
226
- const validationResult = schema.safeParse(value);
227
- if (validationResult.success) {
228
- return {
229
- success: true,
230
- value: validationResult.data
231
- };
232
- }
233
- return {
234
- success: false,
235
- error: new TypeValidationError({
236
- value,
237
- cause: validationResult.error
238
- })
239
- };
240
- } catch (error) {
241
- return {
242
- success: false,
243
- error: TypeValidationError.isTypeValidationError(error) ? error : new TypeValidationError({ value, cause: error })
244
- };
245
- }
246
- }
247
-
248
- // ai-model-specification/util/parse-json.ts
249
- function parseJSON({
250
- text,
251
- schema
252
- }) {
253
- try {
254
- const value = import_secure_json_parse.default.parse(text);
255
- if (schema == null) {
256
- return value;
257
- }
258
- return validateTypes({ value, schema });
259
- } catch (error) {
260
- if (JSONParseError.isJSONParseError(error) || TypeValidationError.isTypeValidationError(error)) {
261
- throw error;
262
- }
263
- throw new JSONParseError({ text, cause: error });
264
- }
265
- }
266
- function safeParseJSON({
267
- text,
268
- schema
269
- }) {
270
- try {
271
- const value = import_secure_json_parse.default.parse(text);
272
- if (schema == null) {
273
- return {
274
- success: true,
275
- value
276
- };
277
- }
278
- return safeValidateTypes({ value, schema });
279
- } catch (error) {
280
- return {
281
- success: false,
282
- error: JSONParseError.isJSONParseError(error) ? error : new JSONParseError({ text, cause: error })
283
- };
284
- }
285
- }
286
- function isParseableJson(input) {
287
- try {
288
- import_secure_json_parse.default.parse(input);
289
- return true;
290
- } catch (e) {
291
- return false;
292
- }
293
- }
294
-
295
- // ai-model-specification/util/post-to-api.ts
296
- var postJsonToApi = async ({
297
- url,
298
- headers,
299
- body,
300
- failedResponseHandler,
301
- successfulResponseHandler,
302
- abortSignal
303
- }) => postToApi({
304
- url,
305
- headers: {
306
- ...headers,
307
- "Content-Type": "application/json"
308
- },
309
- body: {
310
- content: JSON.stringify(body),
311
- values: body
312
- },
313
- failedResponseHandler,
314
- successfulResponseHandler,
315
- abortSignal
316
- });
317
- var postToApi = async ({
318
- url,
319
- headers = {},
320
- body,
321
- successfulResponseHandler,
322
- failedResponseHandler,
323
- abortSignal
324
- }) => {
325
- try {
326
- const definedHeaders = Object.fromEntries(
327
- Object.entries(headers).filter(([_key, value]) => value != null)
328
- );
329
- const response = await fetch(url, {
330
- method: "POST",
331
- headers: definedHeaders,
332
- body: body.content,
333
- signal: abortSignal
334
- });
335
- if (!response.ok) {
336
- try {
337
- throw await failedResponseHandler({
338
- response,
339
- url,
340
- requestBodyValues: body.values
341
- });
342
- } catch (error) {
343
- if (error instanceof Error) {
344
- if (error.name === "AbortError" || APICallError.isAPICallError(error)) {
345
- throw error;
346
- }
347
- }
348
- throw new APICallError({
349
- message: "Failed to process error response",
350
- cause: error,
351
- statusCode: response.status,
352
- url,
353
- requestBodyValues: body.values
354
- });
355
- }
356
- }
357
- try {
358
- return await successfulResponseHandler({
359
- response,
360
- url,
361
- requestBodyValues: body.values
362
- });
363
- } catch (error) {
364
- if (error instanceof Error) {
365
- if (error.name === "AbortError" || APICallError.isAPICallError(error)) {
366
- throw error;
367
- }
368
- }
369
- throw new APICallError({
370
- message: "Failed to process successful response",
371
- cause: error,
372
- statusCode: response.status,
373
- url,
374
- requestBodyValues: body.values
375
- });
376
- }
377
- } catch (error) {
378
- if (error instanceof Error) {
379
- if (error.name === "AbortError") {
380
- throw error;
381
- }
382
- }
383
- if (error instanceof TypeError && error.message === "fetch failed") {
384
- const cause = error.cause;
385
- if (cause != null) {
386
- throw new APICallError({
387
- message: `Cannot connect to API: ${cause.message}`,
388
- cause,
389
- url,
390
- requestBodyValues: body.values,
391
- isRetryable: true
392
- // retry when network error
393
- });
394
- }
395
- }
396
- throw error;
397
- }
398
- };
399
-
400
- // ai-model-specification/util/response-handler.ts
401
- var import_stream = require("eventsource-parser/stream");
402
- var createJsonErrorResponseHandler = ({
403
- errorSchema,
404
- errorToMessage,
405
- isRetryable
406
- }) => async ({ response, url, requestBodyValues }) => {
407
- const responseBody = await response.text();
408
- if (responseBody.trim() === "") {
409
- return new APICallError({
410
- message: response.statusText,
411
- url,
412
- requestBodyValues,
413
- statusCode: response.status,
414
- responseBody,
415
- isRetryable: isRetryable == null ? void 0 : isRetryable(response)
416
- });
417
- }
418
- try {
419
- const parsedError = parseJSON({
420
- text: responseBody,
421
- schema: errorSchema
422
- });
423
- return new APICallError({
424
- message: errorToMessage(parsedError),
425
- url,
426
- requestBodyValues,
427
- statusCode: response.status,
428
- responseBody,
429
- data: parsedError,
430
- isRetryable: isRetryable == null ? void 0 : isRetryable(response, parsedError)
431
- });
432
- } catch (parseError) {
433
- return new APICallError({
434
- message: response.statusText,
435
- url,
436
- requestBodyValues,
437
- statusCode: response.status,
438
- responseBody,
439
- isRetryable: isRetryable == null ? void 0 : isRetryable(response)
440
- });
441
- }
442
- };
443
- var createEventSourceResponseHandler = (chunkSchema) => async ({ response }) => {
444
- if (response.body == null) {
445
- throw new Error("No response body");
446
- }
447
- return response.body.pipeThrough(new TextDecoderStream()).pipeThrough(new import_stream.EventSourceParserStream()).pipeThrough(
448
- new TransformStream({
449
- transform({ data }, controller) {
450
- if (data === "[DONE]") {
451
- return;
452
- }
453
- const parseResult = safeParseJSON({
454
- text: data,
455
- schema: chunkSchema
456
- });
457
- controller.enqueue(
458
- parseResult.success ? { type: "value", value: parseResult.value } : { type: "error", error: parseResult.error }
459
- );
460
- }
461
- })
462
- );
463
- };
464
- var createJsonResponseHandler = (responseSchema) => async ({ response, url, requestBodyValues }) => {
465
- const responseBody = await response.text();
466
- const parsedResult = safeParseJSON({
467
- text: responseBody,
468
- schema: responseSchema
469
- });
470
- if (!parsedResult.success) {
471
- throw new APICallError({
472
- message: "Invalid JSON response",
473
- cause: parsedResult.error,
474
- statusCode: response.status,
475
- responseBody,
476
- url,
477
- requestBodyValues
478
- });
479
- }
480
- return parsedResult.value;
481
- };
482
-
483
- // ai-model-specification/util/scale.ts
484
- function scale({
485
- inputMin = 0,
486
- inputMax = 1,
487
- outputMin,
488
- outputMax,
489
- value
490
- }) {
491
- if (value === void 0) {
492
- return void 0;
493
- }
494
- const inputRange = inputMax - inputMin;
495
- const outputRange = outputMax - outputMin;
496
- return (value - inputMin) * outputRange / inputRange + outputMin;
497
- }
498
-
499
- // ai-model-specification/util/uint8-utils.ts
500
- function convertUint8ArrayToBase64(array) {
501
- let latin1string = "";
502
- for (const value of array) {
503
- latin1string += String.fromCodePoint(value);
504
- }
505
- return globalThis.btoa(latin1string);
506
- }
507
-
508
- // ai-model-specification/errors/unsupported-functionality-error.ts
509
- var UnsupportedFunctionalityError = class extends Error {
510
- constructor({
511
- provider,
512
- functionality
513
- }) {
514
- super(
515
- `Functionality not supported by the provider. Provider: ${provider}.
516
- Functionality: ${functionality}`
517
- );
518
- this.name = "AI_UnsupportedFunctionalityError";
519
- this.provider = provider;
520
- this.functionality = functionality;
521
- }
522
- static isUnsupportedFunctionalityError(error) {
523
- return error instanceof Error && error.name === "AI_UnsupportedFunctionalityError" && typeof error.provider === "string" && typeof error.functionality === "string";
524
- }
525
- toJSON() {
526
- return {
527
- name: this.name,
528
- message: this.message,
529
- stack: this.stack,
530
- provider: this.provider,
531
- functionality: this.functionality
532
- };
533
- }
534
- };
535
-
536
- // openai/openai-chat-language-model.ts
537
- var import_nanoid = require("nanoid");
538
- var import_zod2 = require("zod");
539
-
540
- // openai/convert-to-openai-chat-messages.ts
541
- function convertToOpenAIChatMessages(prompt2) {
542
- const messages = [];
543
- for (const { role, content } of prompt2) {
544
- switch (role) {
545
- case "system": {
546
- messages.push({ role: "system", content });
547
- break;
548
- }
549
- case "user": {
550
- messages.push({
551
- role: "user",
552
- content: content.map((part) => {
553
- var _a;
554
- switch (part.type) {
555
- case "text": {
556
- return { type: "text", text: part.text };
557
- }
558
- case "image": {
559
- return {
560
- type: "image_url",
561
- image_url: {
562
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`
563
- }
564
- };
565
- }
566
- }
567
- })
568
- });
569
- break;
570
- }
571
- case "assistant": {
572
- let text = "";
573
- const toolCalls = [];
574
- for (const part of content) {
575
- switch (part.type) {
576
- case "text": {
577
- text += part.text;
578
- break;
579
- }
580
- case "tool-call": {
581
- toolCalls.push({
582
- id: part.toolCallId,
583
- type: "function",
584
- function: {
585
- name: part.toolName,
586
- arguments: JSON.stringify(part.args)
587
- }
588
- });
589
- break;
590
- }
591
- default: {
592
- const _exhaustiveCheck = part;
593
- throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
594
- }
595
- }
596
- }
597
- messages.push({
598
- role: "assistant",
599
- content: text,
600
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
601
- });
602
- break;
603
- }
604
- case "tool": {
605
- for (const toolResponse of content) {
606
- messages.push({
607
- role: "tool",
608
- tool_call_id: toolResponse.toolCallId,
609
- content: JSON.stringify(toolResponse.result)
610
- });
611
- }
612
- break;
613
- }
614
- default: {
615
- const _exhaustiveCheck = role;
616
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
617
- }
618
- }
619
- }
620
- return messages;
621
- }
622
-
623
- // openai/map-openai-finish-reason.ts
624
- function mapOpenAIFinishReason(finishReason) {
625
- switch (finishReason) {
626
- case "stop":
627
- return "stop";
628
- case "length":
629
- return "length";
630
- case "content-filter":
631
- return "content-filter";
632
- case "function_call":
633
- case "tool-calls":
634
- return "tool-calls";
635
- default:
636
- return "other";
637
- }
638
- }
639
-
640
- // openai/openai-error.ts
641
- var import_zod = require("zod");
642
- var openAIErrorDataSchema = import_zod.z.object({
643
- error: import_zod.z.object({
644
- message: import_zod.z.string(),
645
- type: import_zod.z.string(),
646
- param: import_zod.z.any().nullable(),
647
- code: import_zod.z.string().nullable()
648
- })
649
- });
650
- var openaiFailedResponseHandler = createJsonErrorResponseHandler({
651
- errorSchema: openAIErrorDataSchema,
652
- errorToMessage: (data) => data.error.message
653
- });
654
-
655
- // openai/openai-chat-language-model.ts
656
- var OpenAIChatLanguageModel = class {
657
- constructor(modelId, settings, config) {
658
- this.specificationVersion = "v1";
659
- this.defaultObjectGenerationMode = "tool";
660
- this.modelId = modelId;
661
- this.settings = settings;
662
- this.config = config;
663
- }
664
- get provider() {
665
- return this.config.provider;
666
- }
667
- getArgs({
668
- mode,
669
- prompt: prompt2,
670
- maxTokens,
671
- temperature,
672
- topP,
673
- frequencyPenalty,
674
- presencePenalty,
675
- seed
676
- }) {
677
- var _a;
678
- const type = mode.type;
679
- const baseArgs = {
680
- // model id:
681
- model: this.modelId,
682
- // model specific settings:
683
- logit_bias: this.settings.logitBias,
684
- user: this.settings.user,
685
- // standardized settings:
686
- max_tokens: maxTokens,
687
- temperature: scale({
688
- value: temperature,
689
- outputMin: 0,
690
- outputMax: 2
691
- }),
692
- top_p: topP,
693
- frequency_penalty: scale({
694
- value: frequencyPenalty,
695
- inputMin: -1,
696
- inputMax: 1,
697
- outputMin: -2,
698
- outputMax: 2
699
- }),
700
- presence_penalty: scale({
701
- value: presencePenalty,
702
- inputMin: -1,
703
- inputMax: 1,
704
- outputMin: -2,
705
- outputMax: 2
706
- }),
707
- seed,
708
- // messages:
709
- messages: convertToOpenAIChatMessages(prompt2)
710
- };
711
- switch (type) {
712
- case "regular": {
713
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
714
- return {
715
- ...baseArgs,
716
- tools: tools == null ? void 0 : tools.map((tool) => ({
717
- type: "function",
718
- function: {
719
- name: tool.name,
720
- description: tool.description,
721
- parameters: tool.parameters
722
- }
723
- }))
724
- };
725
- }
726
- case "object-json": {
727
- return {
728
- ...baseArgs,
729
- response_format: { type: "json_object" }
730
- };
731
- }
732
- case "object-tool": {
733
- return {
734
- ...baseArgs,
735
- tool_choice: { type: "function", function: { name: mode.tool.name } },
736
- tools: [{ type: "function", function: mode.tool }]
737
- };
738
- }
739
- case "object-grammar": {
740
- throw new UnsupportedFunctionalityError({
741
- functionality: "object-grammar mode",
742
- provider: this.provider
743
- });
744
- }
745
- default: {
746
- const _exhaustiveCheck = type;
747
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
748
- }
749
- }
750
- }
751
- async doGenerate(options) {
752
- var _a, _b;
753
- const args = this.getArgs(options);
754
- const response = await postJsonToApi({
755
- url: `${this.config.baseUrl}/chat/completions`,
756
- headers: this.config.headers(),
757
- body: args,
758
- failedResponseHandler: openaiFailedResponseHandler,
759
- successfulResponseHandler: createJsonResponseHandler(
760
- openAIChatResponseSchema
761
- ),
762
- abortSignal: options.abortSignal
763
- });
764
- const { messages: rawPrompt, ...rawSettings } = args;
765
- const choice = response.choices[0];
766
- return {
767
- text: (_a = choice.message.content) != null ? _a : void 0,
768
- toolCalls: (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => ({
769
- toolCallType: "function",
770
- toolCallId: toolCall.id,
771
- toolName: toolCall.function.name,
772
- args: toolCall.function.arguments
773
- })),
774
- finishReason: mapOpenAIFinishReason(choice.finish_reason),
775
- usage: {
776
- promptTokens: response.usage.prompt_tokens,
777
- completionTokens: response.usage.completion_tokens
778
- },
779
- rawCall: { rawPrompt, rawSettings },
780
- warnings: []
781
- };
782
- }
783
- async doStream(options) {
784
- const args = this.getArgs(options);
785
- const response = await postJsonToApi({
786
- url: `${this.config.baseUrl}/chat/completions`,
787
- headers: this.config.headers(),
788
- body: {
789
- ...args,
790
- stream: true
791
- },
792
- failedResponseHandler: openaiFailedResponseHandler,
793
- successfulResponseHandler: createEventSourceResponseHandler(
794
- openaiChatChunkSchema
795
- ),
796
- abortSignal: options.abortSignal
797
- });
798
- const { messages: rawPrompt, ...rawSettings } = args;
799
- const toolCalls = [];
800
- return {
801
- stream: response.pipeThrough(
802
- new TransformStream({
803
- transform(chunk, controller) {
804
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
805
- if (chunk.type === "error") {
806
- controller.enqueue(chunk);
807
- return;
808
- }
809
- const value = chunk.value;
810
- if (((_b = (_a = value.choices) == null ? void 0 : _a[0]) == null ? void 0 : _b.delta) == null) {
811
- return;
812
- }
813
- const delta = value.choices[0].delta;
814
- if (delta.content != null) {
815
- controller.enqueue({
816
- type: "text-delta",
817
- textDelta: delta.content
818
- });
819
- }
820
- if (delta.tool_calls != null) {
821
- for (const toolCallDelta of delta.tool_calls) {
822
- const index = toolCallDelta.index;
823
- if (toolCalls[index] == null) {
824
- toolCalls[index] = toolCallDelta;
825
- continue;
826
- }
827
- const toolCall = toolCalls[index];
828
- if (((_c = toolCallDelta.function) == null ? void 0 : _c.arguments) != null) {
829
- toolCall.function.arguments += (_e = (_d = toolCallDelta.function) == null ? void 0 : _d.arguments) != null ? _e : "";
830
- }
831
- controller.enqueue({
832
- type: "tool-call-delta",
833
- toolCallId: (_f = toolCall.id) != null ? _f : "",
834
- // TODO empty?
835
- toolName: (_h = (_g = toolCall.function) == null ? void 0 : _g.name) != null ? _h : "",
836
- // TODO empty?
837
- argsTextDelta: (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : ""
838
- // TODO empty?
839
- });
840
- if (((_k = toolCall.function) == null ? void 0 : _k.name) == null || ((_l = toolCall.function) == null ? void 0 : _l.arguments) == null || !isParseableJson(toolCall.function.arguments)) {
841
- continue;
842
- }
843
- controller.enqueue({
844
- type: "tool-call",
845
- toolCallType: "function",
846
- toolCallId: (_m = toolCall.id) != null ? _m : (0, import_nanoid.nanoid)(),
847
- toolName: toolCall.function.name,
848
- args: toolCall.function.arguments
849
- });
850
- }
851
- }
852
- }
853
- })
854
- ),
855
- rawCall: { rawPrompt, rawSettings },
856
- warnings: []
857
- };
858
- }
859
- };
860
- var openAIChatResponseSchema = import_zod2.z.object({
861
- choices: import_zod2.z.array(
862
- import_zod2.z.object({
863
- message: import_zod2.z.object({
864
- role: import_zod2.z.literal("assistant"),
865
- content: import_zod2.z.string().nullable(),
866
- tool_calls: import_zod2.z.array(
867
- import_zod2.z.object({
868
- id: import_zod2.z.string(),
869
- type: import_zod2.z.literal("function"),
870
- function: import_zod2.z.object({
871
- name: import_zod2.z.string(),
872
- arguments: import_zod2.z.string()
873
- })
874
- })
875
- ).optional()
876
- }),
877
- index: import_zod2.z.number(),
878
- finish_reason: import_zod2.z.string().optional().nullable()
879
- })
880
- ),
881
- object: import_zod2.z.literal("chat.completion"),
882
- usage: import_zod2.z.object({
883
- prompt_tokens: import_zod2.z.number(),
884
- completion_tokens: import_zod2.z.number()
885
- })
886
- });
887
- var openaiChatChunkSchema = import_zod2.z.object({
888
- object: import_zod2.z.literal("chat.completion.chunk"),
889
- choices: import_zod2.z.array(
890
- import_zod2.z.object({
891
- delta: import_zod2.z.object({
892
- role: import_zod2.z.enum(["assistant"]).optional(),
893
- content: import_zod2.z.string().nullable().optional(),
894
- tool_calls: import_zod2.z.array(
895
- import_zod2.z.object({
896
- index: import_zod2.z.number(),
897
- id: import_zod2.z.string().optional(),
898
- type: import_zod2.z.literal("function").optional(),
899
- function: import_zod2.z.object({
900
- name: import_zod2.z.string().optional(),
901
- arguments: import_zod2.z.string().optional()
902
- })
903
- })
904
- ).optional()
905
- }),
906
- finish_reason: import_zod2.z.string().nullable().optional(),
907
- index: import_zod2.z.number()
908
- })
909
- )
910
- });
911
-
912
- // openai/openai-completion-language-model.ts
913
- var import_zod3 = require("zod");
914
-
915
// openai/convert-to-openai-completion-prompt.ts
/**
 * Converts a standardized chat-style prompt into a single text prompt for the
 * OpenAI /completions endpoint.
 *
 * @param prompt - array of { role, content } messages (content is a string for
 *   system messages, an array of typed parts otherwise).
 * @param inputFormat - "prompt" enables the single-user-text fast path.
 * @param provider - provider id, used in UnsupportedFunctionalityError.
 * @param user - label prefixed to user turns (default "user").
 * @param assistant - label prefixed to assistant turns (default "assistant").
 * @returns { prompt, stopSequences? } - the flattened text prompt and the stop
 *   sequence that ends generation before a hallucinated next user turn.
 * @throws InvalidPromptError for a system message that is not the first entry.
 * @throws UnsupportedFunctionalityError for images and tool messages.
 */
function convertToOpenAICompletionPrompt({
  prompt: prompt2,
  inputFormat,
  provider,
  user = "user",
  assistant = "assistant"
}) {
  // Fast path: a raw single-part user text prompt maps straight onto the
  // completion prompt with no role labels and no stop sequences.
  if (inputFormat === "prompt" && prompt2.length === 1 && prompt2[0].role === "user" && prompt2[0].content.length === 1 && prompt2[0].content[0].type === "text") {
    return { prompt: prompt2[0].content[0].text };
  }
  let text = "";
  // A leading system message becomes a preamble paragraph.
  if (prompt2[0].role === "system") {
    text += `${prompt2[0].content}

`;
    prompt2 = prompt2.slice(1);
  }
  for (const { role, content } of prompt2) {
    switch (role) {
      case "system": {
        // FIX: this message was a plain double-quoted string, so "${content}"
        // appeared literally in the error text. A template literal actually
        // interpolates the offending content.
        throw new InvalidPromptError({
          message: `Unexpected system message in prompt: ${content}`,
          prompt: prompt2
        });
      }
      case "user": {
        const userMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "image": {
              throw new UnsupportedFunctionalityError({
                provider,
                functionality: "images"
              });
            }
          }
        }).join("");
        text += `${user}:
${userMessage}

`;
        break;
      }
      case "assistant": {
        const assistantMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "tool-call": {
              throw new UnsupportedFunctionalityError({
                provider,
                functionality: "tool-call messages"
              });
            }
          }
        }).join("");
        text += `${assistant}:
${assistantMessage}

`;
        break;
      }
      case "tool": {
        throw new UnsupportedFunctionalityError({
          provider,
          functionality: "tool messages"
        });
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  // Leave the prompt open on an assistant turn so the model completes it.
  text += `${assistant}:
`;
  return {
    prompt: text,
    stopSequences: [`
${user}:`]
  };
}
1001
-
1002
// openai/openai-completion-language-model.ts
var OpenAICompletionLanguageModel = class {
  /**
   * Language model wrapper for the OpenAI /completions endpoint.
   *
   * @param modelId - OpenAI completion model id.
   * @param settings - model-specific settings (echo, logitBias, suffix, user).
   * @param config - provider config exposing provider, baseUrl and headers().
   */
  constructor(modelId, settings, config) {
    this.specificationVersion = "v1";
    // Completion models have no object-generation mode.
    this.defaultObjectGenerationMode = void 0;
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the JSON request body for /completions from standardized call
  // options. Throws UnsupportedFunctionalityError for modes/tools the
  // endpoint cannot serve.
  getArgs({
    mode,
    inputFormat,
    prompt: prompt2,
    maxTokens,
    temperature,
    topP,
    frequencyPenalty,
    presencePenalty,
    seed
  }) {
    var _a;
    const type = mode.type;
    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({
      prompt: prompt2,
      inputFormat,
      provider: this.provider
    });
    const baseArgs = {
      // model id:
      model: this.modelId,
      // model specific settings:
      echo: this.settings.echo,
      logit_bias: this.settings.logitBias,
      suffix: this.settings.suffix,
      user: this.settings.user,
      // standardized settings:
      max_tokens: maxTokens,
      // scale standardized temperature/penalties into OpenAI's ranges:
      temperature: scale({
        value: temperature,
        outputMin: 0,
        outputMax: 2
      }),
      top_p: topP,
      frequency_penalty: scale({
        value: frequencyPenalty,
        inputMin: -1,
        inputMax: 1,
        outputMin: -2,
        outputMax: 2
      }),
      presence_penalty: scale({
        value: presencePenalty,
        inputMin: -1,
        inputMax: 1,
        outputMin: -2,
        outputMax: 2
      }),
      seed,
      // prompt:
      prompt: completionPrompt,
      // stop sequences:
      stop: stopSequences
    };
    switch (type) {
      case "regular": {
        if ((_a = mode.tools) == null ? void 0 : _a.length) {
          throw new UnsupportedFunctionalityError({
            functionality: "tools",
            provider: this.provider
          });
        }
        return baseArgs;
      }
      case "object-json": {
        throw new UnsupportedFunctionalityError({
          functionality: "object-json mode",
          provider: this.provider
        });
      }
      case "object-tool": {
        throw new UnsupportedFunctionalityError({
          functionality: "object-tool mode",
          provider: this.provider
        });
      }
      case "object-grammar": {
        throw new UnsupportedFunctionalityError({
          functionality: "object-grammar mode",
          provider: this.provider
        });
      }
      default: {
        const _exhaustiveCheck = type;
        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
      }
    }
  }
  // Non-streaming generation: POSTs the args and maps the first choice.
  async doGenerate(options) {
    const args = this.getArgs(options);
    const response = await postJsonToApi({
      url: `${this.config.baseUrl}/completions`,
      headers: this.config.headers(),
      body: args,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(
        openAICompletionResponseSchema
      ),
      abortSignal: options.abortSignal
    });
    // rawPrompt/rawSettings are surfaced separately for observability.
    const { prompt: rawPrompt, ...rawSettings } = args;
    const choice = response.choices[0];
    return {
      text: choice.text,
      usage: {
        promptTokens: response.usage.prompt_tokens,
        completionTokens: response.usage.completion_tokens
      },
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
      rawCall: { rawPrompt, rawSettings },
      warnings: []
    };
  }
  // Streaming generation: POSTs with stream:true and converts SSE chunks
  // into text-delta stream parts.
  async doStream(options) {
    const args = this.getArgs(options);
    const response = await postJsonToApi({
      url: `${this.config.baseUrl}/completions`,
      headers: this.config.headers(),
      body: {
        // FIX: reuse the args computed above instead of invoking
        // this.getArgs(options) a second time (redundant recomputation of
        // the prompt conversion and argument scaling).
        ...args,
        stream: true
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler(
        openaiCompletionChunkSchema
      ),
      abortSignal: options.abortSignal
    });
    const { prompt: rawPrompt, ...rawSettings } = args;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          transform(chunk, controller) {
            var _a, _b;
            // Pass parse/transport errors through unchanged.
            if (chunk.type === "error") {
              controller.enqueue(chunk);
              return;
            }
            const value = chunk.value;
            if (((_b = (_a = value.choices) == null ? void 0 : _a[0]) == null ? void 0 : _b.text) != null) {
              controller.enqueue({
                type: "text-delta",
                textDelta: value.choices[0].text
              });
            }
          }
        })
      ),
      rawCall: { rawPrompt, rawSettings },
      warnings: []
    };
  }
};
1167
// Validates the non-streaming /completions response body: an array of text
// choices plus token-usage accounting.
var openAICompletionResponseSchema = (() => {
  const { z } = import_zod3;
  const choiceSchema = z.object({
    text: z.string(),
    finish_reason: z.string()
  });
  const usageSchema = z.object({
    prompt_tokens: z.number(),
    completion_tokens: z.number()
  });
  return z.object({
    choices: z.array(choiceSchema),
    usage: usageSchema
  });
})();
1179
// Validates a single SSE chunk from a streaming /completions call;
// finish_reason may be absent or null on intermediate chunks.
var openaiCompletionChunkSchema = (() => {
  const { z } = import_zod3;
  const chunkChoiceSchema = z.object({
    text: z.string(),
    finish_reason: z.enum(["stop", "length", "content_filter"]).optional().nullable(),
    index: z.number()
  });
  return z.object({
    object: z.literal("text_completion"),
    choices: z.array(chunkChoiceSchema)
  });
})();
1189
-
1190
// openai/openai-facade.ts
var OpenAI = class {
  /**
   * Provider facade producing chat and completion language models.
   *
   * @param options - optional { baseUrl, apiKey, organization }; the API key
   *   falls back to the OPENAI_API_KEY environment variable when headers are
   *   actually built.
   */
  constructor(options = {}) {
    const { baseUrl, apiKey, organization } = options;
    this.baseUrl = baseUrl;
    this.apiKey = apiKey;
    this.organization = organization;
  }
  // Configuration shared by both model factories. The Authorization header
  // is resolved lazily on every headers() invocation, not at construction.
  get baseConfig() {
    const resolvedBaseUrl = this.baseUrl != null ? this.baseUrl : "https://api.openai.com/v1";
    const buildHeaders = () => {
      const key = loadApiKey({
        apiKey: this.apiKey,
        environmentVariableName: "OPENAI_API_KEY",
        description: "OpenAI"
      });
      return {
        Authorization: `Bearer ${key}`,
        "OpenAI-Organization": this.organization
      };
    };
    return {
      organization: this.organization,
      baseUrl: resolvedBaseUrl,
      headers: buildHeaders
    };
  }
  // Factory for chat-completions models.
  chat(modelId, settings = {}) {
    const config = { provider: "openai.chat", ...this.baseConfig };
    return new OpenAIChatLanguageModel(modelId, settings, config);
  }
  // Factory for legacy text-completions models.
  completion(modelId, settings = {}) {
    const config = { provider: "openai.completion", ...this.baseConfig };
    return new OpenAICompletionLanguageModel(modelId, settings, config);
  }
};
1225
// Default provider instance, configured entirely from the environment
// (OPENAI_API_KEY is read lazily when headers are built).
var openai = new OpenAI();
// Annotate the CommonJS export names for ESM import in node:
// NOTE(review): the `0 &&` expression is dead code presumably emitted by the
// bundler so Node can statically detect the named exports — do not remove.
0 && (module.exports = {
  OpenAI,
  openai
});
//# sourceMappingURL=index.js.map