ai 3.1.0-canary.4 → 3.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69) hide show
  1. package/dist/index.d.mts +982 -24
  2. package/dist/index.d.ts +982 -24
  3. package/dist/index.js +1748 -175
  4. package/dist/index.js.map +1 -1
  5. package/dist/index.mjs +1723 -174
  6. package/dist/index.mjs.map +1 -1
  7. package/package.json +11 -28
  8. package/prompts/dist/index.d.mts +13 -1
  9. package/prompts/dist/index.d.ts +13 -1
  10. package/prompts/dist/index.js +13 -0
  11. package/prompts/dist/index.js.map +1 -1
  12. package/prompts/dist/index.mjs +12 -0
  13. package/prompts/dist/index.mjs.map +1 -1
  14. package/react/dist/index.d.mts +23 -6
  15. package/react/dist/index.d.ts +27 -8
  16. package/react/dist/index.js +154 -141
  17. package/react/dist/index.js.map +1 -1
  18. package/react/dist/index.mjs +153 -141
  19. package/react/dist/index.mjs.map +1 -1
  20. package/react/dist/index.server.d.mts +4 -2
  21. package/react/dist/index.server.d.ts +4 -2
  22. package/react/dist/index.server.js.map +1 -1
  23. package/react/dist/index.server.mjs.map +1 -1
  24. package/rsc/dist/index.d.ts +388 -21
  25. package/rsc/dist/rsc-client.d.mts +1 -1
  26. package/rsc/dist/rsc-client.mjs +2 -0
  27. package/rsc/dist/rsc-client.mjs.map +1 -1
  28. package/rsc/dist/rsc-server.d.mts +370 -21
  29. package/rsc/dist/rsc-server.mjs +677 -36
  30. package/rsc/dist/rsc-server.mjs.map +1 -1
  31. package/rsc/dist/rsc-shared.d.mts +24 -9
  32. package/rsc/dist/rsc-shared.mjs +98 -4
  33. package/rsc/dist/rsc-shared.mjs.map +1 -1
  34. package/solid/dist/index.d.mts +7 -3
  35. package/solid/dist/index.d.ts +7 -3
  36. package/solid/dist/index.js +106 -107
  37. package/solid/dist/index.js.map +1 -1
  38. package/solid/dist/index.mjs +106 -107
  39. package/solid/dist/index.mjs.map +1 -1
  40. package/svelte/dist/index.d.mts +7 -3
  41. package/svelte/dist/index.d.ts +7 -3
  42. package/svelte/dist/index.js +109 -109
  43. package/svelte/dist/index.js.map +1 -1
  44. package/svelte/dist/index.mjs +109 -109
  45. package/svelte/dist/index.mjs.map +1 -1
  46. package/vue/dist/index.d.mts +7 -3
  47. package/vue/dist/index.d.ts +7 -3
  48. package/vue/dist/index.js +106 -107
  49. package/vue/dist/index.js.map +1 -1
  50. package/vue/dist/index.mjs +106 -107
  51. package/vue/dist/index.mjs.map +1 -1
  52. package/ai-model-specification/dist/index.d.mts +0 -665
  53. package/ai-model-specification/dist/index.d.ts +0 -665
  54. package/ai-model-specification/dist/index.js +0 -716
  55. package/ai-model-specification/dist/index.js.map +0 -1
  56. package/ai-model-specification/dist/index.mjs +0 -656
  57. package/ai-model-specification/dist/index.mjs.map +0 -1
  58. package/core/dist/index.d.mts +0 -626
  59. package/core/dist/index.d.ts +0 -626
  60. package/core/dist/index.js +0 -1918
  61. package/core/dist/index.js.map +0 -1
  62. package/core/dist/index.mjs +0 -1873
  63. package/core/dist/index.mjs.map +0 -1
  64. package/openai/dist/index.d.mts +0 -429
  65. package/openai/dist/index.d.ts +0 -429
  66. package/openai/dist/index.js +0 -1231
  67. package/openai/dist/index.js.map +0 -1
  68. package/openai/dist/index.mjs +0 -1195
  69. package/openai/dist/index.mjs.map +0 -1
@@ -1,1195 +0,0 @@
1
- // ai-model-specification/errors/api-call-error.ts
2
- var APICallError = class extends Error {
3
- constructor({
4
- message,
5
- url,
6
- requestBodyValues,
7
- statusCode,
8
- responseBody,
9
- cause,
10
- isRetryable = statusCode != null && (statusCode === 408 || // request timeout
11
- statusCode === 409 || // conflict
12
- statusCode === 429 || // too many requests
13
- statusCode >= 500),
14
- // server error
15
- data
16
- }) {
17
- super(message);
18
- this.name = "AI_APICallError";
19
- this.url = url;
20
- this.requestBodyValues = requestBodyValues;
21
- this.statusCode = statusCode;
22
- this.responseBody = responseBody;
23
- this.cause = cause;
24
- this.isRetryable = isRetryable;
25
- this.data = data;
26
- }
27
- static isAPICallError(error) {
28
- return error instanceof Error && error.name === "AI_APICallError" && typeof error.url === "string" && typeof error.requestBodyValues === "object" && (error.statusCode == null || typeof error.statusCode === "number") && (error.responseBody == null || typeof error.responseBody === "string") && (error.cause == null || typeof error.cause === "object") && typeof error.isRetryable === "boolean" && (error.data == null || typeof error.data === "object");
29
- }
30
- toJSON() {
31
- return {
32
- name: this.name,
33
- message: this.message,
34
- url: this.url,
35
- requestBodyValues: this.requestBodyValues,
36
- statusCode: this.statusCode,
37
- responseBody: this.responseBody,
38
- cause: this.cause,
39
- isRetryable: this.isRetryable,
40
- data: this.data
41
- };
42
- }
43
- };
44
-
45
- // ai-model-specification/errors/invalid-prompt-error.ts
46
- var InvalidPromptError = class extends Error {
47
- constructor({ prompt: prompt2, message }) {
48
- super(`Invalid prompt: ${message}`);
49
- this.name = "AI_InvalidPromptError";
50
- this.prompt = prompt2;
51
- }
52
- static isInvalidPromptError(error) {
53
- return error instanceof Error && error.name === "AI_InvalidPromptError" && prompt != null;
54
- }
55
- toJSON() {
56
- return {
57
- name: this.name,
58
- message: this.message,
59
- stack: this.stack,
60
- prompt: this.prompt
61
- };
62
- }
63
- };
64
-
65
- // ai-model-specification/util/get-error-message.ts
66
- function getErrorMessage(error) {
67
- if (error == null) {
68
- return "unknown error";
69
- }
70
- if (typeof error === "string") {
71
- return error;
72
- }
73
- if (error instanceof Error) {
74
- return error.message;
75
- }
76
- return JSON.stringify(error);
77
- }
78
-
79
- // ai-model-specification/errors/load-api-key-error.ts
80
- var LoadAPIKeyError = class extends Error {
81
- constructor({ message }) {
82
- super(message);
83
- this.name = "AI_LoadAPIKeyError";
84
- }
85
- static isLoadAPIKeyError(error) {
86
- return error instanceof Error && error.name === "AI_LoadAPIKeyError";
87
- }
88
- toJSON() {
89
- return {
90
- name: this.name,
91
- message: this.message
92
- };
93
- }
94
- };
95
-
96
- // ai-model-specification/util/load-api-key.ts
97
- function loadApiKey({
98
- apiKey,
99
- environmentVariableName,
100
- apiKeyParameterName = "apiKey",
101
- description
102
- }) {
103
- if (apiKey != null) {
104
- return apiKey;
105
- }
106
- if (typeof process === "undefined") {
107
- throw new LoadAPIKeyError({
108
- message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter. Environment variables is not supported in this environment.`
109
- });
110
- }
111
- apiKey = process.env[environmentVariableName];
112
- if (apiKey == null) {
113
- throw new LoadAPIKeyError({
114
- message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter or the ${environmentVariableName} environment variable.`
115
- });
116
- }
117
- return apiKey;
118
- }
119
-
120
- // ai-model-specification/util/parse-json.ts
121
- import SecureJSON from "secure-json-parse";
122
-
123
- // ai-model-specification/errors/json-parse-error.ts
124
- var JSONParseError = class extends Error {
125
- constructor({ text, cause }) {
126
- super(
127
- `JSON parsing failed: Text: ${text}.
128
- Error message: ${getErrorMessage(cause)}`
129
- );
130
- this.name = "AI_JSONParseError";
131
- this.cause = cause;
132
- this.text = text;
133
- }
134
- static isJSONParseError(error) {
135
- return error instanceof Error && error.name === "AI_JSONParseError" && typeof error.text === "string" && typeof error.cause === "string";
136
- }
137
- toJSON() {
138
- return {
139
- name: this.name,
140
- message: this.message,
141
- cause: this.cause,
142
- stack: this.stack,
143
- valueText: this.text
144
- };
145
- }
146
- };
147
-
148
- // ai-model-specification/errors/type-validation-error.ts
149
- var TypeValidationError = class extends Error {
150
- constructor({ value, cause }) {
151
- super(
152
- `Type validation failed: Value: ${JSON.stringify(value)}.
153
- Error message: ${getErrorMessage(cause)}`
154
- );
155
- this.name = "AI_TypeValidationError";
156
- this.cause = cause;
157
- this.value = value;
158
- }
159
- static isTypeValidationError(error) {
160
- return error instanceof Error && error.name === "AI_TypeValidationError" && typeof error.value === "string" && typeof error.cause === "string";
161
- }
162
- toJSON() {
163
- return {
164
- name: this.name,
165
- message: this.message,
166
- cause: this.cause,
167
- stack: this.stack,
168
- value: this.value
169
- };
170
- }
171
- };
172
-
173
- // ai-model-specification/util/validate-types.ts
174
- function validateTypes({
175
- value,
176
- schema
177
- }) {
178
- try {
179
- return schema.parse(value);
180
- } catch (error) {
181
- throw new TypeValidationError({ value, cause: error });
182
- }
183
- }
184
- function safeValidateTypes({
185
- value,
186
- schema
187
- }) {
188
- try {
189
- const validationResult = schema.safeParse(value);
190
- if (validationResult.success) {
191
- return {
192
- success: true,
193
- value: validationResult.data
194
- };
195
- }
196
- return {
197
- success: false,
198
- error: new TypeValidationError({
199
- value,
200
- cause: validationResult.error
201
- })
202
- };
203
- } catch (error) {
204
- return {
205
- success: false,
206
- error: TypeValidationError.isTypeValidationError(error) ? error : new TypeValidationError({ value, cause: error })
207
- };
208
- }
209
- }
210
-
211
- // ai-model-specification/util/parse-json.ts
212
- function parseJSON({
213
- text,
214
- schema
215
- }) {
216
- try {
217
- const value = SecureJSON.parse(text);
218
- if (schema == null) {
219
- return value;
220
- }
221
- return validateTypes({ value, schema });
222
- } catch (error) {
223
- if (JSONParseError.isJSONParseError(error) || TypeValidationError.isTypeValidationError(error)) {
224
- throw error;
225
- }
226
- throw new JSONParseError({ text, cause: error });
227
- }
228
- }
229
- function safeParseJSON({
230
- text,
231
- schema
232
- }) {
233
- try {
234
- const value = SecureJSON.parse(text);
235
- if (schema == null) {
236
- return {
237
- success: true,
238
- value
239
- };
240
- }
241
- return safeValidateTypes({ value, schema });
242
- } catch (error) {
243
- return {
244
- success: false,
245
- error: JSONParseError.isJSONParseError(error) ? error : new JSONParseError({ text, cause: error })
246
- };
247
- }
248
- }
249
- function isParseableJson(input) {
250
- try {
251
- SecureJSON.parse(input);
252
- return true;
253
- } catch (e) {
254
- return false;
255
- }
256
- }
257
-
258
- // ai-model-specification/util/post-to-api.ts
259
- var postJsonToApi = async ({
260
- url,
261
- headers,
262
- body,
263
- failedResponseHandler,
264
- successfulResponseHandler,
265
- abortSignal
266
- }) => postToApi({
267
- url,
268
- headers: {
269
- ...headers,
270
- "Content-Type": "application/json"
271
- },
272
- body: {
273
- content: JSON.stringify(body),
274
- values: body
275
- },
276
- failedResponseHandler,
277
- successfulResponseHandler,
278
- abortSignal
279
- });
280
- var postToApi = async ({
281
- url,
282
- headers = {},
283
- body,
284
- successfulResponseHandler,
285
- failedResponseHandler,
286
- abortSignal
287
- }) => {
288
- try {
289
- const definedHeaders = Object.fromEntries(
290
- Object.entries(headers).filter(([_key, value]) => value != null)
291
- );
292
- const response = await fetch(url, {
293
- method: "POST",
294
- headers: definedHeaders,
295
- body: body.content,
296
- signal: abortSignal
297
- });
298
- if (!response.ok) {
299
- try {
300
- throw await failedResponseHandler({
301
- response,
302
- url,
303
- requestBodyValues: body.values
304
- });
305
- } catch (error) {
306
- if (error instanceof Error) {
307
- if (error.name === "AbortError" || APICallError.isAPICallError(error)) {
308
- throw error;
309
- }
310
- }
311
- throw new APICallError({
312
- message: "Failed to process error response",
313
- cause: error,
314
- statusCode: response.status,
315
- url,
316
- requestBodyValues: body.values
317
- });
318
- }
319
- }
320
- try {
321
- return await successfulResponseHandler({
322
- response,
323
- url,
324
- requestBodyValues: body.values
325
- });
326
- } catch (error) {
327
- if (error instanceof Error) {
328
- if (error.name === "AbortError" || APICallError.isAPICallError(error)) {
329
- throw error;
330
- }
331
- }
332
- throw new APICallError({
333
- message: "Failed to process successful response",
334
- cause: error,
335
- statusCode: response.status,
336
- url,
337
- requestBodyValues: body.values
338
- });
339
- }
340
- } catch (error) {
341
- if (error instanceof Error) {
342
- if (error.name === "AbortError") {
343
- throw error;
344
- }
345
- }
346
- if (error instanceof TypeError && error.message === "fetch failed") {
347
- const cause = error.cause;
348
- if (cause != null) {
349
- throw new APICallError({
350
- message: `Cannot connect to API: ${cause.message}`,
351
- cause,
352
- url,
353
- requestBodyValues: body.values,
354
- isRetryable: true
355
- // retry when network error
356
- });
357
- }
358
- }
359
- throw error;
360
- }
361
- };
362
-
363
- // ai-model-specification/util/response-handler.ts
364
- import {
365
- EventSourceParserStream
366
- } from "eventsource-parser/stream";
367
- var createJsonErrorResponseHandler = ({
368
- errorSchema,
369
- errorToMessage,
370
- isRetryable
371
- }) => async ({ response, url, requestBodyValues }) => {
372
- const responseBody = await response.text();
373
- if (responseBody.trim() === "") {
374
- return new APICallError({
375
- message: response.statusText,
376
- url,
377
- requestBodyValues,
378
- statusCode: response.status,
379
- responseBody,
380
- isRetryable: isRetryable == null ? void 0 : isRetryable(response)
381
- });
382
- }
383
- try {
384
- const parsedError = parseJSON({
385
- text: responseBody,
386
- schema: errorSchema
387
- });
388
- return new APICallError({
389
- message: errorToMessage(parsedError),
390
- url,
391
- requestBodyValues,
392
- statusCode: response.status,
393
- responseBody,
394
- data: parsedError,
395
- isRetryable: isRetryable == null ? void 0 : isRetryable(response, parsedError)
396
- });
397
- } catch (parseError) {
398
- return new APICallError({
399
- message: response.statusText,
400
- url,
401
- requestBodyValues,
402
- statusCode: response.status,
403
- responseBody,
404
- isRetryable: isRetryable == null ? void 0 : isRetryable(response)
405
- });
406
- }
407
- };
408
- var createEventSourceResponseHandler = (chunkSchema) => async ({ response }) => {
409
- if (response.body == null) {
410
- throw new Error("No response body");
411
- }
412
- return response.body.pipeThrough(new TextDecoderStream()).pipeThrough(new EventSourceParserStream()).pipeThrough(
413
- new TransformStream({
414
- transform({ data }, controller) {
415
- if (data === "[DONE]") {
416
- return;
417
- }
418
- const parseResult = safeParseJSON({
419
- text: data,
420
- schema: chunkSchema
421
- });
422
- controller.enqueue(
423
- parseResult.success ? { type: "value", value: parseResult.value } : { type: "error", error: parseResult.error }
424
- );
425
- }
426
- })
427
- );
428
- };
429
- var createJsonResponseHandler = (responseSchema) => async ({ response, url, requestBodyValues }) => {
430
- const responseBody = await response.text();
431
- const parsedResult = safeParseJSON({
432
- text: responseBody,
433
- schema: responseSchema
434
- });
435
- if (!parsedResult.success) {
436
- throw new APICallError({
437
- message: "Invalid JSON response",
438
- cause: parsedResult.error,
439
- statusCode: response.status,
440
- responseBody,
441
- url,
442
- requestBodyValues
443
- });
444
- }
445
- return parsedResult.value;
446
- };
447
-
448
- // ai-model-specification/util/scale.ts
449
- function scale({
450
- inputMin = 0,
451
- inputMax = 1,
452
- outputMin,
453
- outputMax,
454
- value
455
- }) {
456
- if (value === void 0) {
457
- return void 0;
458
- }
459
- const inputRange = inputMax - inputMin;
460
- const outputRange = outputMax - outputMin;
461
- return (value - inputMin) * outputRange / inputRange + outputMin;
462
- }
463
-
464
- // ai-model-specification/util/uint8-utils.ts
465
- function convertUint8ArrayToBase64(array) {
466
- let latin1string = "";
467
- for (const value of array) {
468
- latin1string += String.fromCodePoint(value);
469
- }
470
- return globalThis.btoa(latin1string);
471
- }
472
-
473
- // ai-model-specification/errors/unsupported-functionality-error.ts
474
- var UnsupportedFunctionalityError = class extends Error {
475
- constructor({
476
- provider,
477
- functionality
478
- }) {
479
- super(
480
- `Functionality not supported by the provider. Provider: ${provider}.
481
- Functionality: ${functionality}`
482
- );
483
- this.name = "AI_UnsupportedFunctionalityError";
484
- this.provider = provider;
485
- this.functionality = functionality;
486
- }
487
- static isUnsupportedFunctionalityError(error) {
488
- return error instanceof Error && error.name === "AI_UnsupportedFunctionalityError" && typeof error.provider === "string" && typeof error.functionality === "string";
489
- }
490
- toJSON() {
491
- return {
492
- name: this.name,
493
- message: this.message,
494
- stack: this.stack,
495
- provider: this.provider,
496
- functionality: this.functionality
497
- };
498
- }
499
- };
500
-
501
- // openai/openai-chat-language-model.ts
502
- import { nanoid } from "nanoid";
503
- import { z as z2 } from "zod";
504
-
505
- // openai/convert-to-openai-chat-messages.ts
506
- function convertToOpenAIChatMessages(prompt2) {
507
- const messages = [];
508
- for (const { role, content } of prompt2) {
509
- switch (role) {
510
- case "system": {
511
- messages.push({ role: "system", content });
512
- break;
513
- }
514
- case "user": {
515
- messages.push({
516
- role: "user",
517
- content: content.map((part) => {
518
- var _a;
519
- switch (part.type) {
520
- case "text": {
521
- return { type: "text", text: part.text };
522
- }
523
- case "image": {
524
- return {
525
- type: "image_url",
526
- image_url: {
527
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`
528
- }
529
- };
530
- }
531
- }
532
- })
533
- });
534
- break;
535
- }
536
- case "assistant": {
537
- let text = "";
538
- const toolCalls = [];
539
- for (const part of content) {
540
- switch (part.type) {
541
- case "text": {
542
- text += part.text;
543
- break;
544
- }
545
- case "tool-call": {
546
- toolCalls.push({
547
- id: part.toolCallId,
548
- type: "function",
549
- function: {
550
- name: part.toolName,
551
- arguments: JSON.stringify(part.args)
552
- }
553
- });
554
- break;
555
- }
556
- default: {
557
- const _exhaustiveCheck = part;
558
- throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
559
- }
560
- }
561
- }
562
- messages.push({
563
- role: "assistant",
564
- content: text,
565
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
566
- });
567
- break;
568
- }
569
- case "tool": {
570
- for (const toolResponse of content) {
571
- messages.push({
572
- role: "tool",
573
- tool_call_id: toolResponse.toolCallId,
574
- content: JSON.stringify(toolResponse.result)
575
- });
576
- }
577
- break;
578
- }
579
- default: {
580
- const _exhaustiveCheck = role;
581
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
582
- }
583
- }
584
- }
585
- return messages;
586
- }
587
-
588
- // openai/map-openai-finish-reason.ts
589
- function mapOpenAIFinishReason(finishReason) {
590
- switch (finishReason) {
591
- case "stop":
592
- return "stop";
593
- case "length":
594
- return "length";
595
- case "content-filter":
596
- return "content-filter";
597
- case "function_call":
598
- case "tool-calls":
599
- return "tool-calls";
600
- default:
601
- return "other";
602
- }
603
- }
604
-
605
- // openai/openai-error.ts
606
- import { z } from "zod";
607
- var openAIErrorDataSchema = z.object({
608
- error: z.object({
609
- message: z.string(),
610
- type: z.string(),
611
- param: z.any().nullable(),
612
- code: z.string().nullable()
613
- })
614
- });
615
- var openaiFailedResponseHandler = createJsonErrorResponseHandler({
616
- errorSchema: openAIErrorDataSchema,
617
- errorToMessage: (data) => data.error.message
618
- });
619
-
620
- // openai/openai-chat-language-model.ts
621
- var OpenAIChatLanguageModel = class {
622
- constructor(modelId, settings, config) {
623
- this.specificationVersion = "v1";
624
- this.defaultObjectGenerationMode = "tool";
625
- this.modelId = modelId;
626
- this.settings = settings;
627
- this.config = config;
628
- }
629
- get provider() {
630
- return this.config.provider;
631
- }
632
- getArgs({
633
- mode,
634
- prompt: prompt2,
635
- maxTokens,
636
- temperature,
637
- topP,
638
- frequencyPenalty,
639
- presencePenalty,
640
- seed
641
- }) {
642
- var _a;
643
- const type = mode.type;
644
- const baseArgs = {
645
- // model id:
646
- model: this.modelId,
647
- // model specific settings:
648
- logit_bias: this.settings.logitBias,
649
- user: this.settings.user,
650
- // standardized settings:
651
- max_tokens: maxTokens,
652
- temperature: scale({
653
- value: temperature,
654
- outputMin: 0,
655
- outputMax: 2
656
- }),
657
- top_p: topP,
658
- frequency_penalty: scale({
659
- value: frequencyPenalty,
660
- inputMin: -1,
661
- inputMax: 1,
662
- outputMin: -2,
663
- outputMax: 2
664
- }),
665
- presence_penalty: scale({
666
- value: presencePenalty,
667
- inputMin: -1,
668
- inputMax: 1,
669
- outputMin: -2,
670
- outputMax: 2
671
- }),
672
- seed,
673
- // messages:
674
- messages: convertToOpenAIChatMessages(prompt2)
675
- };
676
- switch (type) {
677
- case "regular": {
678
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
679
- return {
680
- ...baseArgs,
681
- tools: tools == null ? void 0 : tools.map((tool) => ({
682
- type: "function",
683
- function: {
684
- name: tool.name,
685
- description: tool.description,
686
- parameters: tool.parameters
687
- }
688
- }))
689
- };
690
- }
691
- case "object-json": {
692
- return {
693
- ...baseArgs,
694
- response_format: { type: "json_object" }
695
- };
696
- }
697
- case "object-tool": {
698
- return {
699
- ...baseArgs,
700
- tool_choice: { type: "function", function: { name: mode.tool.name } },
701
- tools: [{ type: "function", function: mode.tool }]
702
- };
703
- }
704
- case "object-grammar": {
705
- throw new UnsupportedFunctionalityError({
706
- functionality: "object-grammar mode",
707
- provider: this.provider
708
- });
709
- }
710
- default: {
711
- const _exhaustiveCheck = type;
712
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
713
- }
714
- }
715
- }
716
- async doGenerate(options) {
717
- var _a, _b;
718
- const args = this.getArgs(options);
719
- const response = await postJsonToApi({
720
- url: `${this.config.baseUrl}/chat/completions`,
721
- headers: this.config.headers(),
722
- body: args,
723
- failedResponseHandler: openaiFailedResponseHandler,
724
- successfulResponseHandler: createJsonResponseHandler(
725
- openAIChatResponseSchema
726
- ),
727
- abortSignal: options.abortSignal
728
- });
729
- const { messages: rawPrompt, ...rawSettings } = args;
730
- const choice = response.choices[0];
731
- return {
732
- text: (_a = choice.message.content) != null ? _a : void 0,
733
- toolCalls: (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => ({
734
- toolCallType: "function",
735
- toolCallId: toolCall.id,
736
- toolName: toolCall.function.name,
737
- args: toolCall.function.arguments
738
- })),
739
- finishReason: mapOpenAIFinishReason(choice.finish_reason),
740
- usage: {
741
- promptTokens: response.usage.prompt_tokens,
742
- completionTokens: response.usage.completion_tokens
743
- },
744
- rawCall: { rawPrompt, rawSettings },
745
- warnings: []
746
- };
747
- }
748
- async doStream(options) {
749
- const args = this.getArgs(options);
750
- const response = await postJsonToApi({
751
- url: `${this.config.baseUrl}/chat/completions`,
752
- headers: this.config.headers(),
753
- body: {
754
- ...args,
755
- stream: true
756
- },
757
- failedResponseHandler: openaiFailedResponseHandler,
758
- successfulResponseHandler: createEventSourceResponseHandler(
759
- openaiChatChunkSchema
760
- ),
761
- abortSignal: options.abortSignal
762
- });
763
- const { messages: rawPrompt, ...rawSettings } = args;
764
- const toolCalls = [];
765
- return {
766
- stream: response.pipeThrough(
767
- new TransformStream({
768
- transform(chunk, controller) {
769
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
770
- if (chunk.type === "error") {
771
- controller.enqueue(chunk);
772
- return;
773
- }
774
- const value = chunk.value;
775
- if (((_b = (_a = value.choices) == null ? void 0 : _a[0]) == null ? void 0 : _b.delta) == null) {
776
- return;
777
- }
778
- const delta = value.choices[0].delta;
779
- if (delta.content != null) {
780
- controller.enqueue({
781
- type: "text-delta",
782
- textDelta: delta.content
783
- });
784
- }
785
- if (delta.tool_calls != null) {
786
- for (const toolCallDelta of delta.tool_calls) {
787
- const index = toolCallDelta.index;
788
- if (toolCalls[index] == null) {
789
- toolCalls[index] = toolCallDelta;
790
- continue;
791
- }
792
- const toolCall = toolCalls[index];
793
- if (((_c = toolCallDelta.function) == null ? void 0 : _c.arguments) != null) {
794
- toolCall.function.arguments += (_e = (_d = toolCallDelta.function) == null ? void 0 : _d.arguments) != null ? _e : "";
795
- }
796
- controller.enqueue({
797
- type: "tool-call-delta",
798
- toolCallId: (_f = toolCall.id) != null ? _f : "",
799
- // TODO empty?
800
- toolName: (_h = (_g = toolCall.function) == null ? void 0 : _g.name) != null ? _h : "",
801
- // TODO empty?
802
- argsTextDelta: (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : ""
803
- // TODO empty?
804
- });
805
- if (((_k = toolCall.function) == null ? void 0 : _k.name) == null || ((_l = toolCall.function) == null ? void 0 : _l.arguments) == null || !isParseableJson(toolCall.function.arguments)) {
806
- continue;
807
- }
808
- controller.enqueue({
809
- type: "tool-call",
810
- toolCallType: "function",
811
- toolCallId: (_m = toolCall.id) != null ? _m : nanoid(),
812
- toolName: toolCall.function.name,
813
- args: toolCall.function.arguments
814
- });
815
- }
816
- }
817
- }
818
- })
819
- ),
820
- rawCall: { rawPrompt, rawSettings },
821
- warnings: []
822
- };
823
- }
824
- };
825
- var openAIChatResponseSchema = z2.object({
826
- choices: z2.array(
827
- z2.object({
828
- message: z2.object({
829
- role: z2.literal("assistant"),
830
- content: z2.string().nullable(),
831
- tool_calls: z2.array(
832
- z2.object({
833
- id: z2.string(),
834
- type: z2.literal("function"),
835
- function: z2.object({
836
- name: z2.string(),
837
- arguments: z2.string()
838
- })
839
- })
840
- ).optional()
841
- }),
842
- index: z2.number(),
843
- finish_reason: z2.string().optional().nullable()
844
- })
845
- ),
846
- object: z2.literal("chat.completion"),
847
- usage: z2.object({
848
- prompt_tokens: z2.number(),
849
- completion_tokens: z2.number()
850
- })
851
- });
852
- var openaiChatChunkSchema = z2.object({
853
- object: z2.literal("chat.completion.chunk"),
854
- choices: z2.array(
855
- z2.object({
856
- delta: z2.object({
857
- role: z2.enum(["assistant"]).optional(),
858
- content: z2.string().nullable().optional(),
859
- tool_calls: z2.array(
860
- z2.object({
861
- index: z2.number(),
862
- id: z2.string().optional(),
863
- type: z2.literal("function").optional(),
864
- function: z2.object({
865
- name: z2.string().optional(),
866
- arguments: z2.string().optional()
867
- })
868
- })
869
- ).optional()
870
- }),
871
- finish_reason: z2.string().nullable().optional(),
872
- index: z2.number()
873
- })
874
- )
875
- });
876
-
877
- // openai/openai-completion-language-model.ts
878
- import { z as z3 } from "zod";
879
-
880
- // openai/convert-to-openai-completion-prompt.ts
881
- function convertToOpenAICompletionPrompt({
882
- prompt: prompt2,
883
- inputFormat,
884
- provider,
885
- user = "user",
886
- assistant = "assistant"
887
- }) {
888
- if (inputFormat === "prompt" && prompt2.length === 1 && prompt2[0].role === "user" && prompt2[0].content.length === 1 && prompt2[0].content[0].type === "text") {
889
- return { prompt: prompt2[0].content[0].text };
890
- }
891
- let text = "";
892
- if (prompt2[0].role === "system") {
893
- text += `${prompt2[0].content}
894
-
895
- `;
896
- prompt2 = prompt2.slice(1);
897
- }
898
- for (const { role, content } of prompt2) {
899
- switch (role) {
900
- case "system": {
901
- throw new InvalidPromptError({
902
- message: "Unexpected system message in prompt: ${content}",
903
- prompt: prompt2
904
- });
905
- }
906
- case "user": {
907
- const userMessage = content.map((part) => {
908
- switch (part.type) {
909
- case "text": {
910
- return part.text;
911
- }
912
- case "image": {
913
- throw new UnsupportedFunctionalityError({
914
- provider,
915
- functionality: "images"
916
- });
917
- }
918
- }
919
- }).join("");
920
- text += `${user}:
921
- ${userMessage}
922
-
923
- `;
924
- break;
925
- }
926
- case "assistant": {
927
- const assistantMessage = content.map((part) => {
928
- switch (part.type) {
929
- case "text": {
930
- return part.text;
931
- }
932
- case "tool-call": {
933
- throw new UnsupportedFunctionalityError({
934
- provider,
935
- functionality: "tool-call messages"
936
- });
937
- }
938
- }
939
- }).join("");
940
- text += `${assistant}:
941
- ${assistantMessage}
942
-
943
- `;
944
- break;
945
- }
946
- case "tool": {
947
- throw new UnsupportedFunctionalityError({
948
- provider,
949
- functionality: "tool messages"
950
- });
951
- }
952
- default: {
953
- const _exhaustiveCheck = role;
954
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
955
- }
956
- }
957
- }
958
- text += `${assistant}:
959
- `;
960
- return {
961
- prompt: text,
962
- stopSequences: [`
963
- ${user}:`]
964
- };
965
- }
966
-
967
// openai/openai-completion-language-model.ts
var OpenAICompletionLanguageModel = class {
  /**
   * Language-model wrapper for the OpenAI (legacy) completions API.
   *
   * @param modelId  completion model id (sent as `model` in the request body).
   * @param settings model-specific settings: echo, logitBias, suffix, user.
   * @param config   provider config: { provider, baseUrl, headers() }.
   */
  constructor(modelId, settings, config) {
    this.specificationVersion = "v1";
    // the completions API has no structured-object generation mode:
    this.defaultObjectGenerationMode = void 0;
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  /**
   * Builds the JSON request body for the `/completions` endpoint from
   * standardized call options.
   *
   * Throws UnsupportedFunctionalityError for capabilities the completions
   * API cannot serve (tools and all object-generation modes).
   */
  getArgs({
    mode,
    inputFormat,
    prompt: prompt2,
    maxTokens,
    temperature,
    topP,
    frequencyPenalty,
    presencePenalty,
    seed
  }) {
    const type = mode.type;
    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({
      prompt: prompt2,
      inputFormat,
      provider: this.provider
    });
    const baseArgs = {
      // model id:
      model: this.modelId,
      // model specific settings:
      echo: this.settings.echo,
      logit_bias: this.settings.logitBias,
      suffix: this.settings.suffix,
      user: this.settings.user,
      // standardized settings; temperature is rescaled to OpenAI's 0..2
      // range and the penalties from -1..1 to OpenAI's -2..2 range:
      max_tokens: maxTokens,
      temperature: scale({
        value: temperature,
        outputMin: 0,
        outputMax: 2
      }),
      top_p: topP,
      frequency_penalty: scale({
        value: frequencyPenalty,
        inputMin: -1,
        inputMax: 1,
        outputMin: -2,
        outputMax: 2
      }),
      presence_penalty: scale({
        value: presencePenalty,
        inputMin: -1,
        inputMax: 1,
        outputMin: -2,
        outputMax: 2
      }),
      seed,
      // prompt:
      prompt: completionPrompt,
      // stop sequences:
      stop: stopSequences
    };
    switch (type) {
      case "regular": {
        if (mode.tools?.length) {
          throw new UnsupportedFunctionalityError({
            functionality: "tools",
            provider: this.provider
          });
        }
        return baseArgs;
      }
      case "object-json": {
        throw new UnsupportedFunctionalityError({
          functionality: "object-json mode",
          provider: this.provider
        });
      }
      case "object-tool": {
        throw new UnsupportedFunctionalityError({
          functionality: "object-tool mode",
          provider: this.provider
        });
      }
      case "object-grammar": {
        throw new UnsupportedFunctionalityError({
          functionality: "object-grammar mode",
          provider: this.provider
        });
      }
      default: {
        const _exhaustiveCheck = type;
        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
      }
    }
  }
  /**
   * Non-streaming generation: POSTs the args and maps the first choice of
   * the validated response to the standardized result shape.
   */
  async doGenerate(options) {
    const args = this.getArgs(options);
    const response = await postJsonToApi({
      url: `${this.config.baseUrl}/completions`,
      headers: this.config.headers(),
      body: args,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(
        openAICompletionResponseSchema
      ),
      abortSignal: options.abortSignal
    });
    const { prompt: rawPrompt, ...rawSettings } = args;
    const choice = response.choices[0];
    return {
      text: choice.text,
      usage: {
        promptTokens: response.usage.prompt_tokens,
        completionTokens: response.usage.completion_tokens
      },
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
      rawCall: { rawPrompt, rawSettings },
      warnings: []
    };
  }
  /**
   * Streaming generation: POSTs the args with `stream: true` and converts
   * SSE chunks into text-delta stream parts.
   */
  async doStream(options) {
    const args = this.getArgs(options);
    const response = await postJsonToApi({
      url: `${this.config.baseUrl}/completions`,
      headers: this.config.headers(),
      // fix: reuse the already-computed args instead of calling
      // getArgs(options) a second time (previously recomputed per call):
      body: {
        ...args,
        stream: true
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler(
        openaiCompletionChunkSchema
      ),
      abortSignal: options.abortSignal
    });
    const { prompt: rawPrompt, ...rawSettings } = args;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          transform(chunk, controller) {
            // pass parse/transport errors through unchanged:
            if (chunk.type === "error") {
              controller.enqueue(chunk);
              return;
            }
            const value = chunk.value;
            if (value.choices?.[0]?.text != null) {
              controller.enqueue({
                type: "text-delta",
                textDelta: value.choices[0].text
              });
            }
          }
        })
      ),
      rawCall: { rawPrompt, rawSettings },
      warnings: []
    };
  }
};
1132
// Validation schema for a non-streaming OpenAI completions API response.
// Only the fields that doGenerate actually reads are validated.
const openAICompletionChoiceSchema = z3.object({
  text: z3.string(),
  finish_reason: z3.string()
});
const openAICompletionUsageSchema = z3.object({
  prompt_tokens: z3.number(),
  completion_tokens: z3.number()
});
var openAICompletionResponseSchema = z3.object({
  choices: z3.array(openAICompletionChoiceSchema),
  usage: openAICompletionUsageSchema
});
1144
// Validation schema for one SSE chunk of a streaed completions response.
// finish_reason may be absent or null on intermediate chunks.
const openaiCompletionChunkChoiceSchema = z3.object({
  text: z3.string(),
  finish_reason: z3.enum(["stop", "length", "content_filter"]).optional().nullable(),
  index: z3.number()
});
var openaiCompletionChunkSchema = z3.object({
  object: z3.literal("text_completion"),
  choices: z3.array(openaiCompletionChunkChoiceSchema)
});
1154
-
1155
// openai/openai-facade.ts
var OpenAI = class {
  /**
   * Facade for creating OpenAI chat and completion language models.
   *
   * @param options.baseUrl      API base URL; defaults to the public OpenAI endpoint.
   * @param options.apiKey       API key; resolved lazily (falls back to the
   *                             OPENAI_API_KEY environment variable on first request).
   * @param options.organization optional OpenAI organization id.
   */
  constructor(options = {}) {
    this.baseUrl = options.baseUrl;
    this.apiKey = options.apiKey;
    this.organization = options.organization;
  }
  // Configuration shared by both model kinds. The Authorization header is
  // built inside a closure so the key is only loaded when a request is made.
  get baseConfig() {
    const resolvedBaseUrl = this.baseUrl != null ? this.baseUrl : "https://api.openai.com/v1";
    return {
      organization: this.organization,
      baseUrl: resolvedBaseUrl,
      headers: () => ({
        Authorization: `Bearer ${loadApiKey({
          apiKey: this.apiKey,
          environmentVariableName: "OPENAI_API_KEY",
          description: "OpenAI"
        })}`,
        "OpenAI-Organization": this.organization
      })
    };
  }
  /** Creates a chat-API language model for the given model id. */
  chat(modelId, settings = {}) {
    return new OpenAIChatLanguageModel(modelId, settings, {
      provider: "openai.chat",
      ...this.baseConfig
    });
  }
  /** Creates a (legacy) completions-API language model for the given model id. */
  completion(modelId, settings = {}) {
    return new OpenAICompletionLanguageModel(modelId, settings, {
      provider: "openai.completion",
      ...this.baseConfig
    });
  }
};
1190
- var openai = new OpenAI();
1191
- export {
1192
- OpenAI,
1193
- openai
1194
- };
1195
- //# sourceMappingURL=index.mjs.map