ai 3.1.0-canary.2 → 3.1.0-canary.3

This diff shows the changes between two package versions that were publicly released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
@@ -1,5 +1,5 @@
1
1
  // ai-model-specification/errors/api-call-error.ts
2
- var ApiCallError = class extends Error {
2
+ var APICallError = class extends Error {
3
3
  constructor({
4
4
  message,
5
5
  url,
@@ -7,7 +7,11 @@ var ApiCallError = class extends Error {
7
7
  statusCode,
8
8
  responseBody,
9
9
  cause,
10
- isRetryable = statusCode != null && (statusCode === 429 || statusCode >= 500),
10
+ isRetryable = statusCode != null && (statusCode === 408 || // request timeout
11
+ statusCode === 409 || // conflict
12
+ statusCode === 429 || // too many requests
13
+ statusCode >= 500),
14
+ // server error
11
15
  data
12
16
  }) {
13
17
  super(message);
@@ -255,8 +259,8 @@ var postJsonToApi = async ({
255
259
  }) => postToApi({
256
260
  url,
257
261
  headers: {
258
- "Content-Type": "application/json",
259
- ...headers
262
+ ...headers,
263
+ "Content-Type": "application/json"
260
264
  },
261
265
  body: {
262
266
  content: JSON.stringify(body),
@@ -275,9 +279,12 @@ var postToApi = async ({
275
279
  abortSignal
276
280
  }) => {
277
281
  try {
282
+ const definedHeaders = Object.fromEntries(
283
+ Object.entries(headers).filter(([_key, value]) => value != null)
284
+ );
278
285
  const response = await fetch(url, {
279
286
  method: "POST",
280
- headers,
287
+ headers: definedHeaders,
281
288
  body: body.content,
282
289
  signal: abortSignal
283
290
  });
@@ -290,11 +297,11 @@ var postToApi = async ({
290
297
  });
291
298
  } catch (error) {
292
299
  if (error instanceof Error) {
293
- if (error.name === "AbortError" || error instanceof ApiCallError) {
300
+ if (error.name === "AbortError" || error instanceof APICallError) {
294
301
  throw error;
295
302
  }
296
303
  }
297
- throw new ApiCallError({
304
+ throw new APICallError({
298
305
  message: "Failed to process error response",
299
306
  cause: error,
300
307
  statusCode: response.status,
@@ -311,11 +318,11 @@ var postToApi = async ({
311
318
  });
312
319
  } catch (error) {
313
320
  if (error instanceof Error) {
314
- if (error.name === "AbortError" || error instanceof ApiCallError) {
321
+ if (error.name === "AbortError" || error instanceof APICallError) {
315
322
  throw error;
316
323
  }
317
324
  }
318
- throw new ApiCallError({
325
+ throw new APICallError({
319
326
  message: "Failed to process successful response",
320
327
  cause: error,
321
328
  statusCode: response.status,
@@ -332,12 +339,13 @@ var postToApi = async ({
332
339
  if (error instanceof TypeError && error.message === "fetch failed") {
333
340
  const cause = error.cause;
334
341
  if (cause != null) {
335
- throw new ApiCallError({
342
+ throw new APICallError({
336
343
  message: `Cannot connect to API: ${cause.message}`,
337
344
  cause,
338
345
  url,
339
346
  requestBodyValues: body.values,
340
347
  isRetryable: true
348
+ // retry when network error
341
349
  });
342
350
  }
343
351
  }
@@ -356,7 +364,7 @@ var createJsonErrorResponseHandler = ({
356
364
  }) => async ({ response, url, requestBodyValues }) => {
357
365
  const responseBody = await response.text();
358
366
  if (responseBody.trim() === "") {
359
- return new ApiCallError({
367
+ return new APICallError({
360
368
  message: response.statusText,
361
369
  url,
362
370
  requestBodyValues,
@@ -370,7 +378,7 @@ var createJsonErrorResponseHandler = ({
370
378
  text: responseBody,
371
379
  schema: errorSchema
372
380
  });
373
- return new ApiCallError({
381
+ return new APICallError({
374
382
  message: errorToMessage(parsedError),
375
383
  url,
376
384
  requestBodyValues,
@@ -380,7 +388,7 @@ var createJsonErrorResponseHandler = ({
380
388
  isRetryable: isRetryable == null ? void 0 : isRetryable(response, parsedError)
381
389
  });
382
390
  } catch (parseError) {
383
- return new ApiCallError({
391
+ return new APICallError({
384
392
  message: response.statusText,
385
393
  url,
386
394
  requestBodyValues,
@@ -418,7 +426,7 @@ var createJsonResponseHandler = (responseSchema) => async ({ response, url, requ
418
426
  schema: responseSchema
419
427
  });
420
428
  if (!parsedResult.success) {
421
- throw new ApiCallError({
429
+ throw new APICallError({
422
430
  message: "Invalid JSON response",
423
431
  cause: parsedResult.error,
424
432
  statusCode: response.status,
@@ -481,7 +489,7 @@ function convertToOpenAIChatMessages(prompt) {
481
489
  return {
482
490
  type: "image_url",
483
491
  image_url: {
484
- url: `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`
492
+ url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`
485
493
  }
486
494
  };
487
495
  }
@@ -554,25 +562,38 @@ var openAIErrorDataSchema = z.object({
554
562
  });
555
563
  var openaiFailedResponseHandler = createJsonErrorResponseHandler({
556
564
  errorSchema: openAIErrorDataSchema,
557
- errorToMessage: (data) => data.error.message,
558
- isRetryable: (response, error) => response.status >= 500 || response.status === 429 && // insufficient_quota is also reported as a 429, but it's not retryable:
559
- (error == null ? void 0 : error.error.type) !== "insufficient_quota"
565
+ errorToMessage: (data) => data.error.message
560
566
  });
561
567
 
568
+ // provider/openai/map-openai-finish-reason.ts
569
+ function mapOpenAIFinishReason(finishReason) {
570
+ switch (finishReason) {
571
+ case "stop":
572
+ return "stop";
573
+ case "length":
574
+ return "length";
575
+ case "content-filter":
576
+ return "content-filter";
577
+ case "function_call":
578
+ case "tool-calls":
579
+ return "tool-calls";
580
+ default:
581
+ return "other";
582
+ }
583
+ }
584
+
562
585
  // provider/openai/openai-chat-language-model.ts
563
586
  var OpenAIChatLanguageModel = class {
564
- constructor(settings, config) {
587
+ constructor(modelId, settings, config) {
565
588
  this.specificationVersion = "v1";
566
589
  this.defaultObjectGenerationMode = "tool";
590
+ this.modelId = modelId;
567
591
  this.settings = settings;
568
592
  this.config = config;
569
593
  }
570
594
  get provider() {
571
595
  return this.config.provider;
572
596
  }
573
- get modelId() {
574
- return this.settings.id;
575
- }
576
597
  getArgs({
577
598
  mode,
578
599
  prompt,
@@ -586,8 +607,11 @@ var OpenAIChatLanguageModel = class {
586
607
  var _a;
587
608
  const type = mode.type;
588
609
  const baseArgs = {
610
+ // model id:
611
+ model: this.modelId,
589
612
  // model specific settings:
590
- ...this.config.mapSettings(this.settings),
613
+ logit_bias: this.settings.logitBias,
614
+ user: this.settings.user,
591
615
  // standardized settings:
592
616
  max_tokens: maxTokens,
593
617
  temperature: scale({
@@ -656,49 +680,54 @@ var OpenAIChatLanguageModel = class {
656
680
  }
657
681
  async doGenerate(options) {
658
682
  var _a, _b;
683
+ const args = this.getArgs(options);
659
684
  const response = await postJsonToApi({
660
685
  url: `${this.config.baseUrl}/chat/completions`,
661
- headers: {
662
- Authorization: `Bearer ${this.config.apiKey()}`
663
- },
664
- body: {
665
- ...this.getArgs(options)
666
- },
686
+ headers: this.config.headers(),
687
+ body: args,
667
688
  failedResponseHandler: openaiFailedResponseHandler,
668
689
  successfulResponseHandler: createJsonResponseHandler(
669
690
  openAIChatResponseSchema
670
- )
691
+ ),
692
+ abortSignal: options.abortSignal
671
693
  });
672
- const message = response.choices[0].message;
694
+ const { messages: rawPrompt, ...rawSettings } = args;
695
+ const choice = response.choices[0];
673
696
  return {
674
- text: (_a = message.content) != null ? _a : void 0,
675
- toolCalls: (_b = message.tool_calls) == null ? void 0 : _b.map((toolCall) => ({
697
+ text: (_a = choice.message.content) != null ? _a : void 0,
698
+ toolCalls: (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => ({
676
699
  toolCallType: "function",
677
700
  toolCallId: toolCall.id,
678
701
  toolName: toolCall.function.name,
679
702
  args: toolCall.function.arguments
680
703
  })),
704
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
705
+ usage: {
706
+ promptTokens: response.usage.prompt_tokens,
707
+ completionTokens: response.usage.completion_tokens
708
+ },
709
+ rawCall: { rawPrompt, rawSettings },
681
710
  warnings: []
682
711
  };
683
712
  }
684
713
  async doStream(options) {
714
+ const args = this.getArgs(options);
685
715
  const response = await postJsonToApi({
686
716
  url: `${this.config.baseUrl}/chat/completions`,
687
- headers: {
688
- Authorization: `Bearer ${this.config.apiKey()}`
689
- },
717
+ headers: this.config.headers(),
690
718
  body: {
691
- ...this.getArgs(options),
719
+ ...args,
692
720
  stream: true
693
721
  },
694
722
  failedResponseHandler: openaiFailedResponseHandler,
695
723
  successfulResponseHandler: createEventSourceResponseHandler(
696
724
  openaiChatChunkSchema
697
- )
725
+ ),
726
+ abortSignal: options.abortSignal
698
727
  });
728
+ const { messages: rawPrompt, ...rawSettings } = args;
699
729
  const toolCalls = [];
700
730
  return {
701
- warnings: [],
702
731
  stream: response.pipeThrough(
703
732
  new TransformStream({
704
733
  transform(chunk, controller) {
@@ -752,7 +781,9 @@ var OpenAIChatLanguageModel = class {
752
781
  }
753
782
  }
754
783
  })
755
- )
784
+ ),
785
+ rawCall: { rawPrompt, rawSettings },
786
+ warnings: []
756
787
  };
757
788
  }
758
789
  };
@@ -898,18 +929,16 @@ ${user}:`]
898
929
 
899
930
  // provider/openai/openai-completion-language-model.ts
900
931
  var OpenAICompletionLanguageModel = class {
901
- constructor(settings, config) {
932
+ constructor(modelId, settings, config) {
902
933
  this.specificationVersion = "v1";
903
934
  this.defaultObjectGenerationMode = void 0;
935
+ this.modelId = modelId;
904
936
  this.settings = settings;
905
937
  this.config = config;
906
938
  }
907
939
  get provider() {
908
940
  return this.config.provider;
909
941
  }
910
- get modelId() {
911
- return this.settings.id;
912
- }
913
942
  getArgs({
914
943
  mode,
915
944
  inputFormat,
@@ -929,8 +958,13 @@ var OpenAICompletionLanguageModel = class {
929
958
  provider: this.provider
930
959
  });
931
960
  const baseArgs = {
961
+ // model id:
962
+ model: this.modelId,
932
963
  // model specific settings:
933
- ...this.config.mapSettings(this.settings),
964
+ echo: this.settings.echo,
965
+ logit_bias: this.settings.logitBias,
966
+ suffix: this.settings.suffix,
967
+ user: this.settings.user,
934
968
  // standardized settings:
935
969
  max_tokens: maxTokens,
936
970
  temperature: scale({
@@ -994,30 +1028,35 @@ var OpenAICompletionLanguageModel = class {
994
1028
  }
995
1029
  }
996
1030
  async doGenerate(options) {
1031
+ const args = this.getArgs(options);
997
1032
  const response = await postJsonToApi({
998
1033
  url: `${this.config.baseUrl}/completions`,
999
- headers: {
1000
- Authorization: `Bearer ${this.config.apiKey()}`
1001
- },
1002
- body: {
1003
- ...this.getArgs(options)
1004
- },
1034
+ headers: this.config.headers(),
1035
+ body: args,
1005
1036
  failedResponseHandler: openaiFailedResponseHandler,
1006
1037
  successfulResponseHandler: createJsonResponseHandler(
1007
1038
  openAICompletionResponseSchema
1008
- )
1039
+ ),
1040
+ abortSignal: options.abortSignal
1009
1041
  });
1042
+ const { prompt: rawPrompt, ...rawSettings } = args;
1043
+ const choice = response.choices[0];
1010
1044
  return {
1011
- text: response.choices[0].text,
1045
+ text: choice.text,
1046
+ usage: {
1047
+ promptTokens: response.usage.prompt_tokens,
1048
+ completionTokens: response.usage.completion_tokens
1049
+ },
1050
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
1051
+ rawCall: { rawPrompt, rawSettings },
1012
1052
  warnings: []
1013
1053
  };
1014
1054
  }
1015
1055
  async doStream(options) {
1056
+ const args = this.getArgs(options);
1016
1057
  const response = await postJsonToApi({
1017
1058
  url: `${this.config.baseUrl}/completions`,
1018
- headers: {
1019
- Authorization: `Bearer ${this.config.apiKey()}`
1020
- },
1059
+ headers: this.config.headers(),
1021
1060
  body: {
1022
1061
  ...this.getArgs(options),
1023
1062
  stream: true
@@ -1025,10 +1064,11 @@ var OpenAICompletionLanguageModel = class {
1025
1064
  failedResponseHandler: openaiFailedResponseHandler,
1026
1065
  successfulResponseHandler: createEventSourceResponseHandler(
1027
1066
  openaiCompletionChunkSchema
1028
- )
1067
+ ),
1068
+ abortSignal: options.abortSignal
1029
1069
  });
1070
+ const { prompt: rawPrompt, ...rawSettings } = args;
1030
1071
  return {
1031
- warnings: [],
1032
1072
  stream: response.pipeThrough(
1033
1073
  new TransformStream({
1034
1074
  transform(chunk, controller) {
@@ -1046,7 +1086,9 @@ var OpenAICompletionLanguageModel = class {
1046
1086
  }
1047
1087
  }
1048
1088
  })
1049
- )
1089
+ ),
1090
+ rawCall: { rawPrompt, rawSettings },
1091
+ warnings: []
1050
1092
  };
1051
1093
  }
1052
1094
  };
@@ -1075,47 +1117,42 @@ var openaiCompletionChunkSchema = z3.object({
1075
1117
 
1076
1118
  // provider/openai/openai-facade.ts
1077
1119
  var OpenAI = class {
1078
- constructor({ baseUrl, apiKey } = {}) {
1079
- this.baseUrl = baseUrl;
1080
- this.apiKey = apiKey;
1120
+ constructor(options = {}) {
1121
+ this.baseUrl = options.baseUrl;
1122
+ this.apiKey = options.apiKey;
1123
+ this.organization = options.organization;
1081
1124
  }
1082
- chat(settings) {
1125
+ get baseConfig() {
1083
1126
  var _a;
1084
- return new OpenAIChatLanguageModel(settings, {
1085
- provider: "openai.chat",
1127
+ return {
1128
+ organization: this.organization,
1086
1129
  baseUrl: (_a = this.baseUrl) != null ? _a : "https://api.openai.com/v1",
1087
- apiKey: () => loadApiKey({
1088
- apiKey: this.apiKey,
1089
- environmentVariableName: "OPENAI_API_KEY",
1090
- description: "OpenAI"
1091
- }),
1092
- mapSettings: (settings2) => ({
1093
- model: settings2.id,
1094
- logit_bias: settings2.logitBias
1095
- })
1096
- });
1097
- }
1098
- completion(settings) {
1099
- var _a;
1100
- return new OpenAICompletionLanguageModel(
1101
- settings,
1102
- {
1103
- provider: "openai.completion",
1104
- baseUrl: (_a = this.baseUrl) != null ? _a : "https://api.openai.com/v1",
1105
- apiKey: () => loadApiKey({
1130
+ headers: () => ({
1131
+ Authorization: `Bearer ${loadApiKey({
1106
1132
  apiKey: this.apiKey,
1107
1133
  environmentVariableName: "OPENAI_API_KEY",
1108
1134
  description: "OpenAI"
1109
- }),
1110
- mapSettings: (settings2) => ({
1111
- model: settings2.id,
1112
- logit_bias: settings2.logitBias
1113
- })
1114
- }
1115
- );
1135
+ })}`,
1136
+ "OpenAI-Organization": this.organization
1137
+ })
1138
+ };
1139
+ }
1140
+ chat(modelId, settings = {}) {
1141
+ return new OpenAIChatLanguageModel(modelId, settings, {
1142
+ provider: "openai.chat",
1143
+ ...this.baseConfig
1144
+ });
1145
+ }
1146
+ completion(modelId, settings = {}) {
1147
+ return new OpenAICompletionLanguageModel(modelId, settings, {
1148
+ provider: "openai.completion",
1149
+ ...this.baseConfig
1150
+ });
1116
1151
  }
1117
1152
  };
1153
+ var openai = new OpenAI();
1118
1154
  export {
1119
- OpenAI
1155
+ OpenAI,
1156
+ openai
1120
1157
  };
1121
1158
  //# sourceMappingURL=index.mjs.map