@ai-sdk/openai-compatible 1.0.0-canary.4 → 1.0.0-canary.6

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,26 @@
 # @ai-sdk/openai-compatible

+## 1.0.0-canary.6
+
+### Patch Changes
+
+- 6db02c9: chore(openai-compatible): remove simulateStreaming
+- Updated dependencies [411e483]
+- Updated dependencies [79457bd]
+- Updated dependencies [ad80501]
+- Updated dependencies [1766ede]
+- Updated dependencies [f10304b]
+  - @ai-sdk/provider@2.0.0-canary.5
+  - @ai-sdk/provider-utils@3.0.0-canary.6
+
+## 1.0.0-canary.5
+
+### Patch Changes
+
+- Updated dependencies [6f6bb89]
+  - @ai-sdk/provider@2.0.0-canary.4
+  - @ai-sdk/provider-utils@3.0.0-canary.5
+
 ## 1.0.0-canary.4

 ### Patch Changes
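
Note: the one functional change in canary.6 is the removal of the `simulateStreaming` chat setting (see the type and implementation diffs below). A minimal migration sketch, assuming the AI SDK core package `ai` exposes `wrapLanguageModel` and `simulateStreamingMiddleware`; the provider name, baseURL, and model id are placeholders:

```ts
// Migration sketch, not part of this package. Assumes the `ai` core
// canary exposes wrapLanguageModel and simulateStreamingMiddleware.
import { wrapLanguageModel, simulateStreamingMiddleware } from 'ai';
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';

const provider = createOpenAICompatible({
  name: 'example',
  baseURL: 'https://api.example.com/v1',
});

// Replaces `simulateStreaming: true`: generate once, replay as a stream.
const model = wrapLanguageModel({
  model: provider('example-model'),
  middleware: simulateStreamingMiddleware(),
});
```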
package/dist/index.d.mts CHANGED
@@ -9,13 +9,6 @@ interface OpenAICompatibleChatSettings {
   monitor and detect abuse.
   */
   user?: string;
-  /**
-  Simulates streaming by using a normal generate call and returning it as a stream.
-  Enable this if the model that you are using does not support streaming.
-
-  Defaults to `false`.
-  */
-  simulateStreaming?: boolean;
 }

 declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
package/dist/index.d.ts CHANGED
@@ -9,13 +9,6 @@ interface OpenAICompatibleChatSettings {
   monitor and detect abuse.
   */
   user?: string;
-  /**
-  Simulates streaming by using a normal generate call and returning it as a stream.
-  Enable this if the model that you are using does not support streaming.
-
-  Defaults to `false`.
-  */
-  simulateStreaming?: boolean;
 }

 declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
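
With the setting gone from both type-definition files, `user` is the only model-specific field left in the visible part of `OpenAICompatibleChatSettings`. A hedged sketch of the resulting call site (provider values are placeholders):

```ts
// Sketch with placeholder values; `user` remains a supported setting,
// while `simulateStreaming` no longer type-checks after this change.
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';

const provider = createOpenAICompatible({
  name: 'example',
  baseURL: 'https://api.example.com/v1',
});

const model = provider.chatModel('example-model', {
  user: 'end-user-1234', // forwarded so the upstream API can detect abuse
  // simulateStreaming: true, // removed in canary.6: now a type error
});
```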
package/dist/index.js CHANGED
@@ -268,7 +268,7 @@ var OpenAICompatibleChatLanguageModel = class {
   }
   getArgs({
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -308,7 +308,7 @@ var OpenAICompatibleChatLanguageModel = class {
       // model specific settings:
       user: this.settings.user,
       // standardized settings:
-      max_tokens: maxTokens,
+      max_tokens: maxOutputTokens,
       temperature,
       top_p: topP,
       frequency_penalty: frequencyPenalty,
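
These two hunks rename the standardized option from `maxTokens` to `maxOutputTokens`; the wire format still uses `max_tokens`. A hedged usage sketch via the core `generateText` helper, assuming the `ai` canary where the option was renamed in lockstep:

```ts
import { generateText } from 'ai';

const { text } = await generateText({
  model, // e.g. the openai-compatible model constructed above
  prompt: 'Write a haiku about diffs.',
  maxOutputTokens: 256, // previously `maxTokens`; mapped to `max_tokens`
});
```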
@@ -391,11 +391,10 @@ var OpenAICompatibleChatLanguageModel = class {
       }),
       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
       usage: {
-        promptTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.prompt_tokens) != null ? _i : NaN,
-        completionTokens: (_k = (_j = responseBody.usage) == null ? void 0 : _j.completion_tokens) != null ? _k : NaN
+        inputTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.prompt_tokens) != null ? _i : void 0,
+        outputTokens: (_k = (_j = responseBody.usage) == null ? void 0 : _j.completion_tokens) != null ? _k : void 0
       },
       providerMetadata,
-      rawCall: { rawPrompt, rawSettings },
       request: { body },
       response: {
         ...getResponseMetadata(responseBody),
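
This hunk renames the usage fields (`promptTokens`/`completionTokens` become `inputTokens`/`outputTokens`), makes missing counts `undefined` instead of `NaN`, and drops the `rawCall` echo from the result. The compiled mapping above boils down to:

```ts
// Plain-TypeScript equivalent of the compiled usage mapping above.
// Counts the server omits are now undefined rather than NaN.
function mapUsage(usage?: { prompt_tokens?: number; completion_tokens?: number }) {
  return {
    inputTokens: usage?.prompt_tokens ?? undefined,
    outputTokens: usage?.completion_tokens ?? undefined,
  };
}
```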
@@ -407,59 +406,6 @@ var OpenAICompatibleChatLanguageModel = class {
   }
   async doStream(options) {
     var _a;
-    if (this.settings.simulateStreaming) {
-      const result = await this.doGenerate(options);
-      const simulatedStream = new ReadableStream({
-        start(controller) {
-          controller.enqueue({ type: "response-metadata", ...result.response });
-          if (result.reasoning) {
-            if (Array.isArray(result.reasoning)) {
-              for (const part of result.reasoning) {
-                if (part.type === "text") {
-                  controller.enqueue({
-                    type: "reasoning",
-                    textDelta: part.text
-                  });
-                }
-              }
-            } else {
-              controller.enqueue({
-                type: "reasoning",
-                textDelta: result.reasoning
-              });
-            }
-          }
-          if (result.text) {
-            controller.enqueue({
-              type: "text-delta",
-              textDelta: result.text
-            });
-          }
-          if (result.toolCalls) {
-            for (const toolCall of result.toolCalls) {
-              controller.enqueue({
-                type: "tool-call",
-                ...toolCall
-              });
-            }
-          }
-          controller.enqueue({
-            type: "finish",
-            finishReason: result.finishReason,
-            usage: result.usage,
-            logprobs: result.logprobs,
-            providerMetadata: result.providerMetadata
-          });
-          controller.close();
-        }
-      });
-      return {
-        stream: simulatedStream,
-        rawCall: result.rawCall,
-        response: result.response,
-        warnings: result.warnings
-      };
-    }
     const { args, warnings } = this.getArgs({ ...options });
     const body = JSON.stringify({ ...args, stream: true });
     const metadataExtractor = (_a = this.config.metadataExtractor) == null ? void 0 : _a.createStreamExtractor();
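
The removed branch above was the whole simulate-streaming implementation: one `doGenerate` call replayed through a `ReadableStream`. If you need the behavior without middleware, a trimmed hand-rolled sketch of the same idea (text deltas only; `model` and `options` are placeholders for whatever you would have passed to `doStream`):

```ts
// Hand-rolled stand-in for the removed branch, reduced to text output.
// Mirrors the deleted code: one generate call, replayed as stream parts.
async function simulateStream(model: any, options: any) {
  const result = await model.doGenerate(options);
  const stream = new ReadableStream({
    start(controller) {
      controller.enqueue({ type: 'response-metadata', ...result.response });
      if (result.text) {
        controller.enqueue({ type: 'text-delta', textDelta: result.text });
      }
      controller.enqueue({
        type: 'finish',
        finishReason: result.finishReason,
        usage: result.usage,
      });
      controller.close();
    },
  });
  return { stream, response: result.response, warnings: result.warnings };
}
```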
@@ -480,7 +426,6 @@ var OpenAICompatibleChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
     let usage = {
@@ -670,15 +615,14 @@ var OpenAICompatibleChatLanguageModel = class {
             type: "finish",
             finishReason,
             usage: {
-              promptTokens: (_a2 = usage.promptTokens) != null ? _a2 : NaN,
-              completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
+              inputTokens: (_a2 = usage.promptTokens) != null ? _a2 : void 0,
+              outputTokens: (_b = usage.completionTokens) != null ? _b : void 0
             },
             providerMetadata
           });
         }
       })
     ),
-      rawCall: { rawPrompt, rawSettings },
       request: { body },
       response: { headers: responseHeaders },
       warnings
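
The stream's `finish` part now carries the same renamed, possibly-`undefined` usage fields. A hedged consumer sketch; `streamParts` is a placeholder for however you obtain the provider's stream parts (e.g. reading the `stream` returned by `doStream`):

```ts
// Placeholder async iterable of stream parts; shapes follow the hunk above.
declare const streamParts: AsyncIterable<any>;

for await (const part of streamParts) {
  if (part.type === 'finish') {
    console.log('input tokens:', part.usage.inputTokens ?? 'unknown');
    console.log('output tokens:', part.usage.outputTokens ?? 'unknown');
  }
}
```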
@@ -862,7 +806,7 @@ var OpenAICompatibleCompletionLanguageModel = class {
   getArgs({
     inputFormat,
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -904,7 +848,7 @@ var OpenAICompatibleCompletionLanguageModel = class {
       suffix: this.settings.suffix,
       user: this.settings.user,
       // standardized settings:
-      max_tokens: maxTokens,
+      max_tokens: maxOutputTokens,
       temperature,
       top_p: topP,
       frequency_penalty: frequencyPenalty,
@@ -940,17 +884,15 @@ var OpenAICompatibleCompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     const choice = response.choices[0];
     return {
       text: choice.text,
       usage: {
-        promptTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : NaN,
-        completionTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : NaN
+        inputTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : void 0,
+        outputTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : void 0
       },
       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
-      rawCall: { rawPrompt, rawSettings },
-      request: { body: JSON.stringify(args) },
+      request: { body: args },
       response: {
         ...getResponseMetadata(response),
         headers: responseHeaders,
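
Besides the usage rename, this hunk changes `request.body` from a pre-serialized JSON string to the raw args object. Callers that logged the body verbatim may now want to stringify it themselves, e.g.:

```ts
// `model` and `options` are placeholders for a completion model instance
// and its call options. `request.body` is now an object, not a JSON string.
const result = await model.doGenerate(options);
console.log(JSON.stringify(result.request?.body, null, 2));
```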
@@ -979,17 +921,17 @@ var OpenAICompatibleCompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
-    let usage = {
-      promptTokens: Number.NaN,
-      completionTokens: Number.NaN
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0
     };
     let isFirstChunk = true;
     return {
       stream: response.pipeThrough(
         new TransformStream({
           transform(chunk, controller) {
+            var _a, _b;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -1009,10 +951,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
             });
           }
           if (value.usage != null) {
-            usage = {
-              promptTokens: value.usage.prompt_tokens,
-              completionTokens: value.usage.completion_tokens
-            };
+            usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+            usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
           }
           const choice = value.choices[0];
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1036,8 +976,7 @@ var OpenAICompatibleCompletionLanguageModel = class {
         }
       })
     ),
-    rawCall: { rawPrompt, rawSettings },
-    request: { body: JSON.stringify(body) },
+    request: { body },
     response: { headers: responseHeaders },
     warnings
   };