@ai-sdk/openai 2.0.0-canary.3 → 2.0.0-canary.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
 # @ai-sdk/openai
 
+## 2.0.0-canary.5
+
+### Patch Changes
+
+- Updated dependencies [6f6bb89]
+  - @ai-sdk/provider@2.0.0-canary.4
+  - @ai-sdk/provider-utils@3.0.0-canary.5
+
+## 2.0.0-canary.4
+
+### Patch Changes
+
+- Updated dependencies [d1a1aa1]
+  - @ai-sdk/provider@2.0.0-canary.3
+  - @ai-sdk/provider-utils@3.0.0-canary.4
+
 ## 2.0.0-canary.3
 
 ### Patch Changes
package/dist/index.js CHANGED
@@ -630,10 +630,12 @@ var OpenAIChatLanguageModel = class {
         promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
         completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
       },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      request: { body: JSON.stringify(body) },
-      response: getResponseMetadata(response),
+      request: { body },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
+      },
       warnings,
       logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
       providerMetadata
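Note on the hunk above: the chat model's doGenerate result drops the separate rawCall/rawResponse fields. The HTTP headers and raw body now sit on the response object next to the metadata produced by getResponseMetadata, and request.body carries the request object itself rather than a pre-serialized JSON string. A minimal TypeScript sketch of the shape change, inferred from this diff only (field optionality and the header type are assumptions, not the package's published types):

// Sketch of the result shape before and after this change.
// Inferred from the diff above; optionality and header typing are assumptions.
type OldDoGenerateResult = {
  rawCall: { rawPrompt: unknown; rawSettings: Record<string, unknown> };
  rawResponse: { headers?: Record<string, string>; body?: unknown };
  request: { body: string }; // was JSON.stringify(body)
  response: { id?: string; timestamp?: Date; modelId?: string };
};

type NewDoGenerateResult = {
  request: { body: unknown }; // the structured request body object
  response: {
    id?: string;
    timestamp?: Date;
    modelId?: string;
    headers?: Record<string, string>; // formerly rawResponse.headers
    body?: unknown; // formerly rawResponse.body
  };
};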
@@ -678,8 +680,7 @@ var OpenAIChatLanguageModel = class {
       });
       return {
         stream: simulatedStream,
-        rawCall: result.rawCall,
-        rawResponse: result.rawResponse,
+        response: result.response,
         warnings: result.warnings
       };
     }
@@ -888,9 +889,8 @@ var OpenAIChatLanguageModel = class {
          }
        })
      ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      request: { body: JSON.stringify(body) },
+      request: { body },
+      response: { headers: responseHeaders },
      warnings
    };
  }
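The streaming path gets the same consolidation, but only the headers are known up front (the body arrives through the stream), so the result now carries response: { headers } where it previously carried rawResponse: { headers }. A hedged sketch of adapting code that reads headers from a doStream result; obtainStreamResult is a hypothetical stand-in for whatever produces that result:

// Hypothetical stand-in for the code that calls doStream; only the
// property access on the result reflects the change in the diff above.
declare function obtainStreamResult(): Promise<{
  stream: ReadableStream<unknown>;
  response?: { headers?: Record<string, string> };
  warnings?: unknown[];
}>;

async function logRequestId(): Promise<void> {
  const result = await obtainStreamResult();
  // Previously: result.rawResponse?.headers?.["x-request-id"]
  const requestId = result.response?.headers?.["x-request-id"];
  console.log("request id:", requestId);
}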
@@ -1221,7 +1221,6 @@ var OpenAICompletionLanguageModel = class {
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
-    const { prompt: rawPrompt, ...rawSettings } = args;
    const choice = response.choices[0];
    return {
      text: choice.text,
@@ -1231,11 +1230,13 @@ var OpenAICompletionLanguageModel = class {
      },
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
      logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      response: getResponseMetadata(response),
-      warnings,
-      request: { body: JSON.stringify(args) }
+      request: { body: args },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
+      },
+      warnings
    };
  }
  async doStream(options) {
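One practical consequence visible in the completion model's doGenerate hunk above: request.body used to be the already-serialized string JSON.stringify(args) and is now the structured args object. A small, hedged sketch of how logging code might cope with both shapes during the transition (the result value here is an assumed shape, not an exact API):

// Assumed shape: request.body may be a string (old) or an object (new).
declare const result: { request?: { body?: unknown } };

const requestBodyText =
  typeof result.request?.body === "string"
    ? result.request.body // old behavior: pre-serialized JSON string
    : JSON.stringify(result.request?.body); // new behavior: serialize on demand

console.log("request body:", requestBodyText);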
@@ -1260,7 +1261,6 @@ var OpenAICompletionLanguageModel = class {
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
-    const { prompt: rawPrompt, ...rawSettings } = args;
    let finishReason = "unknown";
    let usage = {
      promptTokens: Number.NaN,
@@ -1324,8 +1324,7 @@ var OpenAICompletionLanguageModel = class {
          }
        })
      ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
+      response: { headers: responseHeaders },
      warnings,
      request: { body: JSON.stringify(body) }
    };
@@ -2152,21 +2151,13 @@ var OpenAIResponsesLanguageModel = class {
        promptTokens: response.usage.input_tokens,
        completionTokens: response.usage.output_tokens
      },
-      rawCall: {
-        rawPrompt: void 0,
-        rawSettings: {}
-      },
-      rawResponse: {
-        headers: responseHeaders,
-        body: rawResponse
-      },
-      request: {
-        body: JSON.stringify(body)
-      },
+      request: { body },
      response: {
        id: response.id,
        timestamp: new Date(response.created_at * 1e3),
-        modelId: response.model
+        modelId: response.model,
+        headers: responseHeaders,
+        body: rawResponse
      },
      providerMetadata: {
        openai: {
@@ -2304,12 +2295,8 @@ var OpenAIResponsesLanguageModel = class {
          }
        })
      ),
-      rawCall: {
-        rawPrompt: void 0,
-        rawSettings: {}
-      },
-      rawResponse: { headers: responseHeaders },
-      request: { body: JSON.stringify(body) },
+      request: { body },
+      response: { headers: responseHeaders },
      warnings
    };
  }