@ai-sdk/openai 2.0.0-canary.3 → 2.0.0-canary.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/dist/index.js +23 -36
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +23 -36
- package/dist/index.mjs.map +1 -1
- package/{internal/dist → dist/internal}/index.js +23 -36
- package/dist/internal/index.js.map +1 -0
- package/{internal/dist → dist/internal}/index.mjs +23 -36
- package/dist/internal/index.mjs.map +1 -0
- package/package.json +11 -12
- package/internal/dist/index.js.map +0 -1
- package/internal/dist/index.mjs.map +0 -1
- /package/{internal/dist → dist/internal}/index.d.mts +0 -0
- /package/{internal/dist → dist/internal}/index.d.ts +0 -0
package/dist/index.mjs
CHANGED
@@ -620,10 +620,12 @@ var OpenAIChatLanguageModel = class {
         promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
         completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
       },
-
-
-
-
+      request: { body },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
+      },
       warnings,
       logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
       providerMetadata
@@ -668,8 +670,7 @@ var OpenAIChatLanguageModel = class {
       });
       return {
         stream: simulatedStream,
-
-        rawResponse: result.rawResponse,
+        response: result.response,
         warnings: result.warnings
       };
     }
@@ -878,9 +879,8 @@ var OpenAIChatLanguageModel = class {
           }
         })
       ),
-
-
-      request: { body: JSON.stringify(body) },
+      request: { body },
+      response: { headers: responseHeaders },
       warnings
     };
   }
@@ -1219,7 +1219,6 @@ var OpenAICompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     const choice = response.choices[0];
     return {
       text: choice.text,
@@ -1229,11 +1228,13 @@ var OpenAICompletionLanguageModel = class {
       },
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
-
-
-
-
-
+      request: { body: args },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
+      },
+      warnings
     };
   }
   async doStream(options) {
@@ -1258,7 +1259,6 @@ var OpenAICompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
     let usage = {
       promptTokens: Number.NaN,
@@ -1322,8 +1322,7 @@ var OpenAICompletionLanguageModel = class {
           }
         })
       ),
-
-      rawResponse: { headers: responseHeaders },
+      response: { headers: responseHeaders },
       warnings,
       request: { body: JSON.stringify(body) }
     };
@@ -2177,21 +2176,13 @@ var OpenAIResponsesLanguageModel = class {
         promptTokens: response.usage.input_tokens,
         completionTokens: response.usage.output_tokens
       },
-
-        rawPrompt: void 0,
-        rawSettings: {}
-      },
-      rawResponse: {
-        headers: responseHeaders,
-        body: rawResponse
-      },
-      request: {
-        body: JSON.stringify(body)
-      },
+      request: { body },
       response: {
         id: response.id,
         timestamp: new Date(response.created_at * 1e3),
-        modelId: response.model
+        modelId: response.model,
+        headers: responseHeaders,
+        body: rawResponse
       },
       providerMetadata: {
         openai: {
@@ -2329,12 +2320,8 @@ var OpenAIResponsesLanguageModel = class {
           }
         })
       ),
-
-
-        rawSettings: {}
-      },
-      rawResponse: { headers: responseHeaders },
-      request: { body: JSON.stringify(body) },
+      request: { body },
+      response: { headers: responseHeaders },
       warnings
     };
   }
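
Across all three model classes, the hunks above make the same change to the generate/stream result shape: the stringified `request: { body: JSON.stringify(body) }` becomes `request: { body }`, and the old `rawResponse` / `rawCall` fields are folded into a single `response` object that carries response metadata together with `headers` and the raw `body`. Below is a minimal consumer-side sketch of that reshaped result, assuming only the field names visible in the added lines; the `GenerateResult` interface and `logResponseInfo` helper are hypothetical illustrations, not types or APIs exported by the package.

// Result shape as suggested by the added lines in this diff (assumed, not
// the package's published types).
interface GenerateResult {
  request: { body: unknown };             // previously { body: JSON.stringify(body) }
  response: {
    id?: string;                          // response metadata
    timestamp?: Date;
    modelId?: string;
    headers?: Record<string, string>;     // previously rawResponse.headers
    body?: unknown;                       // previously rawResponse.body
  };
  warnings?: unknown[];
}

// Hypothetical caller: code that read result.rawResponse.headers against
// canary.3 would read result.response.headers against canary.5.
function logResponseInfo(result: GenerateResult): void {
  console.log("model:", result.response.modelId);
  console.log("headers:", result.response.headers);
}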