ai 4.1.42 → 4.1.44
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +35 -3
- package/dist/index.d.ts +35 -3
- package/dist/index.js +99 -75
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +62 -35
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
```diff
@@ -918,6 +918,10 @@ var DefaultGeneratedImage = class {
 };
 
 // core/generate-object/generate-object.ts
+import {
+  JSONParseError,
+  TypeValidationError as TypeValidationError2
+} from "@ai-sdk/provider";
 import { createIdGenerator, safeParseJSON } from "@ai-sdk/provider-utils";
 
 // errors/no-object-generated-error.ts
```
```diff
@@ -2370,6 +2374,7 @@ async function generateObject({
   maxRetries: maxRetriesArg,
   abortSignal,
   headers,
+  experimental_repairText: repairText,
   experimental_telemetry: telemetry,
   experimental_providerMetadata,
   providerOptions = experimental_providerMetadata,
```
```diff
@@ -2670,32 +2675,52 @@ async function generateObject({
           throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
         }
       }
-
-
-
-
-
-
-
-
-
+      function processResult(result2) {
+        const parseResult = safeParseJSON({ text: result2 });
+        if (!parseResult.success) {
+          throw new NoObjectGeneratedError({
+            message: "No object generated: could not parse the response.",
+            cause: parseResult.error,
+            text: result2,
+            response,
+            usage: calculateLanguageModelUsage(usage)
+          });
+        }
+        const validationResult = outputStrategy.validateFinalResult(
+          parseResult.value,
+          {
+            text: result2,
+            response,
+            usage: calculateLanguageModelUsage(usage)
+          }
+        );
+        if (!validationResult.success) {
+          throw new NoObjectGeneratedError({
+            message: "No object generated: response did not match schema.",
+            cause: validationResult.error,
+            text: result2,
+            response,
+            usage: calculateLanguageModelUsage(usage)
+          });
+        }
+        return validationResult.value;
       }
-
-
-
-
-
-
+      let object2;
+      try {
+        object2 = processResult(result);
+      } catch (error) {
+        if (repairText != null && NoObjectGeneratedError.isInstance(error) && (JSONParseError.isInstance(error.cause) || TypeValidationError2.isInstance(error.cause))) {
+          const repairedText = await repairText({
+            text: result,
+            error: error.cause
+          });
+          if (repairedText === null) {
+            throw error;
+          }
+          object2 = processResult(repairedText);
+        } else {
+          throw error;
        }
-      );
-      if (!validationResult.success) {
-        throw new NoObjectGeneratedError({
-          message: "No object generated: response did not match schema.",
-          cause: validationResult.error,
-          text: result,
-          response,
-          usage: calculateLanguageModelUsage(usage)
-        });
      }
      span.setAttributes(
        selectTelemetryAttributes({
```
```diff
@@ -2703,7 +2728,7 @@ async function generateObject({
           attributes: {
             "ai.response.finishReason": finishReason,
             "ai.response.object": {
-              output: () => JSON.stringify(
+              output: () => JSON.stringify(object2)
             },
             "ai.usage.promptTokens": usage.promptTokens,
             "ai.usage.completionTokens": usage.completionTokens
```
```diff
@@ -2711,7 +2736,7 @@ async function generateObject({
         })
       );
       return new DefaultGenerateObjectResult({
-        object:
+        object: object2,
         finishReason,
         usage: calculateLanguageModelUsage(usage),
         warnings,
```
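Taken together, the hunks above add an `experimental_repairText` hook to `generateObject`: when the raw model output fails JSON parsing or schema validation, the hook receives the original text and the `JSONParseError`/`TypeValidationError` cause, and its return value (or `null` to give up and rethrow) is fed back through the same parse-and-validate path. A minimal usage sketch follows; the provider, model id, schema, and the brace-trimming repair strategy are illustrative assumptions, not part of the diff:

```ts
import { generateObject } from "ai";
import { openai } from "@ai-sdk/openai"; // assumed provider for illustration
import { z } from "zod";

const { object } = await generateObject({
  model: openai("gpt-4o-mini"),
  schema: z.object({ city: z.string(), population: z.number() }),
  prompt: "Return the largest city in Germany as JSON.",
  // Invoked only when parsing or validation fails (per the hunk above,
  // `error` is the JSONParseError or TypeValidationError cause).
  experimental_repairText: async ({ text, error }) => {
    console.warn("repairing object output:", error.message);
    // Hypothetical repair: keep only the outermost JSON object, dropping
    // any prose or code fences the model wrapped around it.
    const start = text.indexOf("{");
    const end = text.lastIndexOf("}");
    if (start === -1 || end === -1) {
      return null; // nothing recoverable; rethrow the original error
    }
    return text.slice(start, end + 1);
  },
});

console.log(object);
```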
```diff
@@ -4216,11 +4241,11 @@ import {
   EmptyResponseBodyError,
   InvalidPromptError as InvalidPromptError2,
   InvalidResponseDataError,
-  JSONParseError,
+  JSONParseError as JSONParseError2,
   LoadAPIKeyError,
   NoContentGeneratedError,
   NoSuchModelError,
-  TypeValidationError as
+  TypeValidationError as TypeValidationError3,
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
 
```
```diff
@@ -5768,17 +5793,19 @@ function getPotentialStartIndex(text2, searchedText) {
 // core/middleware/extract-reasoning-middleware.ts
 function extractReasoningMiddleware({
   tagName,
-  separator = "\n"
+  separator = "\n",
+  startWithReasoning = false
 }) {
   const openingTag = `<${tagName}>`;
   const closingTag = `</${tagName}>`;
   return {
     middlewareVersion: "v1",
     wrapGenerate: async ({ doGenerate }) => {
-      const { text:
-      if (
-      return { text:
+      const { text: rawText, ...rest } = await doGenerate();
+      if (rawText == null) {
+        return { text: rawText, ...rest };
       }
+      const text2 = startWithReasoning ? openingTag + rawText : rawText;
       const regexp = new RegExp(`${openingTag}(.*?)${closingTag}`, "gs");
       const matches = Array.from(text2.matchAll(regexp));
       if (!matches.length) {
```
```diff
@@ -5801,7 +5828,7 @@ function extractReasoningMiddleware({
       let isFirstReasoning = true;
       let isFirstText = true;
       let afterSwitch = false;
-      let isReasoning =
+      let isReasoning = startWithReasoning;
       let buffer = "";
       return {
         stream: stream.pipeThrough(
```
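The two hunks above add a `startWithReasoning` option to `extractReasoningMiddleware`: when set, the non-streaming path prepends the opening tag to the generated text before matching, and the streaming path starts in reasoning mode, so responses that emit only the closing tag are still split into reasoning and answer. A short wiring sketch; the provider, model id, and tag name are assumptions for illustration:

```ts
import { generateText, wrapLanguageModel, extractReasoningMiddleware } from "ai";
import { openai } from "@ai-sdk/openai"; // assumed provider for illustration

// Assumption: the wrapped model emits "…reasoning…</think>answer" without
// the opening <think> tag; startWithReasoning makes the middleware treat
// the response as already inside the reasoning section.
const model = wrapLanguageModel({
  model: openai("gpt-4o-mini"),
  middleware: extractReasoningMiddleware({
    tagName: "think",
    startWithReasoning: true,
  }),
});

const { text, reasoning } = await generateText({
  model,
  prompt: "Why is the sky blue?",
});

console.log(reasoning); // extracted reasoning section
console.log(text); // remaining answer text
```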
```diff
@@ -6576,7 +6603,7 @@ export {
   InvalidPromptError2 as InvalidPromptError,
   InvalidResponseDataError,
   InvalidToolArgumentsError,
-  JSONParseError,
+  JSONParseError2 as JSONParseError,
   langchain_adapter_exports as LangChainAdapter,
   llamaindex_adapter_exports as LlamaIndexAdapter,
   LoadAPIKeyError,
```
```diff
@@ -6593,7 +6620,7 @@ export {
   StreamData,
   ToolCallRepairError,
   ToolExecutionError,
-
+  TypeValidationError3 as TypeValidationError,
   UnsupportedFunctionalityError2 as UnsupportedFunctionalityError,
   appendClientMessage,
   appendResponseMessages,
```