@ai-sdk-tool/parser 3.1.2 → 3.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{chunk-TQT6XSP7.js → chunk-3KQVEBKO.js} +26 -688
- package/dist/chunk-3KQVEBKO.js.map +1 -0
- package/dist/community.cjs +101 -649
- package/dist/community.cjs.map +1 -1
- package/dist/community.js +1 -1
- package/dist/index.cjs +19 -684
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +2 -108
- package/dist/index.d.ts +2 -108
- package/dist/index.js +1 -7
- package/package.json +10 -9
- package/dist/chunk-TQT6XSP7.js.map +0 -1
package/dist/index.cjs
CHANGED
@@ -17,6 +17,7 @@ var __copyProps = (to, from, except, desc) => {
   }
   return to;
 };
+var __reExport = (target, mod, secondTarget) => (__copyProps(target, mod, "default"), secondTarget && __copyProps(secondTarget, mod, "default"));
 var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
   // If the importer is in node compatibility mode or this is not an ESM
   // file that has been converted to a CommonJS file using a Babel-
@@ -59,11 +60,8 @@ __export(src_exports, {
   mergePipelineConfigs: () => mergePipelineConfigs,
   normalizeCloseTagsHeuristic: () => normalizeCloseTagsHeuristic,
   originalToolsSchema: () => originalToolsSchema,
-  parse: () => parse2,
   repairAgainstSchemaHeuristic: () => repairAgainstSchemaHeuristic,
-  stringify: () => stringify,
   toolChoiceStream: () => toolChoiceStream,
-  transform: () => transform,
   transformParams: () => transformParams,
   wrapGenerate: () => wrapGenerate,
   wrapStream: () => wrapStream,
@@ -73,6 +71,7 @@ __export(src_exports, {
   yamlToolMiddleware: () => yamlToolMiddleware
 });
 module.exports = __toCommonJS(src_exports);
+__reExport(src_exports, require("@ai-sdk-tool/rjson"), module.exports);
 
 // src/core/heuristics/engine.ts
 function applyRawSegmentUpdate(current, result) {
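
Net effect of the two hunks above: parse, stringify, and transform are no longer defined in this bundle's export map; instead every named export of @ai-sdk-tool/rjson is forwarded onto src_exports and module.exports via the new __reExport helper. A minimal consumer-side sketch, assuming @ai-sdk-tool/rjson exposes the same parse/stringify/transform names that were dropped locally:

  // These imports should keep resolving after the upgrade; they are now re-exported
  // from @ai-sdk-tool/rjson rather than bundled into index.cjs.
  const { parse, stringify, transform } = require("@ai-sdk-tool/parser");
  const value = parse("{ relaxed: 'json', }"); // relaxed input, handled by the rjson dependency
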
@@ -99,12 +98,12 @@ function applyWarningsUpdate(current, result) {
   }
   return current;
 }
-function attemptReparse(current, result, reparseCount, maxReparses,
+function attemptReparse(current, result, reparseCount, maxReparses, parse3) {
   if (!result.reparse || result.rawSegment === void 0 || reparseCount >= maxReparses) {
     return { state: current, newCount: reparseCount };
   }
   try {
-    const reparsed =
+    const reparsed = parse3(result.rawSegment, current.schema);
     return {
       state: { ...current, parsed: reparsed, errors: [] },
       newCount: reparseCount + 1
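
attemptReparse now receives the parse function as its fifth argument (parse3) instead of closing over a bundled parser, so the reparse path uses whatever parser the caller injects. A hedged, illustrative sketch of that shape (the call site below is not copied from the bundle):

  // Hypothetical caller: inject a function compatible with parse3(rawSegment, schema).
  const rjson = require("@ai-sdk-tool/rjson");
  const reparseWith = (raw, _schema) => rjson.parse(raw); // schema handling is left to the caller
  const { state, newCount } = attemptReparse(current, result, reparseCount, maxReparses, reparseWith);
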
@@ -541,6 +540,9 @@ function extractStepStatusFromString(normXml) {
   return null;
 }
 
+// src/core/protocols/json-protocol.ts
+var import_rjson = require("@ai-sdk-tool/rjson");
+
 // src/core/utils/debug.ts
 var LINE_SPLIT_REGEX = /\r?\n/;
 function normalizeBooleanString(value) {
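
The json-protocol section now requires @ai-sdk-tool/rjson up front; later hunks call it as (0, import_rjson.parse)(text). A short note on that call form, with an equivalent spelled out (illustrative, standard CommonJS-interop output rather than anything new in this package):

  var import_rjson = require("@ai-sdk-tool/rjson");
  // (0, import_rjson.parse)(text) calls the export as a plain function, i.e. without
  // import_rjson bound as `this`; it behaves the same as:
  const parse = import_rjson.parse;
  const parsed = parse('{"city": "Berlin"}');
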
@@ -724,676 +726,11 @@ function escapeRegExp2(literal) {
   return literal.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
 }
 
-// src/core/utils/robust-json.ts
-var WHITESPACE_TEST_REGEX = /\s/;
-var WHITESPACE_REGEX2 = /^\s+/;
-var OBJECT_START_REGEX = /^\{/;
-var OBJECT_END_REGEX = /^\}/;
-var ARRAY_START_REGEX = /^\[/;
-var ARRAY_END_REGEX = /^\]/;
-var COMMA_REGEX = /^,/;
-var COLON_REGEX = /^:/;
-var KEYWORD_REGEX = /^(?:true|false|null)/;
-var NUMBER_REGEX = /^-?\d+(?:\.\d+)?(?:[eE][+-]?\d+)?/;
-var STRING_DOUBLE_REGEX = /^"(?:[^"\\]|\\["bnrtf\\/]|\\u[0-9a-fA-F]{4})*"/;
-var STRING_SINGLE_REGEX = /^'((?:[^'\\]|\\['bnrtf\\/]|\\u[0-9a-fA-F]{4})*)'/;
-var COMMENT_SINGLE_REGEX = /^\/\/.*?(?:\r\n|\r|\n)/;
-var COMMENT_MULTI_REGEX = /^\/\*[\s\S]*?\*\//;
-var IDENTIFIER_REGEX = /^[$a-zA-Z0-9_\-+.*?!|&%^/#\\]+/;
-function some(array, f) {
-  let acc = false;
-  for (let i = 0; i < array.length; i += 1) {
-    const result = f(array[i], i, array);
-    acc = result === void 0 ? false : result;
-    if (acc) {
-      return acc;
-    }
-  }
-  return acc;
-}
-function makeLexer(tokenSpecs) {
-  return (contents) => {
-    const tokens = [];
-    let line = 1;
-    let remainingContents = contents;
-    function findToken() {
-      const result = some(tokenSpecs, (tokenSpec) => {
-        const m = tokenSpec.re.exec(remainingContents);
-        if (m) {
-          const raw = m[0];
-          remainingContents = remainingContents.slice(raw.length);
-          return {
-            raw,
-            matched: tokenSpec.f(m)
-            // Process the match using the spec's function
-          };
-        }
-        return;
-      });
-      return result === false ? void 0 : result;
-    }
-    while (remainingContents !== "") {
-      const matched = findToken();
-      if (!matched) {
-        const err = new SyntaxError(
-          `Unexpected character: ${remainingContents[0]}; input: ${remainingContents.substr(
-            0,
-            100
-          )}`
-        );
-        err.line = line;
-        throw err;
-      }
-      const tokenWithLine = matched.matched;
-      tokenWithLine.line = line;
-      line += matched.raw.replace(/[^\n]/g, "").length;
-      tokens.push(tokenWithLine);
-    }
-    return tokens;
-  };
-}
-function fStringSingle(m) {
-  const content = m[1].replace(
-    /([^'\\]|\\['bnrtf\\]|\\u[0-9a-fA-F]{4})/g,
-    (mm) => {
-      if (mm === '"') {
-        return '\\"';
-      }
-      if (mm === "\\'") {
-        return "'";
-      }
-      return mm;
-    }
-  );
-  const match = `"${content}"`;
-  return {
-    type: "string",
-    match,
-    // The transformed, double-quoted string representation
-    // Use JSON.parse on the transformed string to handle escape sequences correctly
-    value: JSON.parse(match)
-  };
-}
-function fStringDouble(m) {
-  return {
-    type: "string",
-    match: m[0],
-    // The raw matched string (including quotes)
-    value: JSON.parse(m[0])
-    // Use JSON.parse to handle escapes and get the value
-  };
-}
-function fIdentifier(m) {
-  const value = m[0];
-  const match = '"' + value.replace(/\\/g, "\\\\").replace(/"/g, '\\"') + // Escape backslashes and quotes
-  '"';
-  return {
-    type: "string",
-    // Treat identifiers as strings
-    value,
-    // The original identifier name
-    match
-    // The double-quoted string representation
-  };
-}
-function fComment(m) {
-  const match = m[0].replace(
-    /./g,
-    (c) => WHITESPACE_TEST_REGEX.test(c) ? c : " "
-  );
-  return {
-    type: " ",
-    // Represent comments as whitespace tokens
-    match,
-    // String containing original newlines and spaces for other chars
-    value: void 0
-    // Comments don't have a semantic value
-  };
-}
-function fNumber(m) {
-  return {
-    type: "number",
-    match: m[0],
-    // The raw matched number string
-    value: Number.parseFloat(m[0])
-    // Convert string to number
-  };
-}
-function fKeyword(m) {
-  let value;
-  switch (m[0]) {
-    case "null":
-      value = null;
-      break;
-    case "true":
-      value = true;
-      break;
-    case "false":
-      value = false;
-      break;
-    default:
-      throw new Error(`Unexpected keyword: ${m[0]}`);
-  }
-  return {
-    type: "atom",
-    // Use 'atom' type for these literals
-    match: m[0],
-    // The raw matched keyword
-    value
-    // The corresponding JavaScript value
-  };
-}
-function makeTokenSpecs(relaxed) {
-  function f(type) {
-    return (m) => {
-      return { type, match: m[0], value: void 0 };
-    };
-  }
-  let tokenSpecs = [
-    { re: WHITESPACE_REGEX2, f: f(" ") },
-    // Whitespace
-    { re: OBJECT_START_REGEX, f: f("{") },
-    // Object start
-    { re: OBJECT_END_REGEX, f: f("}") },
-    // Object end
-    { re: ARRAY_START_REGEX, f: f("[") },
-    // Array start
-    { re: ARRAY_END_REGEX, f: f("]") },
-    // Array end
-    { re: COMMA_REGEX, f: f(",") },
-    // Comma separator
-    { re: COLON_REGEX, f: f(":") },
-    // Key-value separator
-    { re: KEYWORD_REGEX, f: fKeyword },
-    // Keywords
-    // Number: optional sign, digits, optional decimal part, optional exponent
-    { re: NUMBER_REGEX, f: fNumber },
-    // String: double-quoted, handles escapes
-    { re: STRING_DOUBLE_REGEX, f: fStringDouble }
-  ];
-  if (relaxed) {
-    tokenSpecs = tokenSpecs.concat([
-      // Single-quoted strings
-      {
-        re: STRING_SINGLE_REGEX,
-        f: fStringSingle
-      },
-      // Single-line comments (// ...)
-      { re: COMMENT_SINGLE_REGEX, f: fComment },
-      // Multi-line comments (/* ... */)
-      { re: COMMENT_MULTI_REGEX, f: fComment },
-      // Unquoted identifiers (treated as strings)
-      // Allows letters, numbers, _, -, +, ., *, ?, !, |, &, %, ^, /, #, \
-      { re: IDENTIFIER_REGEX, f: fIdentifier }
-      // Note: The order matters here. Identifiers are checked after keywords/numbers.
-    ]);
-  }
-  return tokenSpecs;
-}
-var lexer = makeLexer(makeTokenSpecs(true));
-var strictLexer = makeLexer(makeTokenSpecs(false));
-function previousNWSToken(tokens, index) {
-  let currentIndex = index;
-  for (; currentIndex >= 0; currentIndex -= 1) {
-    if (tokens[currentIndex].type !== " ") {
-      return currentIndex;
-    }
-  }
-  return;
-}
-function stripTrailingComma(tokens) {
-  const res = [];
-  tokens.forEach((token, index) => {
-    if (index > 0 && (token.type === "]" || token.type === "}")) {
-      const prevNWSTokenIndex = previousNWSToken(res, res.length - 1);
-      if (prevNWSTokenIndex !== void 0 && res[prevNWSTokenIndex].type === ",") {
-        const preCommaIndex = previousNWSToken(res, prevNWSTokenIndex - 1);
-        if (preCommaIndex !== void 0 && res[preCommaIndex].type !== "[" && res[preCommaIndex].type !== "{") {
-          res[prevNWSTokenIndex] = {
-            type: " ",
-            match: " ",
-            // Represent as a single space
-            value: void 0,
-            // Whitespace has no value
-            line: res[prevNWSTokenIndex].line
-            // Preserve original line number
-          };
-        }
-      }
-    }
-    res.push(token);
-  });
-  return res;
-}
-function transform(text) {
-  let tokens = lexer(text);
-  tokens = stripTrailingComma(tokens);
-  return tokens.reduce((str, token) => str + token.match, "");
-}
-function popToken(tokens, state) {
-  var _a, _b;
-  const token = tokens[state.pos];
-  state.pos += 1;
-  if (!token) {
-    const lastLine = tokens.length !== 0 ? (_b = (_a = tokens.at(-1)) == null ? void 0 : _a.line) != null ? _b : 1 : 1;
-    return { type: "eof", match: "", value: void 0, line: lastLine };
-  }
-  return token;
-}
-function strToken(token) {
-  switch (token.type) {
-    case "atom":
-    case "string":
-    case "number":
-      return `${token.type} ${token.match}`;
-    case "eof":
-      return "end-of-file";
-    default:
-      return `'${token.type}'`;
-  }
-}
-function skipColon(tokens, state) {
-  const colon = popToken(tokens, state);
-  if (colon.type !== ":") {
-    const message = `Unexpected token: ${strToken(colon)}, expected ':'`;
-    if (state.tolerant) {
-      state.warnings.push({
-        message,
-        line: colon.line
-      });
-      state.pos -= 1;
-    } else {
-      const err = new SyntaxError(message);
-      err.line = colon.line;
-      throw err;
-    }
-  }
-}
-function skipPunctuation(tokens, state, valid) {
-  const punctuation = [",", ":", "]", "}"];
-  let token = popToken(tokens, state);
-  while (true) {
-    if (valid == null ? void 0 : valid.includes(token.type)) {
-      return token;
-    }
-    if (token.type === "eof") {
-      return token;
-    }
-    if (punctuation.includes(token.type)) {
-      const message = `Unexpected token: ${strToken(
-        token
-      )}, expected '[', '{', number, string or atom`;
-      if (state.tolerant) {
-        state.warnings.push({
-          message,
-          line: token.line
-        });
-        token = popToken(tokens, state);
-      } else {
-        const err = new SyntaxError(message);
-        err.line = token.line;
-        throw err;
-      }
-    } else {
-      return token;
-    }
-  }
-}
-function raiseError(state, token, message) {
-  if (state.tolerant) {
-    state.warnings.push({
-      message,
-      line: token.line
-    });
-  } else {
-    const err = new SyntaxError(message);
-    err.line = token.line;
-    throw err;
-  }
-}
-function raiseUnexpected(state, token, expected) {
-  raiseError(
-    state,
-    token,
-    `Unexpected token: ${strToken(token)}, expected ${expected}`
-  );
-}
-function checkDuplicates(state, obj, token) {
-  const key = String(token.value);
-  if (!state.duplicate && Object.hasOwn(obj, key)) {
-    raiseError(state, token, `Duplicate key: ${key}`);
-  }
-}
-function appendPair(state, obj, key, value) {
-  const finalValue = state.reviver ? state.reviver(key, value) : value;
-  if (finalValue !== void 0) {
-    obj[key] = finalValue;
-  }
-}
-function parsePair(tokens, state, obj) {
-  let token = skipPunctuation(tokens, state, [":", "string", "number", "atom"]);
-  let value;
-  if (token.type !== "string") {
-    raiseUnexpected(state, token, "string key");
-    if (state.tolerant) {
-      switch (token.type) {
-        case ":":
-          token = {
-            type: "string",
-            value: "null",
-            match: '"null"',
-            line: token.line
-          };
-          state.pos -= 1;
-          break;
-        case "number":
-        // Use number as string key
-        case "atom":
-          token = {
-            type: "string",
-            value: String(token.value),
-            match: `"${token.value}"`,
-            line: token.line
-          };
-          break;
-        case "[":
-        // Assume missing key before an array
-        case "{":
-          state.pos -= 1;
-          value = parseAny(tokens, state);
-          checkDuplicates(state, obj, {
-            type: "string",
-            value: "null",
-            match: '"null"',
-            line: token.line
-          });
-          appendPair(state, obj, "null", value);
-          return;
-        // Finished parsing this "pair"
-        case "eof":
-          return;
-        // Cannot recover
-        default:
-          return;
-      }
-    } else {
-      return;
-    }
-  }
-  checkDuplicates(state, obj, token);
-  const key = String(token.value);
-  skipColon(tokens, state);
-  value = parseAny(tokens, state);
-  appendPair(state, obj, key, value);
-}
-function parseElement(tokens, state, arr) {
-  const key = arr.length;
-  const value = parseAny(tokens, state);
-  arr[key] = state.reviver ? state.reviver(String(key), value) : value;
-}
-function parseObject(tokens, state) {
-  const obj = {};
-  return parseMany(tokens, state, obj, {
-    skip: [":", "}"],
-    // Initially skip over colon or closing brace (for empty/tolerant cases)
-    elementParser: parsePair,
-    // Use parsePair to parse each key-value element
-    elementName: "string key",
-    // Expected element type for errors
-    endSymbol: "}"
-    // The closing token for an object
-  });
-}
-function parseArray(tokens, state) {
-  const arr = [];
-  return parseMany(tokens, state, arr, {
-    skip: ["]"],
-    // Initially skip over closing bracket (for empty/tolerant cases)
-    elementParser: parseElement,
-    // Use parseElement to parse each array item
-    elementName: "json value",
-    // Expected element type for errors
-    endSymbol: "]"
-    // The closing token for an array
-  });
-}
-function handleInvalidToken(token, state, opts, result) {
-  raiseUnexpected(state, token, `',' or '${opts.endSymbol}'`);
-  if (state.tolerant) {
-    if (token.type === "eof") {
-      return result;
-    }
-    state.pos -= 1;
-    return null;
-  }
-  return result;
-}
-function handleCommaToken(params) {
-  const { token, tokens, state, opts, result } = params;
-  const nextToken = tokens[state.pos];
-  if (state.tolerant && nextToken && nextToken.type === opts.endSymbol) {
-    raiseError(state, token, `Trailing comma before '${opts.endSymbol}'`);
-    popToken(tokens, state);
-    return result;
-  }
-  opts.elementParser(tokens, state, result);
-  return null;
-}
-function parseManyInitialElement(tokens, state, result, opts) {
-  const token = skipPunctuation(tokens, state, opts.skip);
-  if (token.type === "eof") {
-    raiseUnexpected(state, token, `'${opts.endSymbol}' or ${opts.elementName}`);
-    return result;
-  }
-  if (token.type === opts.endSymbol) {
-    return result;
-  }
-  state.pos -= 1;
-  opts.elementParser(tokens, state, result);
-  return;
-}
-function parseManyProcessToken(params) {
-  const { token, tokens, state, opts, result } = params;
-  if (token.type !== opts.endSymbol && token.type !== ",") {
-    const handledResult = handleInvalidToken(token, state, opts, result);
-    if (handledResult !== null) {
-      return handledResult;
-    }
-  }
-  if (token.type === opts.endSymbol) {
-    return result;
-  }
-  if (token.type === ",") {
-    const handledResult = handleCommaToken({
-      token,
-      tokens,
-      state,
-      opts,
-      result
-    });
-    if (handledResult !== null) {
-      return handledResult;
-    }
-    return;
-  }
-  opts.elementParser(tokens, state, result);
-  return;
-}
-function parseMany(tokens, state, result, opts) {
-  const initialResult = parseManyInitialElement(tokens, state, result, opts);
-  if (initialResult !== void 0) {
-    return initialResult;
-  }
-  while (true) {
-    const token = popToken(tokens, state);
-    const processedResult = parseManyProcessToken({
-      token,
-      tokens,
-      state,
-      opts,
-      result
-    });
-    if (processedResult !== void 0) {
-      return processedResult;
-    }
-  }
-}
-function endChecks(tokens, state, ret) {
-  if (state.pos < tokens.length) {
-    if (state.tolerant) {
-      skipPunctuation(tokens, state);
-    }
-    if (state.pos < tokens.length) {
-      raiseError(
-        state,
-        tokens[state.pos],
-        `Unexpected token: ${strToken(tokens[state.pos])}, expected end-of-input`
-      );
-    }
-  }
-  if (state.tolerant && state.warnings.length > 0) {
-    const message = state.warnings.length === 1 ? state.warnings[0].message : `${state.warnings.length} parse warnings`;
-    const err = new SyntaxError(message);
-    err.line = state.warnings[0].line;
-    err.warnings = state.warnings;
-    err.obj = ret;
-    throw err;
-  }
-}
-function parseAny(tokens, state, end = false) {
-  const token = skipPunctuation(tokens, state);
-  let ret;
-  if (token.type === "eof") {
-    if (end) {
-      raiseUnexpected(state, token, "json value");
-    }
-    raiseUnexpected(state, token, "json value");
-    return;
-  }
-  switch (token.type) {
-    case "{":
-      ret = parseObject(tokens, state);
-      break;
-    case "[":
-      ret = parseArray(tokens, state);
-      break;
-    case "string":
-    // String literal
-    case "number":
-    // Number literal
-    case "atom":
-      ret = token.value;
-      break;
-    default:
-      raiseUnexpected(state, token, "json value");
-      if (state.tolerant) {
-        ret = null;
-      } else {
-        return;
-      }
-  }
-  if (end) {
-    ret = state.reviver ? state.reviver("", ret) : ret;
-    endChecks(tokens, state, ret);
-  }
-  return ret;
-}
-function normalizeParseOptions(optsOrReviver) {
-  var _a;
-  let options = {};
-  if (typeof optsOrReviver === "function") {
-    options.reviver = optsOrReviver;
-  } else if (optsOrReviver !== null && typeof optsOrReviver === "object") {
-    options = { ...optsOrReviver };
-  } else if (optsOrReviver !== void 0) {
-    throw new TypeError(
-      "Second argument must be a reviver function or an options object."
-    );
-  }
-  if (options.relaxed === void 0) {
-    if (options.warnings === true || options.tolerant === true) {
-      options.relaxed = true;
-    } else if (options.warnings === false && options.tolerant === false) {
-      options.relaxed = false;
-    } else {
-      options.relaxed = true;
-    }
-  }
-  options.tolerant = options.tolerant || options.warnings;
-  options.duplicate = (_a = options.duplicate) != null ? _a : false;
-  return options;
-}
-function createParseState(options) {
-  var _a, _b;
-  return {
-    pos: 0,
-    reviver: options.reviver,
-    tolerant: (_a = options.tolerant) != null ? _a : false,
-    duplicate: (_b = options.duplicate) != null ? _b : false,
-    warnings: []
-  };
-}
-function parseWithCustomParser(text, options) {
-  const lexerToUse = options.relaxed ? lexer : strictLexer;
-  let tokens = lexerToUse(text);
-  if (options.relaxed) {
-    tokens = stripTrailingComma(tokens);
-  }
-  tokens = tokens.filter((token) => token.type !== " ");
-  const state = createParseState(options);
-  return parseAny(tokens, state, true);
-}
-function parseWithTransform(text, options) {
-  let tokens = lexer(text);
-  tokens = stripTrailingComma(tokens);
-  const newtext = tokens.reduce((str, token) => str + token.match, "");
-  return JSON.parse(
-    newtext,
-    options.reviver
-  );
-}
-function parse2(text, optsOrReviver) {
-  const options = normalizeParseOptions(optsOrReviver);
-  if (!(options.relaxed || options.warnings || options.tolerant) && options.duplicate) {
-    return JSON.parse(
-      text,
-      options.reviver
-    );
-  }
-  if (options.warnings || options.tolerant || !options.duplicate) {
-    return parseWithCustomParser(text, options);
-  }
-  return parseWithTransform(text, options);
-}
-function stringifyPair(obj, key) {
-  return `${JSON.stringify(key)}:${stringify(obj[key])}`;
-}
-function stringify(obj) {
-  const type = typeof obj;
-  if (type === "string" || type === "number" || type === "boolean" || obj === null) {
-    return JSON.stringify(obj);
-  }
-  if (type === "undefined") {
-    return "null";
-  }
-  if (Array.isArray(obj)) {
-    const elements = obj.map(stringify).join(",");
-    return `[${elements}]`;
-  }
-  if (type === "object") {
-    const keys = Object.keys(obj);
-    keys.sort();
-    const pairs = keys.map((key) => stringifyPair(obj, key)).join(",");
-    return `{${pairs}}`;
-  }
-  return "null";
-}
-
 // src/core/protocols/json-protocol.ts
 function processToolCallJson(toolCallJson, fullMatch, processedElements, options) {
   var _a, _b;
   try {
-    const parsedToolCall =
+    const parsedToolCall = (0, import_rjson.parse)(toolCallJson);
     processedElements.push({
       type: "tool-call",
       toolCallId: generateId(),
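
The block removed above is the bundled relaxed-JSON module (src/core/utils/robust-json.ts): a lexer, a tolerant recursive-descent parser, and a key-sorting stringify. The protocol code now delegates to @ai-sdk-tool/rjson instead, as the new (0, import_rjson.parse)(toolCallJson) line shows. A hedged sketch of the behaviour the removed code implemented, assuming the extracted package keeps the same semantics and option names:

  const rjson = require("@ai-sdk-tool/rjson");

  // Relaxed mode (the default in the removed normalizeParseOptions) accepted unquoted
  // identifiers, single-quoted strings, comments, and trailing commas:
  rjson.parse("{ tool: 'search', args: { q: 'weather', }, /* model chatter */ }");

  // Tolerant mode recorded warnings instead of failing on the first problem; the removed
  // endChecks then threw a SyntaxError carrying the warnings and the partial result:
  try {
    rjson.parse("{ a: 1 b: 2 }", { tolerant: true });
  } catch (err) {
    // err.warnings (missing ',' between pairs) and err.obj ({ a: 1, b: 2 }) were
    // populated by the removed implementation.
  }
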
@@ -1521,7 +858,7 @@ function emitToolCall(context) {
   var _a, _b;
   const { state, controller, toolCallStart, toolCallEnd, options } = context;
   try {
-    const parsedToolCall =
+    const parsedToolCall = (0, import_rjson.parse)(state.currentToolCallJson);
     closeTextBlock(state, controller);
     controller.enqueue({
       type: "tool-call",
@@ -1717,7 +1054,7 @@ function isTCMProtocolFactory(protocol) {
 var import_rxml2 = require("@ai-sdk-tool/rxml");
 var defaultPipelineConfig2 = defaultPipelineConfig;
 var NAME_CHAR_RE2 = /[A-Za-z0-9_:-]/;
-var
+var WHITESPACE_REGEX2 = /\s/;
 function getToolSchema(tools, toolName) {
   var _a;
   return (_a = tools.find((t) => t.name === toolName)) == null ? void 0 : _a.inputSchema;
@@ -1891,7 +1228,7 @@ function consumeClosingTag(text, lt) {
 }
 function consumeOpenTag(text, lt) {
   let p = lt + 1;
-  while (p < text.length &&
+  while (p < text.length && WHITESPACE_REGEX2.test(text[p])) {
     p += 1;
   }
   const nameStart = p;
@@ -1904,7 +1241,7 @@ function consumeOpenTag(text, lt) {
     return null;
   }
   let r = q - 1;
-  while (r >= nameStart &&
+  while (r >= nameStart && WHITESPACE_REGEX2.test(text[r])) {
     r -= 1;
   }
   const selfClosing = text[r] === "/";
@@ -1933,7 +1270,7 @@ function nextTagToken(text, fromPos) {
   if (next === "/") {
     const closing = consumeClosingTag(text, lt);
     let p = lt + 2;
-    while (p < text.length &&
+    while (p < text.length && WHITESPACE_REGEX2.test(text[p])) {
       p += 1;
     }
     const nameStart = p;
@@ -2381,7 +1718,7 @@ var xmlProtocol = (protocolOptions) => {
 // src/core/protocols/yaml-protocol.ts
 var import_yaml = __toESM(require("yaml"), 1);
 var NAME_CHAR_RE3 = /[A-Za-z0-9_:-]/;
-var
+var WHITESPACE_REGEX3 = /\s/;
 var LEADING_WHITESPACE_RE = /^(\s*)/;
 function findClosingTagEnd(text, contentStart, toolName) {
   let pos = contentStart;
@@ -2398,7 +1735,7 @@ function findClosingTagEnd(text, contentStart, toolName) {
       break;
     }
     let p = ltIdx + 2;
-    while (p < gtIdx &&
+    while (p < gtIdx && WHITESPACE_REGEX3.test(text[p])) {
      p++;
     }
     const nameStart = p;
@@ -2418,7 +1755,7 @@ function findClosingTagEnd(text, contentStart, toolName) {
     pos = gtIdx === -1 ? text.length : gtIdx + 1;
   } else {
     let p = ltIdx + 1;
-    while (p < text.length &&
+    while (p < text.length && WHITESPACE_REGEX3.test(text[p])) {
      p++;
     }
     const nameStart = p;
@@ -2431,7 +1768,7 @@ function findClosingTagEnd(text, contentStart, toolName) {
       break;
     }
     let r = gtIdx - 1;
-    while (r >= nameStart &&
+    while (r >= nameStart && WHITESPACE_REGEX3.test(text[r])) {
      r--;
     }
     const selfClosing = text[r] === "/";
@@ -3838,17 +3175,15 @@ var yamlToolMiddleware = createToolMiddleware({
   mergePipelineConfigs,
   normalizeCloseTagsHeuristic,
   originalToolsSchema,
-  parse,
   repairAgainstSchemaHeuristic,
-  stringify,
   toolChoiceStream,
-  transform,
   transformParams,
   wrapGenerate,
   wrapStream,
   xmlProtocol,
   xmlToolMiddleware,
   yamlProtocol,
-  yamlToolMiddleware
+  yamlToolMiddleware,
+  ...require("@ai-sdk-tool/rjson")
 });
 //# sourceMappingURL=index.cjs.map
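
Together with the __reExport call near the top of the file, this final hunk keeps the CommonJS export annotation in sync: the names formerly defined inline (parse, stringify, transform) are now spread in from @ai-sdk-tool/rjson. A quick, illustrative post-upgrade check, assuming both packages are installed:

  const parser = require("@ai-sdk-tool/parser");
  const rjson = require("@ai-sdk-tool/rjson");
  console.log(parser.parse === rjson.parse); // expected: true, since parse is re-exported rather than redefined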