@reverbia/sdk 1.0.0-next.20251202092727 → 1.0.0-next.20251202130234
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +26 -2
- package/dist/index.d.mts +260 -2
- package/dist/index.d.ts +260 -2
- package/dist/index.mjs +23 -1
- package/dist/react/index.cjs +319 -27
- package/dist/react/index.d.mts +160 -24
- package/dist/react/index.d.ts +160 -24
- package/dist/react/index.mjs +316 -27
- package/dist/vercel/index.cjs +1 -1
- package/dist/vercel/index.d.mts +25 -1
- package/dist/vercel/index.d.ts +25 -1
- package/dist/vercel/index.mjs +1 -1
- package/package.json +2 -2
package/dist/react/index.mjs
CHANGED
@@ -821,9 +821,32 @@ var client = createClient(createClientConfig(createConfig()));
 // src/lib/chat/constants.ts
 var DEFAULT_LOCAL_CHAT_MODEL = "onnx-community/Qwen2.5-0.5B-Instruct";
 
-// src/lib/chat/
-var
+// src/lib/chat/pipeline.ts
+var sharedPipeline = null;
 var currentModel = null;
+var currentDevice = null;
+async function getTextGenerationPipeline(options) {
+  const { model, device = "wasm", dtype = "q4" } = options;
+  if (sharedPipeline && currentModel === model && currentDevice === device) {
+    return sharedPipeline;
+  }
+  const { pipeline, env } = await import("./transformers.node-BSHUG7OY.mjs");
+  env.allowLocalModels = false;
+  if (env.backends?.onnx) {
+    env.backends.onnx.logLevel = "fatal";
+  }
+  console.log(`[Pipeline] Loading model: ${model} on ${device}...`);
+  sharedPipeline = await pipeline("text-generation", model, {
+    dtype,
+    device
+  });
+  currentModel = model;
+  currentDevice = device;
+  console.log(`[Pipeline] Model loaded: ${model}`);
+  return sharedPipeline;
+}
+
+// src/lib/chat/generation.ts
 async function generateLocalChatCompletion(messages, options = {}) {
   const {
     model = DEFAULT_LOCAL_CHAT_MODEL,
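Note: the new pipeline.ts module keeps a single transformers.js pipeline alive at module scope, keyed on the (model, device) pair; requesting a different model or device replaces the cached instance rather than accumulating models in memory. dtype is not part of the cache key, so a pipeline already loaded with one quantization is reused even when a different dtype is requested. A minimal sketch of the same memoization pattern, assuming the public @huggingface/transformers API (the bundle imports its own pre-bundled chunk, and getTextGenerationPipeline itself is internal, not exported):

// Illustrative sketch only, not the SDK's exported API.
import { pipeline, type TextGenerationPipeline } from "@huggingface/transformers";

let cached: TextGenerationPipeline | null = null;
let cachedKey: string | null = null;

export async function getCachedPipeline(model: string, device: "wasm" | "webgpu") {
  const key = `${model}:${device}`;
  if (cached && cachedKey === key) return cached; // hot path: model already loaded
  // dtype mirrors the SDK's "q4" default and, as in the SDK, is not part of the key.
  cached = (await pipeline("text-generation", model, { device, dtype: "q4" })) as TextGenerationPipeline;
  cachedKey = key;
  return cached;
}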
@@ -833,13 +856,12 @@ async function generateLocalChatCompletion(messages, options = {}) {
     onToken,
     signal
   } = options;
-  const {
-
-
-
-
-
-  }
+  const { TextStreamer } = await import("./transformers.node-BSHUG7OY.mjs");
+  const chatPipeline = await getTextGenerationPipeline({
+    model,
+    device: "wasm",
+    dtype: "q4"
+  });
   class CallbackStreamer extends TextStreamer {
     constructor(tokenizer, cb) {
       super(tokenizer, {
@@ -866,6 +888,148 @@ async function generateLocalChatCompletion(messages, options = {}) {
   return output;
 }
 
+// src/lib/tools/selector.ts
+var DEFAULT_TOOL_SELECTOR_MODEL = "Xenova/LaMini-GPT-124M";
+function buildToolSelectionPrompt(userMessage, tools) {
+  const toolList = tools.map((t) => `${t.name} (${t.description})`).join("\n");
+  return `Pick the best tool for the task. Reply with ONLY the tool name.
+
+Available tools:
+${toolList}
+none (no tool needed)
+
+Task: "${userMessage}"
+
+Best tool:`;
+}
+function extractParams(userMessage, tool) {
+  const params = {};
+  if (!tool.parameters) return params;
+  for (const param of tool.parameters) {
+    if (param.name === "expression" || param.name === "query") {
+      params[param.name] = userMessage;
+    } else if (param.name === "location" || param.name === "city") {
+      const words = userMessage.split(/\s+/);
+      const capitalizedWords = words.filter(
+        (w) => w.length > 1 && w[0] === w[0].toUpperCase()
+      );
+      params[param.name] = capitalizedWords.length > 0 ? capitalizedWords.join(" ") : userMessage;
+    } else if (param.name === "text" || param.name === "input") {
+      params[param.name] = userMessage;
+    } else {
+      params[param.name] = userMessage;
+    }
+  }
+  return params;
+}
+function parseToolSelectionResponse(response, tools, userMessage) {
+  console.log("[Tool Selector] Raw response:", response);
+  const cleaned = response.toLowerCase().trim().split(/[\s\n,.]+/)[0].replace(/[^a-z0-9_-]/g, "");
+  console.log("[Tool Selector] Parsed tool name:", cleaned);
+  if (cleaned === "none" || cleaned === "null" || cleaned === "") {
+    console.log("[Tool Selector] No tool selected");
+    return { toolSelected: false };
+  }
+  const selectedTool = tools.find((t) => t.name.toLowerCase() === cleaned);
+  if (!selectedTool) {
+    const fuzzyTool = tools.find(
+      (t) => t.name.toLowerCase().includes(cleaned) || cleaned.includes(t.name.toLowerCase())
+    );
+    if (fuzzyTool) {
+      console.log(`[Tool Selector] Fuzzy matched tool: ${fuzzyTool.name}`);
+      const params2 = extractParams(userMessage, fuzzyTool);
+      return {
+        toolSelected: true,
+        toolName: fuzzyTool.name,
+        parameters: params2,
+        confidence: 0.6
+      };
+    }
+    console.warn(`[Tool Selector] Unknown tool: ${cleaned}`);
+    return { toolSelected: false };
+  }
+  const params = extractParams(userMessage, selectedTool);
+  console.log(`[Tool Selector] Selected tool: ${selectedTool.name}`, params);
+  return {
+    toolSelected: true,
+    toolName: selectedTool.name,
+    parameters: params,
+    confidence: 0.9
+  };
+}
+async function selectTool(userMessage, tools, options = {}) {
+  const {
+    model = DEFAULT_TOOL_SELECTOR_MODEL,
+    signal,
+    device = "wasm"
+  } = options;
+  if (!tools.length) {
+    return { toolSelected: false };
+  }
+  console.log(
+    `[Tool Selector] analyzing message: "${userMessage}" with model ${model}`
+  );
+  try {
+    const selectorPipeline = await getTextGenerationPipeline({
+      model,
+      device,
+      dtype: "q4"
+      // Aggressive quantization for speed
+    });
+    const prompt = buildToolSelectionPrompt(userMessage, tools);
+    const output = await selectorPipeline(prompt, {
+      max_new_tokens: 4,
+      // Just need the tool name
+      temperature: 0,
+      // Deterministic
+      do_sample: false,
+      return_full_text: false
+    });
+    if (signal?.aborted) {
+      return { toolSelected: false };
+    }
+    const generatedText = output?.[0]?.generated_text || output?.generated_text || "";
+    return parseToolSelectionResponse(generatedText, tools, userMessage);
+  } catch (error) {
+    console.error("[Tool Selector] Error:", error);
+    return { toolSelected: false };
+  }
+}
+var preloadPromise = null;
+async function preloadToolSelectorModel(options = {}) {
+  if (preloadPromise) {
+    return preloadPromise;
+  }
+  const { model = DEFAULT_TOOL_SELECTOR_MODEL, device = "wasm" } = options;
+  console.log(`[Tool Selector] Preloading model: ${model}`);
+  preloadPromise = getTextGenerationPipeline({
+    model,
+    device,
+    dtype: "q4"
+  }).then(() => {
+    console.log(`[Tool Selector] Model preloaded: ${model}`);
+  }).catch((error) => {
+    console.warn("[Tool Selector] Failed to preload model:", error);
+    preloadPromise = null;
+  });
+  return preloadPromise;
+}
+async function executeTool(tool, params) {
+  try {
+    console.log(
+      `[Tool Selector] Executing tool ${tool.name} with params:`,
+      params
+    );
+    const result = await tool.execute(params);
+    console.log(`[Tool Selector] Tool ${tool.name} execution result:`, result);
+    return { success: true, result };
+  } catch (error) {
+    const errorMessage = error instanceof Error ? error.message : "Tool execution failed";
+    console.error(`[Tool Selector] Tool ${tool.name} failed:`, errorMessage);
+    return { success: false, error: errorMessage };
+  }
+}
+
 // src/react/useChat.ts
 function useChat(options) {
   const {
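Note: the selector is deliberately heuristic. The small model is prompted to emit only a tool name, the first token of the reply is normalized, and an exact name match (confidence 0.9) or substring fuzzy match (0.6) picks the tool; parameters are then filled from the raw user message by name-based rules in extractParams, not by the model. A hedged usage sketch of the two helpers exported below, with the tool shape (name, description, parameters, execute) inferred from how selector.ts reads it, and a ./react subpath import assumed from the dist layout:

import { selectTool, executeTool } from "@reverbia/sdk/react";

// Hypothetical tool for illustration.
const weatherTool = {
  name: "get_weather",
  description: "Get the current weather for a city",
  parameters: [{ name: "city" }],
  async execute(params: Record<string, string>) {
    return { city: params.city, tempC: 21 }; // stand-in for a real API call
  }
};

const selection = await selectTool("What's the weather in Paris?", [weatherTool]);
if (selection.toolSelected && selection.toolName) {
  const outcome = await executeTool(weatherTool, selection.parameters ?? {});
  // outcome is { success: true, result } on success, { success: false, error } on failure.
}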
@@ -875,9 +1039,13 @@ function useChat(options) {
     onFinish,
     onError,
     chatProvider = "api",
-    localModel = DEFAULT_LOCAL_CHAT_MODEL
+    localModel = DEFAULT_LOCAL_CHAT_MODEL,
+    tools,
+    toolSelectorModel = DEFAULT_TOOL_SELECTOR_MODEL,
+    onToolExecution
   } = options || {};
   const [isLoading, setIsLoading] = useState(false);
+  const [isSelectingTool, setIsSelectingTool] = useState(false);
   const abortControllerRef = useRef(null);
   const stop = useCallback(() => {
     if (abortControllerRef.current) {
@@ -893,11 +1061,17 @@ function useChat(options) {
       }
     };
   }, []);
+  useEffect(() => {
+    if (tools && tools.length > 0) {
+      preloadToolSelectorModel({ model: toolSelectorModel });
+    }
+  }, [tools, toolSelectorModel]);
   const sendMessage = useCallback(
     async ({
       messages,
       model,
-      onData
+      onData,
+      runTools = true
     }) => {
       if (!messages?.length) {
         const errorMsg = "messages are required to call sendMessage.";
@@ -910,13 +1084,84 @@ function useChat(options) {
       const abortController = new AbortController();
       abortControllerRef.current = abortController;
       setIsLoading(true);
+      let toolExecutionResult;
+      let messagesWithToolContext = messages;
+      if (runTools && tools && tools.length > 0) {
+        const lastUserMessage = [...messages].reverse().find((m) => m.role === "user");
+        if (lastUserMessage?.content) {
+          setIsSelectingTool(true);
+          const contentString = lastUserMessage.content?.map((part) => part.text || "").join("") || "";
+          try {
+            const selectionResult = await selectTool(contentString, tools, {
+              model: toolSelectorModel,
+              signal: abortController.signal
+            });
+            if (selectionResult.toolSelected && selectionResult.toolName) {
+              const selectedTool = tools.find(
+                (t) => t.name === selectionResult.toolName
+              );
+              if (selectedTool) {
+                const execResult = await executeTool(
+                  selectedTool,
+                  selectionResult.parameters || {}
+                );
+                toolExecutionResult = {
+                  toolName: selectionResult.toolName,
+                  success: execResult.success,
+                  result: execResult.result,
+                  error: execResult.error
+                };
+                if (onToolExecution) {
+                  onToolExecution(toolExecutionResult);
+                }
+                if (toolExecutionResult.success && toolExecutionResult.result !== void 0) {
+                  const toolResultContext = {
+                    role: "system",
+                    content: [
+                      {
+                        type: "text",
+                        text: `Tool "${toolExecutionResult.toolName}" was executed with the following result:
+${JSON.stringify(
+                          toolExecutionResult.result,
+                          null,
+                          2
+                        )}
+
+Use this information to respond to the user's request.`
+                      }
+                    ]
+                  };
+                  messagesWithToolContext = [...messages, toolResultContext];
+                } else if (toolExecutionResult.error) {
+                  const toolErrorContext = {
+                    role: "system",
+                    content: [
+                      {
+                        type: "text",
+                        text: `Tool "${toolExecutionResult.toolName}" was executed but encountered an error: ${toolExecutionResult.error}
+
+Please inform the user about this issue and try to help them alternatively.`
+                      }
+                    ]
+                  };
+                  messagesWithToolContext = [...messages, toolErrorContext];
+                }
+              }
+            }
+          } catch (err) {
+            console.warn("Tool selection error:", err);
+          } finally {
+            setIsSelectingTool(false);
+          }
+        }
+      }
       try {
         if (chatProvider === "local") {
           let accumulatedContent = "";
           const usedModel = localModel;
-          const formattedMessages =
+          const formattedMessages = messagesWithToolContext.map((m) => ({
             role: m.role || "user",
-            content: m.content || ""
+            content: m.content?.map((p) => p.text || "").join("") || ""
           }));
          await generateLocalChatCompletion(formattedMessages, {
            model: usedModel,
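Design note: a successful tool run is fed back to the model as one extra trailing system message (stringified JSON plus an instruction) rather than an OpenAI-style tool role, and the caller's messages array is never mutated; failures get an analogous error message. Roughly, the outgoing request gains one message of this shape (values illustrative):

const result = { city: "Paris", tempC: 21 }; // illustrative tool output
const toolResultContext = {
  role: "system",
  content: [
    {
      type: "text",
      text: `Tool "get_weather" was executed with the following result:\n${JSON.stringify(result, null, 2)}\n\nUse this information to respond to the user's request.`
    }
  ]
};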
@@ -935,7 +1180,7 @@ function useChat(options) {
               index: 0,
               message: {
                 role: "assistant",
-                content: accumulatedContent
+                content: [{ type: "text", text: accumulatedContent }]
               },
               finish_reason: "stop"
             }
@@ -951,30 +1196,46 @@ function useChat(options) {
           if (onFinish) {
             onFinish(completion);
           }
-          return {
+          return {
+            data: completion,
+            error: null,
+            toolExecution: toolExecutionResult
+          };
         } else {
           if (!model) {
             const errorMsg = "model is required to call sendMessage.";
             if (onError) onError(new Error(errorMsg));
-            return {
+            return {
+              data: null,
+              error: errorMsg,
+              toolExecution: toolExecutionResult
+            };
           }
           if (!getToken) {
             const errorMsg = "Token getter function is required.";
             if (onError) onError(new Error(errorMsg));
-            return {
+            return {
+              data: null,
+              error: errorMsg,
+              toolExecution: toolExecutionResult
+            };
           }
           const token = await getToken();
           if (!token) {
             const errorMsg = "No access token available.";
             setIsLoading(false);
             if (onError) onError(new Error(errorMsg));
-            return {
+            return {
+              data: null,
+              error: errorMsg,
+              toolExecution: toolExecutionResult
+            };
           }
           const sseResult = await client.sse.post({
             baseUrl,
             url: "/api/v1/chat/completions",
             body: {
-              messages,
+              messages: messagesWithToolContext,
               model,
               stream: true
             },
@@ -1033,7 +1294,7 @@ function useChat(options) {
                 index: 0,
                 message: {
                   role: "assistant",
-                  content: accumulatedContent
+                  content: [{ type: "text", text: accumulatedContent }]
                 },
                 finish_reason: finishReason
               }
@@ -1044,12 +1305,20 @@ function useChat(options) {
           if (onFinish) {
             onFinish(completion);
           }
-          return {
+          return {
+            data: completion,
+            error: null,
+            toolExecution: toolExecutionResult
+          };
         }
       } catch (err) {
         if (err instanceof Error && err.name === "AbortError") {
           setIsLoading(false);
-          return {
+          return {
+            data: null,
+            error: "Request aborted",
+            toolExecution: toolExecutionResult
+          };
         }
         const errorMsg = err instanceof Error ? err.message : "Failed to send message.";
         const errorObj = err instanceof Error ? err : new Error(errorMsg);
@@ -1057,7 +1326,11 @@ function useChat(options) {
         if (onError) {
           onError(errorObj);
         }
-        return {
+        return {
+          data: null,
+          error: errorMsg,
+          toolExecution: toolExecutionResult
+        };
       } finally {
         if (abortControllerRef.current === abortController) {
           abortControllerRef.current = null;
@@ -1071,11 +1344,15 @@ function useChat(options) {
       onFinish,
       onError,
       chatProvider,
-      localModel
+      localModel,
+      tools,
+      toolSelectorModel,
+      onToolExecution
     ]
   );
   return {
     isLoading,
+    isSelectingTool,
     sendMessage,
     stop
   };
@@ -1638,9 +1915,12 @@ function useMemory(options = {}) {
         messages: [
           {
             role: "system",
-            content: FACT_EXTRACTION_PROMPT
+            content: [{ type: "text", text: FACT_EXTRACTION_PROMPT }]
           },
-          ...messages
+          ...messages.map((m) => ({
+            role: m.role,
+            content: [{ type: "text", text: m.content }]
+          }))
         ],
         model: model || completionsModel
       },
@@ -1661,7 +1941,13 @@ function useMemory(options = {}) {
       );
       return null;
     }
-    const
+    const messageContent = completion.data.choices?.[0]?.message?.content;
+    let content = "";
+    if (Array.isArray(messageContent)) {
+      content = messageContent.map((p) => p.text || "").join("").trim();
+    } else if (typeof messageContent === "string") {
+      content = messageContent.trim();
+    }
     if (!content) {
       console.error("No content in memory extraction response");
       return null;
@@ -1997,12 +2283,15 @@ var extractConversationContext = (messages, maxMessages = 3) => {
   return userMessages.trim();
 };
 export {
+  DEFAULT_TOOL_SELECTOR_MODEL,
   createMemoryContextSystemMessage,
   decryptData,
   decryptDataBytes,
   encryptData,
+  executeTool,
   extractConversationContext,
   formatMemoriesForChat,
+  selectTool,
   useChat,
   useEncryption,
   useMemory,
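Taken together, useChat now accepts tools, toolSelectorModel, and onToolExecution, preloads the selector model as soon as tools are supplied, exposes an isSelectingTool flag, and sendMessage gains a runTools switch plus a toolExecution field in its return value. A hedged usage sketch (option and field names from the diff above; the ./react subpath import is assumed from the dist layout):

import { useChat } from "@reverbia/sdk/react";

function ChatWithTools() {
  const { sendMessage, isLoading, isSelectingTool } = useChat({
    chatProvider: "local", // run the chat model in-browser; no API model/token needed
    tools: [weatherTool],  // same tool shape as in the selector sketch above
    onToolExecution: (exec) => console.log(`tool ${exec.toolName} ok=${exec.success}`)
  });

  const ask = () =>
    sendMessage({
      messages: [
        { role: "user", content: [{ type: "text", text: "What's the weather in Paris?" }] }
      ],
      runTools: true // set false to skip tool selection for this call
    });

  // Rendering omitted; note that message content is now an array of typed parts.
  return null;
}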
package/dist/vercel/index.cjs
CHANGED
package/dist/vercel/index.d.mts
CHANGED
@@ -7,9 +7,33 @@ type LlmapiMessage = {
     /**
      * Content is the message content
      */
-    content?:
+    content?: Array<LlmapiMessageContentPart>;
     role?: LlmapiRole;
 };
+/**
+ * ImageURL is used when Type=image_url
+ */
+type LlmapiMessageContentImage = {
+    /**
+     * Detail is the OpenAI detail hint (auto|low|high)
+     */
+    detail?: string;
+    /**
+     * URL is the image URL or data URI
+     */
+    url?: string;
+};
+type LlmapiMessageContentPart = {
+    image_url?: LlmapiMessageContentImage;
+    /**
+     * Text holds the text content when Type=text
+     */
+    text?: string;
+    /**
+     * Type is the block type (`text` or `image_url`)
+     */
+    type?: string;
+};
 /**
  * Role is the message role (system, user, assistant)
  */
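The Vercel-adapter declarations now type message content as an array of typed parts instead of a plain string, mirroring the runtime change above. Shape-only example built from these declarations (assuming LlmapiRole admits "user"):

const message: LlmapiMessage = {
  role: "user",
  content: [
    { type: "text", text: "What is in this image?" },
    { type: "image_url", image_url: { url: "https://example.com/cat.png", detail: "auto" } }
  ]
};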
package/dist/vercel/index.d.ts
CHANGED
@@ -7,9 +7,33 @@ type LlmapiMessage = {
     /**
      * Content is the message content
      */
-    content?:
+    content?: Array<LlmapiMessageContentPart>;
     role?: LlmapiRole;
 };
+/**
+ * ImageURL is used when Type=image_url
+ */
+type LlmapiMessageContentImage = {
+    /**
+     * Detail is the OpenAI detail hint (auto|low|high)
+     */
+    detail?: string;
+    /**
+     * URL is the image URL or data URI
+     */
+    url?: string;
+};
+type LlmapiMessageContentPart = {
+    image_url?: LlmapiMessageContentImage;
+    /**
+     * Text holds the text content when Type=text
+     */
+    text?: string;
+    /**
+     * Type is the block type (`text` or `image_url`)
+     */
+    type?: string;
+};
 /**
  * Role is the message role (system, user, assistant)
  */
package/dist/vercel/index.mjs
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@reverbia/sdk",
-  "version": "1.0.0-next.
+  "version": "1.0.0-next.20251202130234",
   "description": "",
   "main": "./dist/index.cjs",
   "module": "./dist/index.mjs",
@@ -57,7 +57,7 @@
   "homepage": "https://github.com/zeta-chain/ai-sdk#readme",
   "dependencies": {
     "@huggingface/transformers": "^3.8.0",
-    "@reverbia/portal": "1.0.0-next.
+    "@reverbia/portal": "1.0.0-next.20251201184846",
     "ai": "5.0.93"
   },
   "devDependencies": {