utilitas 1998.2.36 → 1998.2.37
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- package/dist/utilitas.lite.mjs +1 -1
- package/dist/utilitas.lite.mjs.map +1 -1
- package/lib/alan.mjs +45 -53
- package/lib/manifest.mjs +1 -1
- package/package.json +1 -1
package/lib/alan.mjs
CHANGED
@@ -751,8 +751,7 @@ const streamResp = async (resp, options) => {
 
 const packGptResp = async (resp, options) => {
     // simple mode is not recommended for streaming responses
-    let text = resp.text // ChatGPT / Claude
-        || (Function.isFunction(resp?.text) ? resp.text() : resp?.text) // Gemini
+    let text = resp.text // ChatGPT / Claude / Gemini
         || resp?.message?.content || ''; // Ollama
     const audio = resp?.message?.audio?.data; // ChatGPT audio mode
     if (options?.raw) { return resp; }
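Note: this first hunk drops the Gemini-only branch in packGptResp; resp.text is now read as a plain string for ChatGPT, Claude, and Gemini alike, with Ollama as the remaining fallback. A minimal sketch of the resulting extraction, using illustrative response objects that are not taken from the diff:

    // Illustrative response shapes:
    const fromGemini = { text: 'hi' };                  // ChatGPT / Claude / Gemini
    const fromOllama = { message: { content: 'hi' } };  // Ollama
    // Same fallback chain as in the hunk above:
    const extractText = resp => resp?.text || resp?.message?.content || '';
    extractText(fromGemini); // => 'hi'
    extractText(fromOllama); // => 'hi'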
@@ -783,11 +782,11 @@ const handleToolsCall = async (msg, options) => {
         toolsResponse = (toolsResponse + m).trim();
         await streamResp({ text: options?.delta ? m : toolsResponse }, options);
     };
-    const calls = msg.tool_calls || msg.content || [];
+    const calls = msg.tool_calls || msg.content || msg.parts || [];
     if (calls.length) {
         switch (options?.flavor) {
             case CLAUDE: preRes.push(msg); break;
-            case GEMINI: preRes.push(
+            case GEMINI: preRes.push(msg); break;
             case CHATGPT: default: preRes.push(msg); break;
         }
         for (const fn of calls) {
@@ -799,11 +798,11 @@ const handleToolsCall = async (msg, options) => {
                 });
                 break;
             case GEMINI:
-                input = fn
+                input = fn?.functionCall?.args;
                 packMsg = (t, e) => ({
                     functionResponse: {
-                        name: fn
-                        name: fn
+                        name: fn?.functionCall?.name, response: {
+                            name: fn?.functionCall?.name, content: e ? `[Error] ${t}` : t,
                         }
                     }
                 });
@@ -816,7 +815,7 @@ const handleToolsCall = async (msg, options) => {
                 });
                 break;
         }
-        const name = fn?.function?.
+        const name = (fn?.function || fn?.functionCall || fn)?.name;
        if (!name) { continue; }
         await resp(`\nName: ${name}`);
         const f = tools.find(x => insensitiveCompare(
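Note: together with the msg.parts fallback added above, the rewritten name lookup lets handleToolsCall accept all three tool-call shapes. A sketch of how the single fallback chain resolves each (sample objects are illustrative):

    const nameOf = fn => (fn?.function || fn?.functionCall || fn)?.name;
    nameOf({ function: { name: 'search' } });      // ChatGPT-style => 'search'
    nameOf({ functionCall: { name: 'search' } });  // Gemini-style  => 'search'
    nameOf({ name: 'search' });                    // Claude-style  => 'search'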
@@ -1012,7 +1011,8 @@ const promptClaude = async (content, options = {}) => {
     );
     if (tool_use.length && countToolCalls(toolsResponse) < MAX_TOOL_RECURSION) {
         return await promptClaude(content, {
-            ...options, toolsResult: [...options?.toolsResult || [],
+            ...options, toolsResult: [...options?.toolsResult || [],
+                ...toolsResult], result: toolsResponse,
         });
     }
     return packGptResp({ text: mergeMsgs(toolsResponse, tool_use) }, options);
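Note: this hunk completes the bounded tool-recursion pattern in promptClaude: accumulated tool results are carried back into the next call via options.toolsResult until the model stops requesting tools or countToolCalls hits MAX_TOOL_RECURSION. A simplified sketch of the pattern (runToolsOnce and the budget value are hypothetical stand-ins for the package's internals):

    const MAX_TOOL_RECURSION = 10; // assumed value; the real constant lives elsewhere in alan.mjs
    const prompt = async (content, options = {}) => {
        // One model round-trip, returning any tool results it produced.
        const { toolsResult, toolsResponse } = await runToolsOnce(content, options);
        if (toolsResult.length && countToolCalls(toolsResponse) < MAX_TOOL_RECURSION) {
            return prompt(content, {
                ...options,
                toolsResult: [...options?.toolsResult || [], ...toolsResult],
                result: toolsResponse,
            });
        }
        return toolsResponse;
    };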
@@ -1078,54 +1078,46 @@ const promptGemini = async (content, options = {}) => {
     options.model = genModel;
     const chat = generative.startChat({
         history: [
-            ...options?.messages && !options?.attachments?.length
-
-
-
-
+            ...options?.messages && !options?.attachments?.length ? options.messages : [],
+            ...options?.toolsResult ? [
+                buildGeminiMessage(content, { ...options, history: true }),
+                ...options.toolsResult.slice(0, options.toolsResult.length - 1)
+            ] : []
         ], ...generationConfig(options),
     });
-    const resp = await chat
-    options?.
-
-
-    let [result, references, functionCalls]
-
-
-
-
-
-
-
-
-
-
-
-
-        references = rfc;
-        await ignoreErrFunc(async () => await options.stream(
-            await packGptResp({
-                text: () => options?.delta ? delta : result, references,
-            }, { ...options, processing: true })
-        ), LOG);
-    }
+    const resp = await chat.sendMessageStream(
+        options?.toolsResult?.[options?.toolsResult?.length]?.parts
+        || buildGeminiMessage(content, options)
+    );
+    let [result, references, functionCalls]
+        = [options?.result ?? '', null, null];
+    for await (const chunk of resp.stream) {
+        functionCalls || (functionCalls = chunk.functionCalls);
+        const delta = chunk?.text?.() || '';
+        const rfc = packGeminiReferences(
+            chunk.candidates[0]?.groundingMetadata?.groundingChunks,
+            chunk.candidates[0]?.groundingMetadata?.groundingSupports
+        );
+        if (delta === '' && !rfc) { continue; }
+        result += delta;
+        references = rfc;
+        await streamResp({ text: options?.delta ? delta : result }, options);
     }
     const _resp = await resp.response;
-
-
-
-
-
-
-        content, {
-
-
-
-
-
-
-
-    }, options));
+    functionCalls = (functionCalls() || _resp.functionCalls() || []).map(x => ({ functionCall: x }));
+    const { toolsResult, toolsResponse } = await handleToolsCall(
+        { role: MODEL, parts: functionCalls },
+        { ...options, result, flavor: GEMINI }
+    );
+    if (functionCalls.length && countToolCalls(toolsResponse) < MAX_TOOL_RECURSION) {
+        return promptGemini(content, {
+            ...options || {}, toolsResult: [...options?.toolsResult || [],
+                ...toolsResult], result: toolsResponse,
+        });
+    }
+    return await packGptResp({
+        text: mergeMsgs(toolsResponse, functionCalls), references,
+    }, options);
 };
 
 const checkEmbeddingInput = async (input, model) => {
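Note: the rewritten promptGemini follows the streaming flow of the Google Generative AI SDK (assumed here to be '@google/generative-ai', matching the startChat / sendMessageStream / resp.stream / resp.response calls above). A self-contained sketch of that flow, with a placeholder model name and prompt:

    import { GoogleGenerativeAI } from '@google/generative-ai';

    const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
    const model = genAI.getGenerativeModel({ model: 'gemini-1.5-pro' }); // placeholder model
    const chat = model.startChat({ history: [] });
    const resp = await chat.sendMessageStream('Hello');
    let text = '';
    for await (const chunk of resp.stream) {
        text += chunk.text() || ''; // per-chunk delta, as in the loop above
    }
    const full = await resp.response;              // aggregated response once the stream ends
    console.log(text, full.functionCalls() || []); // tool calls, if the model made any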
package/lib/manifest.mjs
CHANGED