utilitas 1998.2.35 → 1998.2.37
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/utilitas.lite.mjs +1 -1
- package/dist/utilitas.lite.mjs.map +1 -1
- package/lib/alan.mjs +133 -156
- package/lib/manifest.mjs +1 -1
- package/package.json +1 -1
package/lib/alan.mjs
CHANGED
@@ -743,14 +743,17 @@ const packResp = async (resp, options) => {
     };
 };

+const streamResp = async (resp, options) => {
+    const msg = await packGptResp(resp, { ...options, processing: true });
+    return options?.stream && (msg?.text || msg?.audio?.length)
+        && await ignoreErrFunc(async () => await options.stream(msg), LOG);
+};
+
 const packGptResp = async (resp, options) => {
     // simple mode is not recommended for streaming responses
-    let text = resp
-        || resp?.choices?.[0]?.message?.audio?.transcript // ChatGPT audio mode
-        || (Function.isFunction(resp?.text) ? resp.text() : resp?.text) // Gemini
-        || resp?.content?.find(x => x.type === TEXT)?.text // Claude
+    let text = resp.text // ChatGPT / Claude / Gemini
         || resp?.message?.content || ''; // Ollama
-    const audio = resp?.
+    const audio = resp?.message?.audio?.data; // ChatGPT audio mode
     if (options?.raw) { return resp; }
     else if (options?.simple && options?.jsonMode) { return parseJson(text); }
     else if (options?.simple && options?.audioMode) { return audio; }
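
Note: the new `streamResp` helper centralizes the streaming-callback guard that each prompt function previously inlined. A minimal re-creation of the pattern, with simplified names that are not the module's exports:

    // Illustrative sketch only: fire the caller's stream callback when there is
    // something to deliver, and never let a callback error break the stream loop.
    const safeStream = async (msg, options) => {
        if (!options?.stream || !(msg?.text || msg?.audio?.length)) { return; }
        try { await options.stream(msg); } catch (err) { console.error(err); }
    };
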
@@ -769,24 +772,24 @@ const packGptResp = async (resp, options) => {
     };
 };

 const handleToolsCall = async (msg, options) => {
-    let [content, preRes, input, packMsg, toolsResponse]
+    let [content, preRes, input, packMsg, toolsResponse, responded] = [
+        [], [], [], null, options?.result ? options?.result.trim() : '', false
+    ];
+    const resp = async m => {
+        m = `\n${m}`;
+        responded || (m = `\n\n${TOOLS_STR}${m}`);
+        responded = true;
+        toolsResponse = (toolsResponse + m).trim();
+        await streamResp({ text: options?.delta ? m : toolsResponse }, options);
     };
+    const calls = msg.tool_calls || msg.content || msg.parts || [];
+    if (calls.length) {
         switch (options?.flavor) {
-            case CLAUDE: preRes.push(
-            case GEMINI: preRes.push(
-            case CHATGPT: default: preRes.push(
+            case CLAUDE: preRes.push(msg); break;
+            case GEMINI: preRes.push(msg); break;
+            case CHATGPT: default: preRes.push(msg); break;
         }
-        for (const fn of
+        for (const fn of calls) {
             switch (options?.flavor) {
                 case CLAUDE:
                     input = fn.input = String.isString(fn?.input) ? parseJson(fn.input) : fn?.input;
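
Note: the `resp` closure introduced above frames all tool-call progress messages: `TOOLS_STR` is prepended exactly once (tracked by `responded`), each message is appended to the running `toolsResponse`, and either the delta or the accumulated text is streamed. A self-contained sketch of the once-only framing, with a stand-in header since the real `TOOLS_STR` is defined elsewhere in alan.mjs:

    let responded = false, acc = '';
    const respond = m => {
        m = `\n${m}`;
        responded || (m = `\n\nTOOLS:${m}`);
        responded = true;
        acc = (acc + m).trim();
        return acc;
    };
    respond('Name: search'); // 'TOOLS:\nName: search'
    respond('Status: OK');   // 'TOOLS:\nName: search\nStatus: OK'
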
@@ -795,11 +798,11 @@ const handleToolsCall = async (msg, options) => {
                     });
                     break;
                 case GEMINI:
-                    input = fn
+                    input = fn?.functionCall?.args;
                     packMsg = (t, e) => ({
                         functionResponse: {
-                            name: fn
-                            name: fn
+                            name: fn?.functionCall?.name, response: {
+                                name: fn?.functionCall?.name, content: e ? `[Error] ${t}` : t,
                             }
                         }
                     });
@@ -812,13 +815,16 @@ const handleToolsCall = async (msg, options) => {
                     });
                     break;
             }
-            const name = fn?.function?.
+            const name = (fn?.function || fn?.functionCall || fn)?.name;
+            if (!name) { continue; }
             await resp(`\nName: ${name}`);
             const f = tools.find(x => insensitiveCompare(
                 x.def?.function?.name || x?.def?.name, name
             ));
             if (!f?.func) {
+                const rt = `Failed: invalid function name \`${name}\``;
+                content.push(packMsg(rt, true));
+                await resp(rt);
                 continue;
             }
             const description = f.def?.function?.description || f.def?.description;
@@ -830,20 +836,25 @@ const handleToolsCall = async (msg, options) => {
                 content.push(packMsg(output));
                 await resp(f.showRes ? `Output: ${output}` : `Status: OK`);
             } catch (err) {
+                const rt = `Failed: ${err.message}`;
+                content.push(packMsg(rt, true));
+                await resp(rt);
+                log(rt);
             }
         }
         switch (options?.flavor) {
-            case CLAUDE: content = [{ role: user, content }]; break;
+            case CLAUDE: content = content.length ? [{ role: user, content }] : []; break;
             case GEMINI: content = [{ role: user, parts: content }]; break;
         }
-        await resp(`\n${TOOLS_END}`);
+        responded && await resp(`\n${TOOLS_END}`);
     }
-    return { toolsResult: [...preRes, ...content], toolsResponse };
+    return { toolsResult: [...content.length ? preRes : [], ...content], toolsResponse };
 };

+const mergeMsgs = (resp, calls) => [resp, ...calls.length ? [
+    `⚠️ Tools recursion limit reached: ${MAX_TOOL_RECURSION}`
+] : []].map(x => x.trim()).join('\n\n');
+
 const promptChatGPT = async (content, options = {}) => {
     const { client } = await getOpenAIClient(options);
     // https://github.com/openai/openai-node?tab=readme-ov-file#streaming-responses
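
Note: `mergeMsgs` joins the accumulated tool transcript with a recursion-limit warning, appending the warning only when unresolved calls remain. A behavior sketch, with an assumed limit value for illustration:

    const MAX_TOOL_RECURSION = 8; // assumed value, defined elsewhere in alan.mjs
    const mergeMsgs = (resp, calls) => [resp, ...calls.length ? [
        `⚠️ Tools recursion limit reached: ${MAX_TOOL_RECURSION}`
    ] : []].map(x => x.trim()).join('\n\n');
    mergeMsgs('All tools done. ', []); // 'All tools done.'
    mergeMsgs('Partial', [{}]);        // 'Partial\n\n⚠️ Tools recursion limit reached: 8'
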
@@ -871,10 +882,8 @@ const promptChatGPT = async (content, options = {}) => {
         options?.reasoning && !MODELS[options.model]?.reasoning
     ), `This model does not support reasoning: ${options.model}`);
     [options.audioMimeType, options.suffix] = [pcm16, 'pcm.wav'];
-    let [result, resultAudio,
-        Buffer.alloc(0), null, []
-    ];
+    let [result, resultAudio, event, resultTools, responded]
+        = [options?.result ?? '', Buffer.alloc(0), null, [], false];
     const resp = await client.chat.completions.create({
         modalities, audio: options?.audio || (
             modalities?.find?.(x => x === AUDIO)
@@ -888,53 +897,43 @@ const promptChatGPT = async (content, options = {}) => {
         } : {}, model: options.model, stream: true,
         store: true, tool_choice: 'auto',
     });
-    for await (
+    for await (event of resp) {
+        event = event?.choices?.[0] || {};
+        const delta = event.delta || {};
+        let deltaText = delta.content || delta.audio?.transcript || '';
+        const deltaAudio = delta.audio?.data ? await convert(
+            delta.audio.data, { input: BASE64, expected: BUFFER }
         ) : Buffer.alloc(0);
-        const
-        let curFunc = resultTools.find(z => z.index === deltaFunc[x].index);
+        for (const x of delta.tool_calls || []) {
+            let curFunc = resultTools.find(y => y.index === x.index);
             curFunc || (resultTools.push(curFunc = {}));
-            isSet(
+            isSet(x.index, true) && (curFunc.index = x.index);
+            x.id && (curFunc.id = x.id);
+            x.type && (curFunc.type = x.type);
             curFunc.function || (curFunc.function = { name: '', arguments: '' });
-            deltaFunc[x].function.arguments && (curFunc.function.arguments += deltaFunc[x].function.arguments);
-        }
+            x?.function?.name && (curFunc.function.name += x.function.name);
+            x?.function?.arguments && (curFunc.function.arguments += x.function.arguments);
         }
+        deltaText && (responded = responded || (deltaText = `\n\n${deltaText}`));
         result += deltaText;
         resultAudio = Buffer.concat([resultAudio, deltaAudio]);
        const respAudio = options?.delta ? deltaAudio : resultAudio;
+        await streamResp({
+            text: options?.delta ? deltaText : result,
             ...respAudio.length ? { audio: { data: respAudio } } : {},
-        };
-        await ignoreErrFunc(async () => await options?.stream?.(
-            await packGptResp(chunk, { ...options, processing: true })
-        ), LOG);
+        }, options);
     }
-        content: result, tool_calls: resultTools,
+    event = {
+        role: assistant, text: result, tool_calls: resultTools,
         ...resultAudio.length ? { audio: { data: resultAudio } } : {},
     };
-    const { toolsResult, toolsResponse }
-    );
+    const { toolsResult, toolsResponse }
+        = await handleToolsCall(event, { ...options, result });
     if (toolsResult.length && countToolCalls(toolsResponse) < MAX_TOOL_RECURSION) {
         return promptChatGPT(content, { ...options, toolsResult, result: toolsResponse });
     }
-    ] : []].map(x => x.trim()).join('\n\n');
-    return await packGptResp(chunk, options);
+    event.text = mergeMsgs(toolsResponse, toolsResult);
+    return await packGptResp(event, options);
 };

 const promptAzure = async (content, options = {}) =>
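
Note: the rewritten ChatGPT loop stitches OpenAI's fragmented streaming tool calls back together by `index`, concatenating the partial `name` and `arguments` strings. A stand-alone sketch with fabricated delta chunks:

    const resultTools = [];
    const addDelta = x => {
        let curFunc = resultTools.find(y => y.index === x.index);
        curFunc || (resultTools.push(curFunc = { index: x.index }));
        curFunc.function || (curFunc.function = { name: '', arguments: '' });
        x.function?.name && (curFunc.function.name += x.function.name);
        x.function?.arguments && (curFunc.function.arguments += x.function.arguments);
    };
    [{ index: 0, function: { name: 'get_time' } },
        { index: 0, function: { arguments: '{"tz":' } },
        { index: 0, function: { arguments: '"UTC"}' } }].forEach(addDelta);
    // resultTools[0].function → { name: 'get_time', arguments: '{"tz":"UTC"}' }
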
@@ -966,13 +965,13 @@ const promptOllama = async (content, options = {}) => {
 const promptClaude = async (content, options = {}) => {
     const { client } = await getClaudeClient(options);
     options.model = options?.model || DEFAULT_MODELS[CLAUDE];
-    const reasoning = options?.reasoning ?? MODELS[options.model]?.reasoning;
     const resp = await client.messages.create({
         model: options.model, max_tokens: MODELS[options.model].maxOutputTokens,
         messages: [
             ...options?.messages || [], buildClaudeMessage(content, options),
             ...options?.toolsResult || [],
-        ], stream: true,
+        ], stream: true,
+        ...options?.reasoning ?? MODELS[options.model]?.reasoning ? {
             thinking: options?.thinking || { type: 'enabled', budget_tokens: 1024 },
         } : {}, // https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking
         ...MODELS[options.model]?.tools ? {
@@ -980,57 +979,43 @@ const promptClaude = async (content, options = {}) => {
         tool_choice: { type: 'auto' },
     } : {},
     });
-    let [event, text, thinking, signature, result, thinkEnd,
-        = [null, '', '', '', options?.
-    for await (
-        event?.content_block?.text || event?.delta?.text || '',
-    ];
+    let [event, text, thinking, signature, result, thinkEnd, tool_use]
+        = [null, '', '', '', options?.result ?? '', '', []];
+    for await (const chunk of resp) {
+        event = chunk?.content_block || chunk?.delta || {};
+        let [thkDelta, txtDelta] = [event.thinking || '', event.text || ''];
         text += txtDelta;
         thinking += thkDelta;
-        signature = signature || event?.
+        signature = signature || event?.signature || '';
+        thkDelta && thkDelta === thinking
+            && (thkDelta = `${THINK_STR}\n${thkDelta}`);
+        thinking && txtDelta && !thinkEnd
+            && (thinkEnd = thkDelta = `${thkDelta}\n${THINK_END}\n\n`);
+        if (event?.type === 'tool_use') {
+            tool_use.push({ ...event, input: '' });
+        } else if (event.partial_json) {
+            tool_use[tool_use.length - 1].input += event.partial_json;
         }
-            tool_calls[tool_calls.length - 1].input += event?.delta?.partial_json;
-        }
-        const delta = thkDelta + txtDelta;
-        if (delta === '') { continue; }
-        result += delta;
-        event.content = [{ type: TEXT, text: options?.delta ? delta : result }];
-        await ignoreErrFunc(async () => await options?.stream?.(
-            await packGptResp(event, { ...options, processing: true })
-        ), LOG);
+        txtDelta = thkDelta + txtDelta;
+        result += txtDelta;
+        await streamResp({ text: options?.delta ? txtDelta : result }, options);
     }
-    event
+    event = {
+        role: assistant, content: [
+            ...thinking ? [{ type: THINKING, thinking, signature }] : [],
+            ...text ? [{ type: TEXT, text }] : [], ...tool_use,
+        ]
+    };
     const { toolsResult, toolsResponse } = await handleToolsCall(
+        event, { ...options, result, flavor: CLAUDE },
     );
-    if (
-    toolsResult[0].content.unshift(
-        ...event?.content.filter(x => x?.type !== 'tool_use')
-    );
+    if (tool_use.length && countToolCalls(toolsResponse) < MAX_TOOL_RECURSION) {
         return await promptClaude(content, {
-            ...options, toolsResult
+            ...options, toolsResult: [...options?.toolsResult || [],
+            ...toolsResult], result: toolsResponse,
         });
     }
-    textPart.text = [
-        ...options?.toolsResponse ? [options?.toolsResponse] : [],
-        textPart.text, ...toolsResult.length ? [
-            `⚠️ Tools recursion limit reached: ${MAX_TOOL_RECURSION}`
-        ] : [],
-    ].map(x => x.trim()).join('\n\n')
-    return packGptResp(event, options);
+    return packGptResp({ text: mergeMsgs(toolsResponse, tool_use) }, options);
 };

 const uploadFile = async (input, options) => {
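
Note: `promptClaude` now collects `tool_use` blocks itself: a `content_block` event of type `tool_use` opens an entry, and subsequent `partial_json` deltas append to the latest entry's `input`. A sketch over fabricated events:

    const tool_use = [];
    for (const event of [
        { type: 'tool_use', id: 'toolu_1', name: 'search', input: '' },
        { partial_json: '{"q":' }, { partial_json: '"weather"}' },
    ]) {
        if (event?.type === 'tool_use') { tool_use.push({ ...event, input: '' }); }
        else if (event.partial_json) { tool_use[tool_use.length - 1].input += event.partial_json; }
    }
    // tool_use[0].input → '{"q":"weather"}'
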
@@ -1093,54 +1078,46 @@ const promptGemini = async (content, options = {}) => {
     options.model = genModel;
     const chat = generative.startChat({
         history: [
-            ...options?.messages && !options?.attachments?.length
+            ...options?.messages && !options?.attachments?.length ? options.messages : [],
+            ...options?.toolsResult ? [
+                buildGeminiMessage(content, { ...options, history: true }),
+                ...options.toolsResult.slice(0, options.toolsResult.length - 1)
+            ] : []
         ], ...generationConfig(options),
     });
-    const resp = await chat
-        options?.
-    let [result, references, functionCalls]
-        references = rfc;
-        await ignoreErrFunc(async () => await options.stream(
-            await packGptResp({
-                text: () => options?.delta ? delta : result, references,
-            }, { ...options, processing: true })
-        ), LOG);
-    }
+    const resp = await chat.sendMessageStream(
+        options?.toolsResult?.[options?.toolsResult?.length]?.parts
+            || buildGeminiMessage(content, options)
+    );
+    let [result, references, functionCalls]
+        = [options?.result ?? '', null, null];
+    for await (const chunk of resp.stream) {
+        functionCalls || (functionCalls = chunk.functionCalls);
+        const delta = chunk?.text?.() || '';
+        const rfc = packGeminiReferences(
+            chunk.candidates[0]?.groundingMetadata?.groundingChunks,
+            chunk.candidates[0]?.groundingMetadata?.groundingSupports
+        );
+        if (delta === '' && !rfc) { continue; }
+        result += delta;
+        references = rfc;
+        await streamResp({ text: options?.delta ? delta : result }, options);
     }
     const _resp = await resp.response;
-        content, {
-    }, options));
+    functionCalls = (functionCalls() || _resp.functionCalls() || []).map(x => ({ functionCall: x }));
+    const { toolsResult, toolsResponse } = await handleToolsCall(
+        { role: MODEL, parts: functionCalls },
+        { ...options, result, flavor: GEMINI }
+    );
+    if (functionCalls.length && countToolCalls(toolsResponse) < MAX_TOOL_RECURSION) {
+        return promptGemini(content, {
+            ...options || {}, toolsResult: [...options?.toolsResult || [],
+            ...toolsResult], result: toolsResponse,
+        });
+    }
+    return await packGptResp({
+        text: mergeMsgs(toolsResponse, functionCalls), references,
+    }, options);
 };

 const checkEmbeddingInput = async (input, model) => {
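
Note: `promptGemini` now replays earlier tool rounds as chat history and sends only the newest tool result as the live turn, recursing until `countToolCalls` reaches `MAX_TOOL_RECURSION`. A shape-only sketch of that split, with fabricated messages in the Gemini parts format:

    const toolsResult = [
        { role: 'model', parts: [{ functionCall: { name: 'search', args: {} } }] },
        { role: 'user', parts: [{ functionResponse: { name: 'search', response: {} } }] },
    ];
    const history = toolsResult.slice(0, toolsResult.length - 1); // replayed via startChat
    const nextTurn = toolsResult[toolsResult.length - 1].parts;   // sent as the new message
    // Observation: the diff itself indexes toolsResult[toolsResult.length], which is
    // undefined at runtime, so the buildGeminiMessage fallback is what actually gets sent.
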
package/lib/manifest.mjs
CHANGED