utilitas 1998.2.60 → 1998.2.61
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -2
- package/dist/utilitas.lite.mjs +1 -1
- package/dist/utilitas.lite.mjs.map +1 -1
- package/lib/alan.mjs +20 -52
- package/lib/manifest.mjs +1 -2
- package/package.json +1 -2
package/lib/alan.mjs
CHANGED
@@ -42,7 +42,7 @@ You may be provided with some tools(functions) to help you gather information an

 const _NEED = [
     '@anthropic-ai/sdk', '@anthropic-ai/vertex-sdk', '@google/generative-ai',
-    'js-tiktoken', 'ollama', 'OpenAI',
+    'js-tiktoken', 'OpenAI',
 ];

 const [
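Note: `_NEED` lists the optional SDKs that alan.mjs resolves lazily at runtime, so dropping 'ollama' here is what allows the package dependency to be removed entirely. A minimal sketch of the lazy-import idea; the `need` helper name and behavior are assumptions for illustration, not the library's exact code:

// Sketch only: load an optional dependency on first use instead of at startup.
const need = async (name) => {
    try { return await import(name); }
    catch { throw new Error(`Missing optional dependency: ${name}.`); }
};
// e.g. const { default: OpenAI } = await need('openai');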
@@ -565,10 +565,13 @@ const init = async (options = {}) => {
             };
             break;
         case OLLAMA:
+            // https://github.com/ollama/ollama/blob/main/docs/openai.md
             ais[id] = {
-                id, provider, model,
-
-
+                id, provider, model, client: await OpenAI({
+                    baseURL: 'http://localhost:11434/v1/', apiKey: 'ollama',
+                    ...options
+                }),
+                prompt: async (cnt, opts) => await promptOpenAI(id, cnt, opts),
             };
             break;
         default:
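Note: the rewritten OLLAMA branch drops the dedicated Ollama client and instead points the existing OpenAI client and promptOpenAI path at Ollama's OpenAI-compatible endpoint (the added comment links the relevant Ollama docs). A standalone sketch of the same idea using the official openai package directly rather than utilitas's OpenAI wrapper; the model name is a placeholder:

import OpenAI from 'openai';

// Ollama exposes an OpenAI-compatible API on its local port; an apiKey is
// required by the SDK but ignored by Ollama.
const client = new OpenAI({
    baseURL: 'http://localhost:11434/v1/', apiKey: 'ollama',
});

const resp = await client.chat.completions.create({
    model: 'llama3.2', // placeholder: any model pulled into local Ollama
    messages: [{ role: 'user', content: 'Hello!' }],
});
console.log(resp.choices[0].message.content);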
@@ -638,14 +641,6 @@ const buildGptMessage = (content, options) => {
     return message;
 };

-const buildOllamaMessage = (content, options) => {
-    const message = String.isString(content) ? {
-        role: options?.role || user, content,
-    } : content;
-    assertContent(message.content);
-    return message;
-};
-
 const buildGeminiParts = (text, attachments) => {
     // Gemini API does not allow empty text, even you prompt with attachments.
     const message = [...text?.length || attachments?.length ? [{
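Note: buildOllamaMessage can be deleted because it produced the same `{ role, content }` shape as the OpenAI-style builder, so OLLAMA prompts now reuse the ChatGPT message path. Per the removed code, both builders normalize input roughly like this (a sketch in plain JS, not the exact remaining implementation):

// A string becomes a chat message; an object passes through unchanged.
const toMessage = (content, role = 'user') =>
    typeof content === 'string' ? { role, content } : content;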
@@ -695,8 +690,8 @@ const buildGeminiHistory = (text, options) => buildGeminiMessage(
     text, { ...options || {}, history: true }
 );

-const [getOpenAIClient, getGeminiClient, getClaudeClient, getOllamaClient]
-    = [OPENAI, GEMINI, CLAUDE, OLLAMA].map(
+const [getOpenAIClient, getGeminiClient, getClaudeClient]
+    = [OPENAI, GEMINI, CLAUDE].map(
         x => async options => await init({ ...provider(x), ...options })
     );

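Note: the getXxxClient helpers are generated by mapping provider names onto curried init calls, so removing getOllamaClient from the destructuring is the whole change. A simplified sketch of the pattern; makeGetClient is a hypothetical name standing in for the inline map:

// One factory per provider: each returns init() pre-bound to that provider.
const makeGetClient = (name) => async (options) =>
    await init({ ...provider(name), ...options });
// const getOpenAIClient = makeGetClient(OPENAI); etc.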
@@ -742,8 +737,7 @@ const packResp = async (resp, options) => {
     ])) && (audio = await convert(audio, {
         input: BUFFER, expected: BUFFER, ...options || {},
     }));
-    options?.jsonMode && !options?.delta
-        && (json = parseJson(simpleText));
+    options?.jsonMode && !options?.delta && (json = parseJson(simpleText, null));
     if (options?.simple && options?.audioMode) { return audio; }
     else if (options?.simple && options?.jsonMode) { return json; }
     else if (options?.simple) { return simpleText; }
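Note: the jsonMode change passes an explicit null fallback to parseJson, so unparsable model output yields null rather than the helper's default. Assuming parseJson follows the usual try/catch-with-fallback shape (an assumption about the utilitas helper, not a quote of it):

// Assumed semantics of parseJson(str, fallback): never throw on bad JSON.
const parseJson = (str, fallback = null) => {
    try { return JSON.parse(str); } catch { return fallback; }
};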
@@ -829,10 +823,6 @@ const buildPrompts = async (model, input, options = {}) => {
             systemPrompt = options.systemPrompt;
             prompt = buildClaudeMessage(content, { ...options, cache_control: true });
             break;
-        case OLLAMA:
-            systemPrompt = buildOllamaMessage(options.systemPrompt, _system);
-            prompt = buildOllamaMessage(content, options);
-            break;
         case GEMINI:
             const _role = { role: options.model === GEMMA_3_27B ? user : system };
             systemPrompt = buildGeminiHistory(options.systemPrompt, _role);
@@ -852,10 +842,6 @@ const buildPrompts = async (model, input, options = {}) => {
                 history.push(buildClaudeMessage(x.request, _user));
                 history.push(buildClaudeMessage(x.response, _assistant));
                 break;
-            case OLLAMA:
-                history.push(buildOllamaMessage(x.request, _user));
-                history.push(buildOllamaMessage(x.response, _assistant));
-                break;
             case GEMINI:
                 if (options.attachments?.length) { return; }
                 history.push(buildGeminiHistory(x.request, _user));
@@ -864,9 +850,14 @@ const buildPrompts = async (model, input, options = {}) => {
         }
     });
     switch (options.flavor) {
-        case CHATGPT:
+        case CHATGPT:
+            history = messages([
+                systemPrompt, ...history, prompt,
+                ...options.toolsResult?.length ? options.toolsResult : []
+            ]);
+            break;
+        case CLAUDE:
             history = messages([
-                ...options.flavor === CLAUDE ? [] : [systemPrompt],
                 ...history, prompt,
                 ...options.toolsResult?.length ? options.toolsResult : []
             ]);
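Note: splitting CHATGPT and CLAUDE into separate cases replaces the inline flavor ternary, reflecting the two APIs' conventions: OpenAI-style requests carry the system prompt inside the messages array, while Anthropic takes it as a separate top-level parameter (which is also why a later hunk nulls systemPrompt for the CHATGPT flavor once it has been folded into history). A sketch of the two resulting payload shapes; field values are placeholders:

// CHATGPT flavor: system prompt travels as the first chat message.
const openAiPayload = {
    messages: [
        { role: 'system', content: 'You are helpful.' },
        { role: 'user', content: 'Hi' },
    ],
};

// CLAUDE flavor: system prompt stays out of messages, passed separately.
const anthropicPayload = {
    system: 'You are helpful.',
    messages: [{ role: 'user', content: 'Hi' }],
};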
@@ -891,8 +882,7 @@ const buildPrompts = async (model, input, options = {}) => {
             content = trimTailing(trimTailing(content).slice(0, -1)) + '...';
         }
     }, model.maxInputTokens - options.attachments?.length * ATTACHMENT_TOKEN_COST);
-    if ([CHATGPT, OLLAMA].includes(options.flavor)
-        || options.model === GEMMA_3_27B) {
+    if ([CHATGPT].includes(options.flavor) || options.model === GEMMA_3_27B) {
         systemPrompt = null;
     }
     return { systemPrompt, history, prompt };
@@ -1063,27 +1053,6 @@ const promptOpenAI = async (aiId, content, options = {}) => {
     return await packResp(event, options);
 };

-const promptOllama = async (aiId, content, options = {}) => {
-    const { client, model } = await getAi(aiId);
-    // https://github.com/ollama/ollama-js
-    // https://github.com/jmorganca/ollama/blob/main/examples/typescript-simplechat/client.ts
-    options.model = options?.model || model.name;
-    let [chunk, result] = [null, ''];
-    const { history }
-        = await buildPrompts(model, content, { ...options, flavor: OLLAMA });
-    const resp = await client.chat({
-        model: options.model, stream: true, ...history,
-    });
-    for await (chunk of resp) {
-        const delta = chunk.message.content || '';
-        result += delta;
-        delta && await streamResp({
-            text: options.delta ? delta : result,
-        }, options);
-    }
-    return await packResp({ text: result }, options);
-};
-
 const promptAnthropic = async (aiId, content, options = {}) => {
     const { client, model } = await getAi(aiId);
     let [
@@ -1458,7 +1427,7 @@ const talk = async (input, options) => {
         case CHATGPT: resp = await promptOpenAI(input, pmtOptions); break;
         case GEMINI: resp = await promptGemini(input, pmtOptions); break;
         case CLAUDE: resp = await promptAnthropic(input, pmtOptions); break;
-        case OLLAMA: resp = await promptOllama(input, pmtOptions); break;
+        // case OLLAMA: resp = await promptOllama(input, pmtOptions); break;
         case AZURE: resp = await promptAzure(input, pmtOptions); break;
         default: throwError(`Invalid AI engine: '${engine}'.`);
     }
@@ -1585,7 +1554,7 @@ const PREFERRED_ENGINES = [
     { client: GEMINI, func: promptGemini, multimodal: 1 },
     { client: CLAUDE, func: promptAnthropic, multimodal: 2 },
     // { client: AZURE, func: promptAzure, multimodal: 3 },
-    { client: OLLAMA, func: promptOllama, multimodal: 99 },
+    // { client: OLLAMA, func: promptOllama, multimodal: 99 },
 ]; // keep gpt first to avoid gemini grounding by default

 export default init;
@@ -1619,7 +1588,6 @@ export {
     prompt, promptOpenAI,
     promptAnthropic,
     promptGemini,
-    promptOllama,
     resetSession,
     tailGptFineTuningEvents,
     talk,
package/lib/manifest.mjs
CHANGED
@@ -1,7 +1,7 @@
 const manifest = {
     "name": "utilitas",
     "description": "Just another common utility for JavaScript.",
-    "version": "1998.2.60",
+    "version": "1998.2.61",
     "private": false,
     "homepage": "https://github.com/Leask/utilitas",
     "main": "index.mjs",
@@ -53,7 +53,6 @@ const manifest = {
     "node-mailjet": "^6.0.8",
     "node-polyfill-webpack-plugin": "^4.1.0",
     "office-text-extractor": "^3.0.3",
-    "ollama": "^0.5.14",
     "openai": "^4.87.3",
     "pdfjs-dist": "^4.10.38",
     "pg": "^8.14.0",
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
     "name": "utilitas",
     "description": "Just another common utility for JavaScript.",
-    "version": "1998.2.60",
+    "version": "1998.2.61",
     "private": false,
     "homepage": "https://github.com/Leask/utilitas",
     "main": "index.mjs",
@@ -64,7 +64,6 @@
     "node-mailjet": "^6.0.8",
     "node-polyfill-webpack-plugin": "^4.1.0",
     "office-text-extractor": "^3.0.3",
-    "ollama": "^0.5.14",
     "openai": "^4.87.3",
     "pdfjs-dist": "^4.10.38",
     "pg": "^8.14.0",