utilitas 1998.2.60 → 1998.2.62
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published.
- package/README.md +3 -3
- package/dist/utilitas.lite.mjs +1 -1
- package/dist/utilitas.lite.mjs.map +1 -1
- package/lib/alan.mjs +38 -59
- package/lib/manifest.mjs +1 -2
- package/lib/utilitas.mjs +4 -2
- package/package.json +1 -2
package/lib/alan.mjs
CHANGED
@@ -42,14 +42,14 @@ You may be provided with some tools(functions) to help you gather information an
 
 const _NEED = [
     '@anthropic-ai/sdk', '@anthropic-ai/vertex-sdk', '@google/generative-ai',
-    'js-tiktoken', '
+    'js-tiktoken', 'OpenAI',
 ];
 
 const [
     OPENAI, GEMINI, CHATGPT, OPENAI_EMBEDDING, GEMINI_EMEDDING, OPENAI_TRAINING,
     OLLAMA, CLAUDE, GPT_4O_MINI, GPT_4O, GPT_O1, GPT_O3_MINI, GEMINI_20_FLASH,
     GEMINI_20_FLASH_THINKING, GEMINI_20_PRO, NOVA, EMBEDDING_001, DEEPSEEK_R1,
-    DEEPSEEK_R1_70B, MD_CODE, CHATGPT_REASONING, TEXT_EMBEDDING_3_SMALL,
+    DEEPSEEK_R1_70B, DEEPSEEK_R1_32B, MD_CODE, CHATGPT_REASONING, TEXT_EMBEDDING_3_SMALL,
     TEXT_EMBEDDING_3_LARGE, CLOUD_37_SONNET, AUDIO, WAV, CHATGPT_MINI,
     ATTACHMENTS, CHAT, OPENAI_VOICE, MEDIUM, LOW, HIGH, GPT_REASONING_EFFORT,
     THINK, THINK_STR, THINK_END, AZURE, TOOLS_STR, TOOLS_END, TOOLS, TEXT,
@@ -62,11 +62,12 @@ const [
     'OPENAI_TRAINING', 'OLLAMA', 'CLAUDE', 'gpt-4o-mini', 'gpt-4o', 'o1',
     'o3-mini', 'gemini-2.0-flash', 'gemini-2.0-flash-thinking-exp',
     'gemini-2.0-pro-exp', 'nova', 'embedding-001', 'deepseek-r1',
-    'deepseek-r1:70b', '
-    'text-embedding-3-
-    '
-    '
-    '
+    'deepseek-r1:70b', 'deepseek-r1:32b', '```', 'CHATGPT_REASONING',
+    'text-embedding-3-small', 'text-embedding-3-large',
+    'claude-3-7-sonnet@20250219', 'audio', 'wav', 'CHATGPT_MINI',
+    '[ATTACHMENTS]', 'CHAT', 'OPENAI_VOICE', 'medium', 'low', 'high',
+    'medium', 'think', '<think>', '</think>', 'AZURE', '<tools>',
+    '</tools>', 'tools', 'text', 'thinking', 'OK', 'function',
     'gpt-4.5-preview', 'redacted_thinking', 'gemma-3-27b-it',
     'AZURE OPENAI', 'ANTHROPIC', 'VERTEX ANTHROPIC', 'gemma3:27b',
     7680 * 4320, {}, 10, { log: true }, 'Alan', 'user', 'system',
@@ -369,6 +370,7 @@ const MODELS = {
 };
 
 MODELS[DEEPSEEK_R1_70B] = MODELS[DEEPSEEK_R1];
+MODELS[DEEPSEEK_R1_32B] = MODELS[DEEPSEEK_R1];
 MODELS[GEMMA327B] = MODELS[GEMMA_3_27B];
 
 for (const n in MODELS) {
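The new deepseek-r1:32b tag reuses the deepseek-r1 spec by reference, the same aliasing trick already applied to the 70B tag. A minimal sketch of the pattern (the contextWindow value is a placeholder, not the real spec):

// Both alias keys point at the same object, so per-model mutations made by
// the later `for (const n in MODELS)` pass apply to every alias at once.
const MODELS = { 'deepseek-r1': { contextWindow: 128000 /* placeholder */ } };
MODELS['deepseek-r1:70b'] = MODELS['deepseek-r1'];
MODELS['deepseek-r1:32b'] = MODELS['deepseek-r1'];
console.log(MODELS['deepseek-r1:32b'] === MODELS['deepseek-r1']); // true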
@@ -565,11 +567,22 @@ const init = async (options = {}) => {
             };
             break;
         case OLLAMA:
+            // https://github.com/ollama/ollama/blob/main/docs/openai.md
+            const baseURL = 'http://localhost:11434/v1/';
             ais[id] = {
-                id, provider, model,
-
-
+                id, provider, model, client: await OpenAI({
+                    baseURL, apiKey: 'ollama', ...options
+                }),
+                prompt: async (cnt, opts) => await promptOpenAI(id, cnt, opts),
             };
+            const phLog = m => log(`Ollama preheat: ${m?.message || m}`);
+            ignoreErrFunc(async () => {
+                phLog(await (await fetch(`${baseURL}completions`, {
+                    method: 'POST', body: JSON.stringify({
+                        model: model.name, prompt: '', keep_alive: -1
+                    })
+                })).text());
+            }, { log: phLog });
             break;
         default:
             throwError(`Invalid AI provider: ${options.provider || 'null'}.`);
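With this change the OLLAMA provider no longer needs the `ollama` client library (dropped from the dependencies below): it talks to the local server through Ollama's OpenAI-compatible endpoint, and the preheat request asks the server to load the model and keep it resident (`keep_alive: -1`). A standalone sketch of the same flow, using the `openai` npm package directly rather than the library's internal `OpenAI()` wrapper; the model name is a placeholder:

import OpenAI from 'openai';

const baseURL = 'http://localhost:11434/v1/';
// Ollama ignores the API key, but the client requires a non-empty one.
const client = new OpenAI({ baseURL, apiKey: 'ollama' });

// Preheat (fire-and-forget): an empty prompt with keep_alive: -1 makes
// Ollama load the model now and keep it in memory indefinitely.
fetch(`${baseURL}completions`, {
    method: 'POST',
    body: JSON.stringify({ model: 'llama3.2', prompt: '', keep_alive: -1 }),
}).catch(() => { });

const resp = await client.chat.completions.create({
    model: 'llama3.2',
    messages: [{ role: 'user', content: 'Hello!' }],
});
console.log(resp.choices[0].message.content);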
@@ -638,14 +651,6 @@ const buildGptMessage = (content, options) => {
     return message;
 };
 
-const buildOllamaMessage = (content, options) => {
-    const message = String.isString(content) ? {
-        role: options?.role || user, content,
-    } : content;
-    assertContent(message.content);
-    return message;
-};
-
 const buildGeminiParts = (text, attachments) => {
     // Gemini API does not allow empty text, even you prompt with attachments.
     const message = [...text?.length || attachments?.length ? [{
@@ -695,8 +700,8 @@ const buildGeminiHistory = (text, options) => buildGeminiMessage(
     text, { ...options || {}, history: true }
 );
 
-const [getOpenAIClient, getGeminiClient,
-    = [OPENAI, GEMINI,
+const [getOpenAIClient, getGeminiClient, getClaudeClient]
+    = [OPENAI, GEMINI, CLAUDE].map(
     x => async options => await init({ ...provider(x), ...options })
 );
 
@@ -742,8 +747,7 @@ const packResp = async (resp, options) => {
     ])) && (audio = await convert(audio, {
         input: BUFFER, expected: BUFFER, ...options || {},
     }));
-    options?.jsonMode && !options?.delta &&
-        && (json = parseJson(simpleText));
+    options?.jsonMode && !options?.delta && (json = parseJson(simpleText, null));
     if (options?.simple && options?.audioMode) { return audio; }
     else if (options?.simple && options?.jsonMode) { return json; }
     else if (options?.simple) { return simpleText; }
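Besides dropping what appears to be a stray second `&&` in the old lines, the new line passes an explicit fallback to `parseJson`, so a response that is not valid JSON yields `null` instead of an exception. A sketch of the assumed `parseJson(any, fallback)` semantics:

// Assumed semantics: return the fallback instead of throwing when the
// input cannot be parsed as JSON.
const parseJson = (any, fallback = null) => {
    try { return JSON.parse(any); } catch { return fallback; }
};

parseJson('{"ok":true}');    // => { ok: true }
parseJson('not json', null); // => null, no exception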
@@ -829,10 +833,6 @@ const buildPrompts = async (model, input, options = {}) => {
             systemPrompt = options.systemPrompt;
             prompt = buildClaudeMessage(content, { ...options, cache_control: true });
             break;
-        case OLLAMA:
-            systemPrompt = buildOllamaMessage(options.systemPrompt, _system);
-            prompt = buildOllamaMessage(content, options);
-            break;
         case GEMINI:
             const _role = { role: options.model === GEMMA_3_27B ? user : system };
             systemPrompt = buildGeminiHistory(options.systemPrompt, _role);
@@ -852,10 +852,6 @@ const buildPrompts = async (model, input, options = {}) => {
             history.push(buildClaudeMessage(x.request, _user));
             history.push(buildClaudeMessage(x.response, _assistant));
             break;
-        case OLLAMA:
-            history.push(buildOllamaMessage(x.request, _user));
-            history.push(buildOllamaMessage(x.response, _assistant));
-            break;
         case GEMINI:
             if (options.attachments?.length) { return; }
             history.push(buildGeminiHistory(x.request, _user));
@@ -864,9 +860,14 @@ const buildPrompts = async (model, input, options = {}) => {
         }
     });
     switch (options.flavor) {
-        case CHATGPT:
+        case CHATGPT:
+            history = messages([
+                systemPrompt, ...history, prompt,
+                ...options.toolsResult?.length ? options.toolsResult : []
+            ]);
+            break;
+        case CLAUDE:
             history = messages([
-                ...options.flavor === CLAUDE ? [] : [systemPrompt],
                 ...history, prompt,
                 ...options.toolsResult?.length ? options.toolsResult : []
             ]);
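The restructured switch makes the flavor difference explicit: OpenAI-style chat APIs expect the system prompt in-band as the first message, while Anthropic's API takes it as a separate top-level `system` field, so the CLAUDE branch leaves it out of `history`. A schematic of the two request shapes (values are placeholders):

// OpenAI-style: system prompt travels inside the messages array.
const openaiStyle = {
    model: 'gpt-4o',
    messages: [
        { role: 'system', content: 'You are Alan.' },
        { role: 'user', content: 'Hi!' },
    ],
};

// Anthropic-style: system prompt is a top-level field; messages hold
// only the user/assistant turns.
const anthropicStyle = {
    model: 'claude-3-7-sonnet@20250219',
    system: 'You are Alan.',
    messages: [{ role: 'user', content: 'Hi!' }],
};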
@@ -891,8 +892,7 @@ const buildPrompts = async (model, input, options = {}) => {
             content = trimTailing(trimTailing(content).slice(0, -1)) + '...';
         }
     }, model.maxInputTokens - options.attachments?.length * ATTACHMENT_TOKEN_COST);
-    if ([CHATGPT
-        || options.model === GEMMA_3_27B) {
+    if ([CHATGPT].includes(options.flavor) || options.model === GEMMA_3_27B) {
         systemPrompt = null;
     }
     return { systemPrompt, history, prompt };
@@ -1010,6 +1010,7 @@ const promptOpenAI = async (aiId, content, options = {}) => {
     const resp = await client.chat.completions.create({
         model: azure ? undefined : options.model, ...history,
         ...options.jsonMode ? { response_format: { type: JSON_OBJECT } } : {},
+        ...provider === OLLAMA ? { keep_alive: -1 } : {},
         modalities, audio: options.audio || (
             modalities?.find?.(x => x === AUDIO)
             && { voice: DEFAULT_MODELS[OPENAI_VOICE], format: 'pcm16' }
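`keep_alive` is an Ollama extension rather than part of the OpenAI schema, which is why it is spread in only for the OLLAMA provider: `-1` keeps the model loaded indefinitely, `0` unloads it right after the request, and a duration string such as `'5m'` sets a TTL. Matching the preheat request in init, every chat call now renews the indefinite residency. Sketch of the resulting request body for a local model:

// The conditionally spread field as sent to Ollama's /v1 endpoint.
const body = {
    model: 'deepseek-r1:32b',
    messages: [{ role: 'user', content: 'Hi!' }],
    keep_alive: -1, // Ollama-only; other providers never see this key
};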
@@ -1063,27 +1064,6 @@ const promptOpenAI = async (aiId, content, options = {}) => {
     return await packResp(event, options);
 };
 
-const promptOllama = async (aiId, content, options = {}) => {
-    const { client, model } = await getAi(aiId);
-    // https://github.com/ollama/ollama-js
-    // https://github.com/jmorganca/ollama/blob/main/examples/typescript-simplechat/client.ts
-    options.model = options?.model || model.name;
-    let [chunk, result] = [null, ''];
-    const { history }
-        = await buildPrompts(model, content, { ...options, flavor: OLLAMA });
-    const resp = await client.chat({
-        model: options.model, stream: true, ...history,
-    });
-    for await (chunk of resp) {
-        const delta = chunk.message.content || '';
-        result += delta;
-        delta && await streamResp({
-            text: options.delta ? delta : result,
-        }, options);
-    }
-    return await packResp({ text: result }, options);
-};
-
 const promptAnthropic = async (aiId, content, options = {}) => {
     const { client, model } = await getAi(aiId);
     let [
@@ -1458,7 +1438,7 @@ const talk = async (input, options) => {
         case CHATGPT: resp = await promptOpenAI(input, pmtOptions); break;
         case GEMINI: resp = await promptGemini(input, pmtOptions); break;
         case CLAUDE: resp = await promptAnthropic(input, pmtOptions); break;
-        case OLLAMA: resp = await promptOllama(input, pmtOptions); break;
+        // case OLLAMA: resp = await promptOllama(input, pmtOptions); break;
         case AZURE: resp = await promptAzure(input, pmtOptions); break;
         default: throwError(`Invalid AI engine: '${engine}'.`);
     }
@@ -1585,13 +1565,13 @@ const PREFERRED_ENGINES = [
     { client: GEMINI, func: promptGemini, multimodal: 1 },
     { client: CLAUDE, func: promptAnthropic, multimodal: 2 },
     // { client: AZURE, func: promptAzure, multimodal: 3 },
-    { client: OLLAMA, func: promptOllama, multimodal: 99 },
+    // { client: OLLAMA, func: promptOllama, multimodal: 99 },
 ]; // keep gpt first to avoid gemini grounding by default
 
 export default init;
 export {
     ATTACHMENT_TOKEN_COST, CLOUD_37_SONNET, CODE_INTERPRETER, DEEPSEEK_R1,
-    DEEPSEEK_R1_70B, DEFAULT_MODELS,
+    DEEPSEEK_R1_32B, DEEPSEEK_R1_70B, DEFAULT_MODELS,
     EMBEDDING_001,
     FUNCTION, GEMINI_20_FLASH, GEMINI_20_FLASH_THINKING, GPT_45, GPT_4O, GPT_4O_MINI, GPT_O1, GPT_O3_MINI, INSTRUCTIONS, MODELS,
     OPENAI_VOICE, RETRIEVAL,
@@ -1619,7 +1599,6 @@ export {
     prompt, promptOpenAI,
     promptAnthropic,
     promptGemini,
-    promptOllama,
     resetSession,
     tailGptFineTuningEvents,
     talk,
package/lib/manifest.mjs
CHANGED
@@ -1,7 +1,7 @@
 const manifest = {
     "name": "utilitas",
     "description": "Just another common utility for JavaScript.",
-    "version": "1998.2.60",
+    "version": "1998.2.62",
     "private": false,
     "homepage": "https://github.com/Leask/utilitas",
     "main": "index.mjs",
@@ -53,7 +53,6 @@ const manifest = {
     "node-mailjet": "^6.0.8",
     "node-polyfill-webpack-plugin": "^4.1.0",
     "office-text-extractor": "^3.0.3",
-    "ollama": "^0.5.14",
     "openai": "^4.87.3",
     "pdfjs-dist": "^4.10.38",
     "pg": "^8.14.0",
package/lib/utilitas.mjs
CHANGED
@@ -650,8 +650,10 @@ const checkInterval = (itv, sed) =>
 
 const ignoreErrFunc = async (func, options) => {
     const run = async () => {
-        try { return await func(...options?.args || []) }
-
+        try { return await func(...options?.args || []) } catch (err) {
+            if (Function.isFunction(options?.log)) { options.log(err); }
+            else if (options?.log) { console.error(err); }
+        }
     };
     if (options?.await) { await timeout(options.await); return await run(); }
     else if (options?.wait) { return setTimeout(run, options.wait); }
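With this change, `ignoreErrFunc` routes a caught error through `options.log` when it is a function, falls back to `console.error` for any other truthy `log`, and stays silent otherwise. This is what lets the Ollama preheat in alan.mjs report failures through its own `phLog` without ever throwing. Usage sketch (`myLogger` is hypothetical):

// Swallow the error silently:
await ignoreErrFunc(async () => { throw new Error('boom'); });

// Log it via console.error:
await ignoreErrFunc(async () => { throw new Error('boom'); }, { log: true });

// Route it to a custom handler:
await ignoreErrFunc(async () => { throw new Error('boom'); }, {
    log: err => myLogger.warn(err.message), // any logger you like
});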
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "utilitas",
   "description": "Just another common utility for JavaScript.",
-  "version": "1998.2.60",
+  "version": "1998.2.62",
   "private": false,
   "homepage": "https://github.com/Leask/utilitas",
   "main": "index.mjs",
@@ -64,7 +64,6 @@
   "node-mailjet": "^6.0.8",
   "node-polyfill-webpack-plugin": "^4.1.0",
   "office-text-extractor": "^3.0.3",
-  "ollama": "^0.5.14",
   "openai": "^4.87.3",
   "pdfjs-dist": "^4.10.38",
   "pg": "^8.14.0",