utilitas 2000.3.54 → 2000.3.56
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/utilitas.lite.mjs +1 -1
- package/dist/utilitas.lite.mjs.map +1 -1
- package/lib/alan.mjs +30 -28
- package/lib/manifest.mjs +1 -1
- package/lib/rag.mjs +4 -4
- package/package.json +1 -1
package/lib/alan.mjs
CHANGED
@@ -48,13 +48,13 @@ const [
 TOOLS_STR, TOOLS_END, TOOLS, TEXT, OK, FUNC, GPT_52, GPT_51_CODEX,
 GPT_5_IMAGE, GEMMA_3_27B, ANTHROPIC, v8k, ais, MAX_TOOL_RECURSION, LOG,
 name, user, system, assistant, MODEL, JSON_OBJECT, tokenSafeRatio,
-PROMPT_IS_REQUIRED, OPENAI_HI_RES_SIZE, k,
-
-
-
-
-
-
+PROMPT_IS_REQUIRED, OPENAI_HI_RES_SIZE, k, m, minute, hour, gb, trimTailing,
+trimBeginning, GEMINI_30_PRO_IMAGE, IMAGE, JINA, JINA_DEEPSEARCH,
+SILICONFLOW, SF_DEEPSEEK_32, MAX_TIRE, OPENROUTER_API, OPENROUTER, AUTO,
+TOOL, S_OPENAI, S_GOOGLE, S_ANTHROPIC, ONLINE, GEMINI_30_PRO,
+GEMINI_25_FLASH, IMAGEN_4_ULTRA, VEO_31, IMAGEN_4_UPSCALE, ERROR_GENERATING,
+GEMINI_25_FLASH_TTS, GEMINI_25_PRO_TTS, wav, GPT_4O_MIMI_TTS,
+GPT_4O_TRANSCRIBE, INVALID_AUDIO, OGG_EXT, ELLIPSIS,
 ] = [
 'OpenAI', 'Google', 'Ollama', 'nova', 'deepseek-3.2-speciale', '```',
 'claude-opus-4.5', 'audio', 'wav', 'OPENAI_VOICE', 'medium', 'think',
@@ -62,12 +62,11 @@ const [
 'function', 'gpt-5.2', 'gpt-5.1-codex', 'gpt-5-image', 'gemma3:27b',
 'Anthropic', 7680 * 4320, [], 30, { log: true }, 'Alan', 'user',
 { role: 'system' }, 'assistant', 'model', 'json_object', 1.1,
-'Prompt is required.', 2048 * 2048, x =>
-x =>
-x =>
-
-'
-'deepseek-ai/DeepSeek-V3.2-exp', 768 * 768,
+'Prompt is required.', 2048 * 2048, x => 1000 * x, x => 1000 * 1000 * x,
+x => 60 * x, x => 60 * 60 * x, x => 1000 * 1000 * 1000 * x,
+x => x.replace(/[\.\s]*$/, ''), x => x.replace(/^[\.\s]*/, ''),
+'gemini-3-pro-image-preview', 'image', 'Jina', 'jina-deepsearch-v1',
+'SiliconFlow', 'deepseek-ai/DeepSeek-V3.2-exp', 768 * 768,
 'https://openrouter.ai/api/v1', 'OpenRouter', 'openrouter/auto', 'tool',
 'openai', 'google', 'anthropic', ':online', 'gemini-3-pro-preview',
 'gemini-2.5-flash-preview-09-2025', 'imagen-4.0-ultra-generate-001',
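
Note: pairing the names added on the new lines 51-52 with the values added on the new lines 65-67, the new helpers appear to resolve as below. This is a reading aid only, not code from the package; the pairing is positional and the unit comments are inferred from how the helpers are used later in this file.

// Reading aid (inferred, not package source): the newly destructured helpers.
const k = x => 1000 * x;                        // k(400)  -> 400000, used for token limits below
const m = x => 1000 * 1000 * x;                 // m(50)   -> 50000000, likely byte sizes
const minute = x => 60 * x;                     // presumably minutes -> seconds
const hour = x => 60 * 60 * x;                  // presumably hours -> seconds
const gb = x => 1000 * 1000 * 1000 * x;         // presumably gigabytes -> bytes
const trimTailing = x => x.replace(/[\.\s]*$/, '');   // drop trailing dots/whitespace
const trimBeginning = x => x.replace(/^[\.\s]*/, ''); // drop leading dots/whitespace
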
@@ -112,7 +111,7 @@ const GEMINI_RULES = {

 const OPENAI_RULES = {
 source: S_OPENAI, icon: '⚛️',
-contextWindow:
+contextWindow: k(400), maxOutputTokens: k(128),
 imageCostTokens: ~~(OPENAI_HI_RES_SIZE / MAX_TIRE * 140 + 70),
 maxFileSize: m(50), maxImageSize: OPENAI_HI_RES_SIZE,
 json: true, tools: true, vision: true, hearing: true, reasoning: true,
@@ -122,7 +121,7 @@ const OPENAI_RULES = {
 };

 const DEEPSEEK_32_RULES = {
-icon: '🐬', contextWindow:
+icon: '🐬', contextWindow: k(163.8), maxOutputTokens: k(65.5),
 json: true, tools: true, reasoning: true,
 };

@@ -164,7 +163,7 @@ const MODELS = {
 [GPT_51_CODEX]: { ...OPENAI_RULES },
 [CLOUD_OPUS_45]: {
 source: S_ANTHROPIC, icon: '✳️',
-contextWindow:
+contextWindow: k(200), maxOutputTokens: k(64),
 documentCostTokens: 3000 * 10, maxDocumentFile: m(32),
 maxDocumentPages: 100, imageCostTokens: ~~(v8k / 750),
 maxImagePerPrompt: 100, maxFileSize: m(5), maxImageSize: 2000 * 2000,
@@ -176,15 +175,15 @@ const MODELS = {
 },
 // tts/stt models
 [GEMINI_25_FLASH_TTS]: {
-source: S_GOOGLE, maxInputTokens:
+source: S_GOOGLE, maxInputTokens: k(32), audio: true, fast: true,
 hidden: true, defaultProvider: GOOGLE,
 },
 [GEMINI_25_PRO_TTS]: {
-source: S_GOOGLE, maxInputTokens:
+source: S_GOOGLE, maxInputTokens: k(32), audio: true,
 hidden: true, defaultProvider: GOOGLE,
 },
 [GPT_4O_MIMI_TTS]: {
-source: S_OPENAI, maxInputTokens:
+source: S_OPENAI, maxInputTokens: k(2), audio: true, fast: true,
 hidden: true, defaultProvider: OPENAI,
 },
 [GPT_4O_TRANSCRIBE]: {
@@ -204,7 +203,7 @@ const MODELS = {
 [SF_DEEPSEEK_32]: { ...DEEPSEEK_32_RULES, defaultProvider: SILICONFLOW },
 // best local model
 [GEMMA_3_27B]: {
-icon: '❇️', contextWindow:
+icon: '❇️', contextWindow: k(128), maxOutputTokens: k(8),
 imageCostTokens: 256, maxImageSize: 896 * 896,
 supportedMimeTypes: [MIME_PNG, MIME_JPEG, MIME_GIF],
 fast: true, json: true, vision: true,
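
Note: the five model-rule hunks above all follow one pattern: the limits are now written through the k() helper. Expanded with k = x => 1000 * x (as destructured earlier in this file), they read roughly as below; the wrapping LIMITS object and its TTS labels are illustrative, not identifiers from the package.

// Illustrative expansion of the k() calls above, assuming k = x => 1000 * x.
const LIMITS = {
    OPENAI_RULES: { contextWindow: 400000, maxOutputTokens: 128000 },     // k(400), k(128)
    DEEPSEEK_32_RULES: { contextWindow: 163800, maxOutputTokens: 65500 }, // k(163.8), k(65.5)
    CLOUD_OPUS_45: { contextWindow: 200000, maxOutputTokens: 64000 },     // k(200), k(64)
    GEMMA_3_27B: { contextWindow: 128000, maxOutputTokens: 8000 },        // k(128), k(8)
    ttsSttMaxInput: { geminiTts: 32000, gpt4oMiniTts: 2000 },             // k(32), k(2)
};
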
@@ -1284,9 +1283,9 @@ const talk = async (input, options = {}) => {
 };

 const getChatPromptLimit = async (options) => {
-let resp = 0;
+let [resp, aiId] = [0, ensureArray(options?.aiId).filter(x => x)];
 (await getAi(null, { all: true })).map(x => {
-if (
+if (aiId.length && !aiId.includes(x.id)) { return; }
 const maxInputTokens = x.model.maxInputTokens;
 resp = resp ? Math.min(resp, maxInputTokens) : maxInputTokens;
 });
@@ -1295,9 +1294,9 @@ const getChatPromptLimit = async (options) => {
 };

 const getChatAttachmentCost = async (options) => {
-let resp = 0;
+let [resp, aiId] = [0, ensureArray(options?.aiId).filter(x => x)];
 (await getAi(null, { all: true })).map(x => {
-if (
+if (aiId.length && !aiId.includes(x.id)) { return; }
 resp = Math.max(resp, x.model.imageCostTokens || 0);
 });
 assert(resp > 0, 'Chat engine has not been initialized.');
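
Note: getChatPromptLimit and getChatAttachmentCost gain the same optional filter. options.aiId (a single id or an array; ensureArray is a utilitas helper) restricts which initialized engines are considered, so the returned limit or cost reflects only the selected engines. A standalone sketch of the filtering logic follows; ensureArray is a stand-in here, and `engines` stands for the result of getAi(null, { all: true }).

// Standalone sketch of the new aiId filter (stand-ins, not package source).
const ensureArray = x => Array.isArray(x) ? x : (x == null ? [] : [x]);

const promptLimitFor = (engines, options) => {
    const aiId = ensureArray(options?.aiId).filter(x => x);
    let resp = 0;
    engines.forEach(x => {
        if (aiId.length && !aiId.includes(x.id)) { return; }  // skip engines not selected
        const maxInputTokens = x.model.maxInputTokens;
        resp = resp ? Math.min(resp, maxInputTokens) : maxInputTokens;
    });
    return resp;  // smallest maxInputTokens among the selected engines
};
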
@@ -1346,7 +1345,7 @@ const prompt = async (input, options = {}) => {
 };

 const trimPrompt = async (getPrompt, trimFunc, contextWindow, options) => {
-let [i, maxTry] = [0, ~~options?.maxTry ||
+let [i, maxTry] = [0, ~~options?.maxTry || k(128)];
 while ((await countTokens(await getPrompt(), { fast: true }) > contextWindow)
 || (await countTokens(await getPrompt()) > contextWindow)) {
 await trimFunc();
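
Note: trimPrompt's default maxTry, previously a truncated literal here, is now written as k(128). Expanded (assuming k = x => 1000 * x), the default reads as below; maxTry presumably bounds the trim loop, though the check itself is outside this hunk.

// Illustrative expansion of the default above; options is a placeholder for trimPrompt's argument.
const options = {};                            // hypothetical call with no maxTry supplied
const maxTry = ~~options?.maxTry || 128000;    // k(128) -> at most 128,000 trim passes
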
@@ -1365,6 +1364,10 @@ const analyzeSessions = async (sessionIds, options) => {
 ));
 if (sm.length) { sses[ids[i]] = sm; }
 }
+const ai = await getAi(options?.aiId, {
+jsonMode: true, simple: true, select: { json: true, fast: true },
+...options || {}
+});
 const pmt = options?.prompt || (
 'Help me organize the dialogues in the following JSON into a title '
 + 'dictionary and return it in JSON format. The input data may contain '
@@ -1384,10 +1387,9 @@ const analyzeSessions = async (sessionIds, options) => {
 x, JSON.stringify(sses[x]).length,
 ]).sort((x, y) => y[1] - x[1])[0][0]];
 }
-}, await getChatPromptLimit(options));
+}, await getChatPromptLimit({ aiId: ai.id, ...options, }));
 const aiResp = Object.keys(sses) ? (await prompt(getInput(), {
-
-...options || {}
+aiId: ai.id, ...options || {}
 })) : {};
 assert(aiResp, 'Unable to analyze sessions.');
 ids.map(x => resp[x] = aiResp[x] || null);
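
Note: taken together, the analyzeSessions changes resolve the AI engine once up front via getAi and then thread that engine's id into both the prompt-limit calculation and the final prompt call, so the input is trimmed against the limits of the same engine that actually answers. A condensed sketch of the flow; the option values are as shown in the diff, while `input` and `options` are illustrative placeholders.

// Condensed from the hunks above (sketch, not package source).
const ai = await getAi(options?.aiId, {
    jsonMode: true, simple: true, select: { json: true, fast: true }, ...options || {},
});
const limit = await getChatPromptLimit({ aiId: ai.id, ...options });   // limit of the chosen engine
const aiResp = await prompt(input, { aiId: ai.id, ...options || {} }); // the same engine answers
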
package/lib/manifest.mjs
CHANGED
package/lib/rag.mjs
CHANGED
@@ -98,8 +98,8 @@ const ensureApiKey = (options) => {
 return options.apiKey;
 };

-const
-assert(options?.
+const ensureCredentials = (options) => {
+assert(options?.credentials, 'Google credentials are required.', 400);
 assert(options?.projectId, 'Google project ID is required.', 400);
 return options;
 };
@@ -192,7 +192,7 @@ const initReranker = async (options = {}) => {
 const model = options?.model || DEFAULT_RERANKER_MODELS[provider];
 switch (provider) {
 case GOOGLE:
-
+ensureCredentials(options);
 const { RankServiceClient } = await need(
 '@google-cloud/discoveryengine', { raw: true }
 );
@@ -200,7 +200,7 @@ const initReranker = async (options = {}) => {
 const clientOptions = {
 ...location ? { apiEndpoint: `${location}-discoveryengine.googleapis.com` } : {},
 ...options?.apiEndpoint ? { apiEndpoint: options.apiEndpoint } : {},
-keyFilename: options.
+keyFilename: options.credentials,
 };
 const client = new RankServiceClient(clientOptions);
 rerankerClients[provider] = {
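
Note: in rag.mjs the Google reranker path now validates its inputs through the new ensureCredentials guard and passes options.credentials straight to the RankServiceClient as keyFilename, so the value is evidently expected to be a path to a service-account key file. An illustrative call shape, assuming initReranker reads the provider from its options; the provider string, path, and project id below are placeholders.

// Illustrative only; option names are from the diff, the values are placeholders.
await initReranker({
    provider: 'Google',                        // assumed to route to the GOOGLE branch above
    credentials: './service-account.json',     // forwarded to RankServiceClient as keyFilename
    projectId: 'my-gcp-project',               // required by ensureCredentials
});
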