utilitas 1999.1.18 → 1999.1.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/utilitas.lite.mjs +1 -1
- package/dist/utilitas.lite.mjs.map +1 -1
- package/lib/alan.mjs +69 -85
- package/lib/manifest.mjs +1 -1
- package/package.json +1 -1
package/lib/alan.mjs
CHANGED
@@ -54,38 +54,37 @@ const [
 ];
 
 const [
-    OPENAI, GEMINI,
-
-
-
-
-
-
-
-
-
-
-
-    JINA_EMBEDDING, JINA_CLIP,
+    OPENAI, GEMINI, OPENAI_TRAINING, OLLAMA, GPT_4O_MINI, GPT_4O, GPT_O1,
+    GPT_O3_MINI, GEMINI_20_FLASH, GEMINI_20_FLASH_THINKING, GEMINI_20_PRO, NOVA,
+    DEEPSEEK_R1, MD_CODE, TEXT_EMBEDDING_3_SMALL, TEXT_EMBEDDING_3_LARGE,
+    CLOUD_37_SONNET, AUDIO, WAV, ATTACHMENTS, CHAT, OPENAI_VOICE, MEDIUM, LOW,
+    HIGH, GPT_REASONING_EFFORT, THINK, THINK_STR, THINK_END, AZURE, TOOLS_STR,
+    TOOLS_END, TOOLS, TEXT, THINKING, OK, FUNC, GPT_45, REDACTED_THINKING,
+    GEMMA_3_27B, AZURE_OPENAI, ANTHROPIC, VERTEX_ANTHROPIC, GEMMA327B, v8k, ais,
+    MAX_TOOL_RECURSION, LOG, name, user, system, assistant, MODEL, JSON_OBJECT,
+    TOOL, silent, GEMINI_EMBEDDING_M, INVALID_FILE, tokenSafeRatio,
+    GPT_QUERY_LIMIT, CONTENT_IS_REQUIRED, OPENAI_HI_RES_SIZE, k, kT, m, minute,
+    hour, gb, trimTailing, EBD, GEMINI_20_FLASH_EXP, IMAGE, JINA,
+    JINA_DEEPSEARCH, JINA_CLIP,
 ] = [
-    'OpenAI', 'Gemini', '
-    '
-    'gemini-2.0-flash', 'gemini-2.0-
-    '
-    'text-embedding-3-
-    '
-    '
-    '</
-    '
-    '
-
-    '
+    'OpenAI', 'Gemini', 'OPENAI_TRAINING', 'Ollama', 'gpt-4o-mini',
+    'gpt-4o', 'o1', 'o3-mini', 'gemini-2.0-flash',
+    'gemini-2.0-flash-thinking-exp', 'gemini-2.0-pro-exp', 'nova',
+    'deepseek-r1', '```', 'text-embedding-3-small',
+    'text-embedding-3-large', 'claude-3-7-sonnet@20250219', 'audio', 'wav',
+    '[ATTACHMENTS]', 'CHAT', 'OPENAI_VOICE', 'medium', 'low', 'high',
+    'medium', 'think', '<think>', '</think>', 'AZURE', '<tools>',
+    '</tools>', 'tools', 'text', 'thinking', 'OK', 'function',
+    'gpt-4.5-preview', 'redacted_thinking', 'gemma-3-27b-it',
+    'Azure Openai', 'Anthropic', 'Vertex Anthropic', 'gemma3:27b',
+    7680 * 4320, [], 10, { log: true }, 'Alan', 'user', 'system',
+    'assistant', 'model', 'json_object', 'tool', true,
     'gemini-embedding-exp-03-07', 'Invalid file data.', 1.1, 100,
     'Content is required.', 2000 * 768, x => 1024 * x, x => 1000 * x,
     x => 1024 * 1024 * x, x => 60 * x, x => 60 * 60 * x,
     x => 1024 * 1024 * 1024 * x, x => x.replace(/[\.\s]*$/, ''),
     { embedding: true }, 'gemini-2.0-flash-exp', 'image', 'Jina',
-    'jina-deepsearch-v1', '
+    'jina-deepsearch-v1', 'jina-clip-v2',
 ];
 
 const [tool, messages, text]
@@ -226,12 +225,14 @@ const DEFAULT_MODELS = {
     [JINA]: JINA_DEEPSEARCH,
     [OLLAMA]: GEMMA327B,
     [OPENAI_VOICE]: NOVA,
-    [OPENAI_EMBEDDING]: TEXT_EMBEDDING_3_SMALL,
-    [GEMINI_EMEDDING]: GEMINI_EMBEDDING_M,
-    [JINA_EMBEDDING]: JINA_CLIP,
     [OPENAI_TRAINING]: GPT_4O_MINI, // https://platform.openai.com/docs/guides/fine-tuning
 };
-
+
+const DEFAULT_EMBEDDING = {
+    [OPENAI]: TEXT_EMBEDDING_3_SMALL,
+    [GEMINI]: GEMINI_EMBEDDING_M,
+    [JINA]: JINA_CLIP,
+};
 
 const tokenRatioByWords = Math.min(
     100 / 75, // ChatGPT: https://platform.openai.com/tokenizer
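Note on the shape of this change: the per-provider default embedding models move out of DEFAULT_MODELS, where they were keyed by pseudo-providers (OPENAI_EMBEDDING, GEMINI_EMEDDING, JINA_EMBEDDING), into a dedicated DEFAULT_EMBEDDING map keyed by the real provider constants. A minimal sketch of how the map is consumed, assuming MODELS resolves a model name to its metadata object as elsewhere in alan.mjs; defaultEmbeddingFor is a hypothetical helper, not part of the diff:

    // Hypothetical helper mirroring the lookup setupAi performs below.
    const defaultEmbeddingFor = provider => MODELS[DEFAULT_EMBEDDING[provider]];
    // defaultEmbeddingFor(OPENAI) => metadata for 'text-embedding-3-small'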
@@ -353,6 +354,17 @@ const buildAiId = (provider, model) => [provider, model].map(
     x => ensureString(x, { case: 'SNAKE' })
 ).join('_');
 
+const setupAi = ai => {
+    const id = buildAiId(ai.provider, ai.model.name);
+    ais.push({
+        id, initOrder: ais.length,
+        priority: DEFAULT_MODELS[ai.provider] === ai.model.name ? -1 : 0,
+        modelEmbedding: MODELS[DEFAULT_EMBEDDING[ai.provider]], ...ai,
+        prompt: ai.prompt && (async (c, o) => await ai.prompt(id, c, o)),
+        embedding: ai.embedding && (async (c, o) => await ai.embedding(id, c, o)),
+    });
+};
+
 const init = async (options = {}) => {
     const provider = unifyProvider(options?.provider);
     let models;
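setupAi factors out the ais.push boilerplate repeated across the provider cases rewritten below: it derives a stable id with buildAiId, records registration order, gives the provider's default model priority -1, attaches the provider's default embedding model via DEFAULT_EMBEDDING, and binds the registered id into the prompt and embedding callbacks. A hedged usage sketch; model and client stand in for whatever a provider case just constructed:

    // Sketch only: register one model, then call the bound prompt without
    // threading the ai id by hand.
    setupAi({ provider: OPENAI, model, client, prompt: promptOpenAI });
    const ai = ais[ais.length - 1];
    // ai.prompt(content, options) calls promptOpenAI(ai.id, content, options)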
@@ -372,11 +384,9 @@ const init = async (options = {}) => {
             assertApiKey(provider, options);
             var client = await OpenAI(options);
             for (let model of models) {
-
-
-
-                    prompt: async (cnt, opts) => await promptOpenAI(id, cnt, opts),
-                    embedding: async (i, o) => await createOpenAIEmbedding(id, i, o),
+                setupAi({
+                    provider, model, client,
+                    prompt: promptOpenAI, embedding: createOpenAIEmbedding,
                 });
             }
             break;
@@ -389,33 +399,23 @@ const init = async (options = {}) => {
                 apiVersion: '2025-01-01-preview',
                 deployment: model.name, ...options,
             });
-
-            ais.push({
-                id, provider, model, priority: 0, initOrder: ais.length, client,
-                prompt: async (cnt, opts) => await promptOpenAI(id, cnt, opts),
-            });
+            setupAi({ provider, model, client, prompt: promptOpenAI });
             break;
         case AZURE:
             assertApiKey(provider, options);
             assert(options.baseURL, `${provider} api endpoint is required.`);
             var model = models[0];
             var client = await OpenAI(options);
-
-            ais.push({
-                id, provider, model, priority: 0, initOrder: ais.length, client,
-                prompt: async (cnt, opts) => await promptOpenAI(id, cnt, opts),
-            });
+            setupAi({ provider, model, client, prompt: promptOpenAI });
             break;
         case GEMINI:
             assertApiKey(provider, options);
             const { GoogleGenerativeAI } = await need('@google/generative-ai');
             var client = new GoogleGenerativeAI(options.apiKey);
             for (let model of models) {
-
-
-
-                    prompt: async (cnt, opts) => await promptGemini(id, cnt, opts),
-                    embedding: async (i, o) => await createGeminiEmbedding(id, i, o),
+                setupAi({
+                    provider, model, client,
+                    prompt: promptGemini, embedding: createGeminiEmbedding,
                 });
             }
             break;
@@ -425,11 +425,7 @@ const init = async (options = {}) => {
                 await need('@anthropic-ai/sdk')
             ).Anthropic)(options)
             for (let model of models) {
-
-                ais.push({
-                    id, provider, model, priority: 0, initOrder: ais.length, client,
-                    prompt: async (cnt, opts) => await promptAnthropic(id, cnt, opts),
-                });
+                setupAi({ provider, model, client, prompt: promptAnthropic });
             }
             break;
         case VERTEX_ANTHROPIC:
@@ -441,25 +437,19 @@ const init = async (options = {}) => {
             var client = new ((
                 await need('@anthropic-ai/vertex-sdk')
             ).AnthropicVertex)({ region: options?.region || 'us-east5' });
-
-            ais.push({
-                id, provider, model, priority: 0, initOrder: ais.length, client: client,
-                prompt: async (cnt, opts) => await promptAnthropic(id, cnt, opts),
-            });
+            setupAi({ provider, model, client, prompt: promptAnthropic });
             break;
         case JINA:
             assertApiKey(provider, options);
-            var [client,
+            var [client, clientEmbedding] = [await OpenAI({
                 baseURL: 'https://deepsearch.jina.ai/v1/', ...options,
             }), await OpenAI({
                 baseURL: 'https://api.jina.ai/v1/', ...options,
             })];
             for (let model of models) {
-
-
-
-                    prompt: async (cnt, opts) => await promptOpenAI(id, cnt, opts),
-                    embedding: async (i, o) => await createJinaEmbedding(ebd, i, o),
+                setupAi({
+                    provider, model, client, clientEmbedding,
+                    prompt: promptOpenAI, embedding: createOpenAIEmbedding,
                 });
             }
             break;
@@ -469,11 +459,7 @@ const init = async (options = {}) => {
             const phLog = m => log(`Ollama preheat: ${m?.message || m}`);
             var client = await OpenAI({ baseURL, apiKey: 'ollama', ...options });
             for (let model of models) {
-
-                ais.push({
-                    id, provider, model, priority: 0, initOrder: ais.length, client,
-                    prompt: async (cnt, opts) => await promptOpenAI(id, cnt, opts),
-                });
+                setupAi({ provider, model, client, prompt: promptOpenAI });
                 ignoreErrFunc(async () => {
                     phLog(await (await fetch(`${baseURL}completions`, {
                         method: 'POST', body: JSON.stringify({
@@ -492,8 +478,9 @@ const init = async (options = {}) => {
 
 const packAi = (ais, options = {}) => {
     const res = options.basic ? ais.map(x => ({
-        id: x.id,
-
+        id: x.id, initOrder: x.initOrder, priority: x.priority,
+        provider: x.provider, model: x.model, modelEmbedding: x.modelEmbedding,
+        prompt: !!x.prompt, embedding: !!x.embedding,
     })) : ais;
     return options.all ? res : res[0];
 };
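The basic projection of packAi now exposes scheduling metadata and capability flags instead of only the id. A hedged illustration of the new shape; the input entry is fabricated for the example, and real field values depend on which providers were initialized:

    const packed = packAi([{
        id: 'OPENAI_GPT_4O_MINI', initOrder: 0, priority: -1,
        provider: 'OpenAI', model: { name: 'gpt-4o-mini' },
        modelEmbedding: { name: 'text-embedding-3-small' },
        prompt: async () => 'OK', embedding: async () => [0.1],
    }], { basic: true });
    // => { id, initOrder, priority, provider, model, modelEmbedding,
    //      prompt: true, embedding: true }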
@@ -1216,7 +1203,7 @@ const checkEmbeddingInput = async (input, model) => {
     return getInput();
 };
 
-const createOpenAIEmbedding = async (
+const createOpenAIEmbedding = async (aiId, input, options) => {
     // args from vertex embedding may be useful uere
     // https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings
     // task_type Description
@@ -1225,23 +1212,20 @@ const createOpenAIEmbedding = async (client, input, options) => {
     // SEMANTIC_SIMILARITY Specifies the given text will be used for Semantic Textual Similarity(STS).
     // CLASSIFICATION Specifies that the embeddings will be used for classification.
     // CLUSTERING Specifies that the embeddings will be used for clustering.
-
-    const model = options?.model ||
-    const resp = await client.embeddings.create({
+    let { client, modelEmbedding, clientEmbedding } = await getAi(aiId);
+    const model = options?.model || modelEmbedding.name;
+    const resp = await (clientEmbedding || client).embeddings.create({
         model, input: await checkEmbeddingInput(input, model),
     });
     return options?.raw ? resp : resp?.data[0].embedding;
 };
 
-const createJinaEmbedding = async (client, input, options) =>
-    await createOpenAIEmbedding(client, input, {
-        model: DEFAULT_MODELS[JINA_EMBEDDING], ...options || {}
-    });
-
 const createGeminiEmbedding = async (aiId, input, options) => {
-    const { client } = await getAi(aiId);
-    const model = options?.model ||
-    const resp = await
+    const { client, modelEmbedding, clientEmbedding } = await getAi(aiId);
+    const model = options?.model || modelEmbedding.name;
+    const resp = await (
+        clientEmbedding || client
+    ).getGenerativeModel({ model }).embedContent(
         await checkEmbeddingInput(input, model)
     );
     return options?.raw ? resp : resp?.embedding.values;
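Both embedding builders now take an aiId instead of a raw client: getAi(aiId) supplies the registered client, the default modelEmbedding, and an optional dedicated clientEmbedding (registered by the JINA case for its separate embeddings endpoint), which is what makes the deleted createJinaEmbedding wrapper unnecessary. A hedged call sketch; the id string is illustrative, following buildAiId's SNAKE casing, not taken from the diff:

    // Sketch only: embed a string through a registered OpenAI-compatible ai.
    const vector = await createOpenAIEmbedding('OPENAI_GPT_4O_MINI', 'hello world');
    // => number[] unless options.raw is truthy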
package/lib/manifest.mjs
CHANGED