utilitas 1999.1.19 → 1999.1.21

This diff shows the contents of these two publicly released package versions as published to the supported registries, and is provided for informational purposes only.
package/lib/alan.mjs CHANGED
@@ -38,7 +38,7 @@ You may be provided with some tools(functions) to help you gather information an
 - Unless otherwise specified to require the original result, in most cases, you may reorganize the information obtained after using the tool to solve the problem as needed.`;
 
 const _NEED = [
-    '@anthropic-ai/sdk', '@anthropic-ai/vertex-sdk', '@google/generative-ai',
+    '@anthropic-ai/sdk', '@anthropic-ai/vertex-sdk', '@google/genai',
     'js-tiktoken', 'OpenAI',
 ];
 
@@ -101,6 +101,7 @@ const log = (cnt, opt) => _log(cnt, import.meta.url, { time: 1, ...opt || {} });
 const assertContent = content => assert(content.length, CONTENT_IS_REQUIRED);
 const countToolCalls = r => r?.split('\n').filter(x => x === TOOLS_STR).length;
 const assertApiKey = (p, o) => assert(o?.apiKey, `${p} api key is required.`);
+const getProviderIcon = provider => PROVIDER_ICONS[provider] || '🔮';
 const libOpenAi = async opts => await need('openai', { ...opts, raw: true });
 const OpenAI = async opts => new (await libOpenAi(opts)).OpenAI(opts);
 const AzureOpenAI = async opts => new (await libOpenAi(opts)).AzureOpenAI(opts);
@@ -166,8 +167,8 @@ const MODELS = {
     [JINA_DEEPSEARCH]: {
         contextWindow: Infinity, maxInputTokens: Infinity,
         maxOutputTokens: Infinity, imageCostTokens: 0, maxImageSize: Infinity,
-        supportedMimeTypes: [png, jpeg, MIME_TEXT, webp, pdf],
-        reasoning: true, json: true, vision: true, defaultProvider: JINA,
+        supportedMimeTypes: [png, jpeg, MIME_TEXT, webp, pdf], reasoning: true,
+        json: true, vision: true, deepsearch: true, defaultProvider: JINA,
     },
     [DEEPSEEK_R1]: {
         contextWindow: kT(128), maxOutputTokens: k(32),
@@ -234,6 +235,17 @@ const DEFAULT_EMBEDDING = {
     [JINA]: JINA_CLIP,
 };
 
+const PROVIDER_ICONS = {
+    [OPENAI]: '⚛️', [AZURE_OPENAI]: '⚛️',
+    [ANTHROPIC]: '✳️', [VERTEX_ANTHROPIC]: '✳️',
+    [GEMINI]: '♊️', [AZURE]: '☁️', [OLLAMA]: '🦙', [JINA]: '✴️',
+};
+
+const FEATURE_ICONS = {
+    audio: '📣', deepsearch: '🔍', fast: '⚡️', image: '🎨',
+    json: '📊', reasoning: '🧠', tools: '🧰', vision: '👁️',
+};
+
 const tokenRatioByWords = Math.min(
     100 / 75, // ChatGPT: https://platform.openai.com/tokenizer
     Math.min(100 / 60, 100 / 80), // Gemini: https://ai.google.dev/gemini-api/docs/tokens?lang=node
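
The two new lookup tables feed display metadata for each registered model: `getProviderIcon` (added earlier in this diff) resolves a provider to its emoji with '🔮' as the fallback, and `FEATURE_ICONS` is folded into a per-model badge string further down. A minimal standalone sketch of the lookup behavior, with illustrative string keys standing in for the real `OPENAI`/`GEMINI`/… constants:

```js
// Illustrative stand-ins for the real provider-name constants.
const PROVIDER_ICONS = { gemini: '♊️', ollama: '🦙', jina: '✴️' };
const getProviderIcon = provider => PROVIDER_ICONS[provider] || '🔮';

console.log(getProviderIcon('gemini'));  // '♊️'
console.log(getProviderIcon('unknown')); // '🔮' — fallback for unregistered providers
```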
@@ -344,9 +356,9 @@ const toolsGemini = async () => (await toolsOpenAI()).map(x => ({
             properties: x.def.function.parameters.properties,
             required: x.def.function.parameters.required,
         },
-        response: x.def.function?.response ?? {
-            type: 'string', description: 'It could be a string or JSON',
-        },
+        // Vertex API only: response: x.def.function?.response ?? {
+        //     type: 'string', description: 'It could be a string or JSON',
+        // },
     }
 }));
 
@@ -356,8 +368,13 @@ const buildAiId = (provider, model) => [provider, model].map(
 
 const setupAi = ai => {
     const id = buildAiId(ai.provider, ai.model.name);
+    const icon = getProviderIcon(ai.provider);
+    const features = Object.entries(FEATURE_ICONS).map(
+        x => ai.model[x[0]] ? x[1] : ''
+    ).join('');
     ais.push({
-        id, initOrder: ais.length,
+        id, name: `${icon} ${ai.provider} (${ai.model.name})`,
+        features, initOrder: ais.length,
         priority: DEFAULT_MODELS[ai.provider] === ai.model.name ? -1 : 0,
         modelEmbedding: MODELS[DEFAULT_EMBEDDING[ai.provider]], ...ai,
         prompt: ai.prompt && (async (c, o) => await ai.prompt(id, c, o)),
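
`setupAi` now derives two presentation fields for every registry entry: a `name` such as `♊️ gemini (gemini-2.0-flash)` and a `features` string of emoji badges, one per truthy capability flag on the model. A worked example of the badge composition (the model object here is hypothetical and abbreviated):

```js
const FEATURE_ICONS = {
    audio: '📣', deepsearch: '🔍', fast: '⚡️', image: '🎨',
    json: '📊', reasoning: '🧠', tools: '🧰', vision: '👁️',
};
// A model flagged for JSON output, reasoning, and vision:
const model = { name: 'gemini-2.0-flash', json: true, reasoning: true, vision: true };
const features = Object.entries(FEATURE_ICONS).map(
    ([feature, icon]) => model[feature] ? icon : '' // keep icon only if the flag is truthy
).join('');
console.log(features); // '📊🧠👁️'
```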
@@ -410,8 +427,8 @@ const init = async (options = {}) => {
             break;
         case GEMINI:
             assertApiKey(provider, options);
-            const { GoogleGenerativeAI } = await need('@google/generative-ai');
-            var client = new GoogleGenerativeAI(options.apiKey);
+            const { GoogleGenAI } = await need('@google/genai');
+            var client = new GoogleGenAI(options);
             for (let model of models) {
                 setupAi({
                     provider, model, client,
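
This hunk is the core of the SDK migration: the older `@google/generative-ai` constructor took a bare API key, while the newer `@google/genai` client takes an options object. A minimal sketch of the difference (the key value is a placeholder):

```js
// Old SDK (@google/generative-ai): the constructor takes the key directly.
import { GoogleGenerativeAI } from '@google/generative-ai';
const oldClient = new GoogleGenerativeAI('YOUR_API_KEY');

// New SDK (@google/genai): the constructor takes an options object, which
// also accepts Vertex AI settings ({ vertexai: true, project, location }).
import { GoogleGenAI } from '@google/genai';
const newClient = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });
```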
@@ -478,7 +495,8 @@ const init = async (options = {}) => {
 
 const packAi = (ais, options = {}) => {
     const res = options.basic ? ais.map(x => ({
-        id: x.id, initOrder: x.initOrder, priority: x.priority,
+        id: x.id, name: x.name, features: x.features,
+        initOrder: x.initOrder, priority: x.priority,
         provider: x.provider, model: x.model, modelEmbedding: x.modelEmbedding,
         prompt: !!x.prompt, embedding: !!x.embedding,
     })) : ais;
@@ -488,7 +506,7 @@ const packAi = (ais, options = {}) => {
 const getAi = async (id, options = {}) => {
     if (id) {
         const ai = ais.find(x => x.id === id);
-        assert(ais, `AI not found: ${id}.`);
+        assert(ai, `AI not found: ${id}.`);
         return options?.client ? ai?.client : ai;
     } else if (options?.select) {
         const res = [];
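
The one-character change in `getAi` fixes a real bug: the old assertion tested the module-level `ais` array, which is always truthy, so a lookup with an unknown id silently returned `undefined` instead of throwing. A sketch of the two behaviors (Node's `assert` stands in for the utilitas helper):

```js
import assert from 'node:assert';

const ais = [{ id: 'openai|gpt-4o' }]; // the registry is a non-empty array
const ai = ais.find(x => x.id === 'bogus-id'); // => undefined

assert(ais, 'AI not found.'); // old check: never throws — the array itself is truthy
assert(ai, 'AI not found.'); // fixed check: throws AssertionError for unknown ids
```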
@@ -777,6 +795,8 @@ const buildPrompts = async (model, input, options = {}) => {
                 history.push(buildClaudeMessage(x.response, _assistant));
                 break;
             case GEMINI:
+                // https://github.com/google/generative-ai-js/blob/main/samples/node/advanced-chat.js
+                // Google's bug: history is not allowed while using inline_data?
                 if (options.attachments?.length) { return; }
                 history.push(buildGeminiHistory(x.request, _user));
                 history.push(buildGeminiHistory(x.response, { role: MODEL }));
@@ -808,9 +828,9 @@ const buildPrompts = async (model, input, options = {}) => {
         }
     };
     msgBuilder();
-    await trimPrompt(() => [systemPrompt, history, prompt], () => {
-        if (options.messages.length) {
-            options.messages.shift();
+    await trimPrompt(() => [systemPrompt, history, content], () => {
+        if (options.messages?.length) {
+            options.messages?.shift();
             msgBuilder();
         } else {
             content = trimTailing(trimTailing(content).slice(0, -1)) + '...';
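
Two fixes land here: the size estimate now tracks `content` (the string the fallback branch actually shortens; `prompt` was stale at that point), and the optional chaining stops a crash when `options.messages` is absent. A minimal sketch of the shrink-until-it-fits loop that `trimPrompt` presumably implements (the helper name is real, but its body and the token estimator here are assumptions):

```js
// Hypothetical sketch of a trimPrompt-style loop: keep shrinking the
// payload until its estimated token count fits the context window.
const estimateTokens = text => Math.ceil(text.length / 4); // crude stand-in

const trimPrompt = async (getParts, shrink, maxTokens = 8192) => {
    while (estimateTokens(JSON.stringify(getParts())) > maxTokens) {
        await shrink(); // drop the oldest history turn first, then truncate content
    }
};
```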
@@ -1092,15 +1112,6 @@ const deleteFile = async (aiId, file_id, options) => {
     return await client.files.del(file_id);
 };
 
-const generationConfig = options => ({
-    generationConfig: {
-        responseMimeType: options.jsonMode ? MIME_JSON : MIME_TEXT,
-        responseModalities: options.modalities
-            || (options.imageMode ? [TEXT, IMAGE] : undefined),
-        ...options?.generationConfig || {},
-    },
-});
-
 const packGeminiReferences = (chunks, supports) => {
     let references = null;
     if (chunks?.length && supports?.length) {
@@ -1116,8 +1127,8 @@ const packGeminiReferences = (chunks, supports) => {
 
 const promptGemini = async (aiId, content, options = {}) => {
     let { client, model } = await getAi(aiId);
-    let [result, references, functionCalls, responded, images]
-        = [options.result ?? '', null, null, false, []];
+    let [event, result, references, functionCalls, responded, images] =
+        [null, options.result ?? '', null, [], false, []];
     options.model = options.model || model.name;
     model?.image === true && (options.imageMode = true);
     assert(!(options.imageMode && !model.image), 'Image mode is not supported.');
@@ -1128,43 +1139,44 @@ const promptGemini = async (aiId, content, options = {}) => {
     }
     const { systemPrompt: systemInstruction, history, prompt }
         = await buildPrompts(model, content, { ...options, flavor: GEMINI });
-    const _client = client.getGenerativeModel({
-        model: options.model, systemInstruction,
-        ...model?.tools && !options.jsonMode
-            && options.model !== GEMINI_20_FLASH_EXP ? (options.tools ?? {
-                tools: [
-                    // @todo: Gemini will failed when using these tools together.
-                    // https://ai.google.dev/gemini-api/docs/function-calling
-                    // { codeExecution: {} },
-                    // { googleSearch: {} },
-                    {
-                        functionDeclarations: (
-                            await toolsGemini()
-                        ).map(x => x.def)
-                    },
-                ],
-                toolConfig: { functionCallingConfig: { mode: 'AUTO' } },
-            }) : {},
+    const chat = client.chats.create({
+        model: options.model, history, config: {
+            responseMimeType: options.jsonMode ? MIME_JSON : MIME_TEXT,
+            systemInstruction, responseModalities: options.modalities || (
+                options.imageMode ? [TEXT, IMAGE] : undefined
+            ), ...options?.config || {}, ...model?.tools && !options.jsonMode
+                && options.model !== GEMINI_20_FLASH_EXP ? (options.tools ?? {
+                    tools: [
+                        // @todo: Gemini will failed when using these tools together.
+                        // https://ai.google.dev/gemini-api/docs/function-calling
+                        // { codeExecution: {} },
+                        // { googleSearch: {} },
+                        {
+                            functionDeclarations: (
+                                await toolsGemini()
+                            ).map(x => x.def)
+                        },
+                    ], toolConfig: { functionCallingConfig: { mode: 'AUTO' } },
+                }) : {},
+        },
     });
-    // https://github.com/google/generative-ai-js/blob/main/samples/node/advanced-chat.js
-    // Google's bug: history is not allowed while using inline_data?
-    const chat = _client.startChat({ history, ...generationConfig(options) });
-    const resp = await chat.sendMessageStream(prompt);
-    for await (const chunk of resp.stream) {
-        const deltaImages = [];
-        chunk.candidates[0].content?.parts?.filter(
-            x => x?.inlineData?.mimeType === png
-        )?.map?.(x => {
-            deltaImages.push(x.inlineData);
-            images.push(x.inlineData);
+    const resp = await chat.sendMessageStream({ message: prompt });
+    for await (const chunk of resp) {
+        event = chunk.candidates[0];
+        let [deltaText, deltaImages] = ['', []];
+        event?.content?.parts?.map(x => {
+            if (x.text) { deltaText = x.text; }
+            else if (x.functionCall) { functionCalls.push(x); }
+            else if (x.inlineData?.mimeType === png) {
+                deltaImages.push(x.inlineData);
+                images.push(x.inlineData);
+            }
         });
-        functionCalls || (functionCalls = chunk.functionCalls);
         const rfc = packGeminiReferences(
-            chunk.candidates[0]?.groundingMetadata?.groundingChunks,
-            chunk.candidates[0]?.groundingMetadata?.groundingSupports
+            event?.groundingMetadata?.groundingChunks,
+            event?.groundingMetadata?.groundingSupports
         );
         rfc && (references = rfc);
-        let deltaText = chunk?.text?.() || '';
         options.result && deltaText
             && (responded = responded || (deltaText = `\n\n${deltaText}`));
         result += deltaText;
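
Taken together, this hunk moves `promptGemini` onto the new SDK's chat surface: per-request generation settings now live in `config` at `chats.create` time, the stream is the async iterable itself (no `.stream` property or trailing `.response` promise), and text, function calls, and inline images are all picked out of `candidates[0].content.parts` chunk by chunk. A minimal end-to-end sketch against `@google/genai` (model name and prompt are placeholders):

```js
import { GoogleGenAI } from '@google/genai';

const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
const chat = ai.chats.create({
    model: 'gemini-2.0-flash',
    history: [], // prior turns, as { role, parts } objects
    config: { responseMimeType: 'text/plain' },
});

// sendMessageStream resolves to an async iterable of response chunks.
const stream = await chat.sendMessageStream({ message: 'Hello!' });
let result = '';
for await (const chunk of stream) {
    // Each chunk carries candidates[0].content.parts; text parts may be
    // interleaved with functionCall or inlineData (image) parts.
    for (const part of chunk.candidates?.[0]?.content?.parts ?? []) {
        if (part.text) { result += part.text; }
    }
}
console.log(result);
```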
@@ -1173,10 +1185,6 @@ const promptGemini = async (aiId, content, options = {}) => {
             images: options.delta ? deltaImages : images,
         }, options);
     }
-    const _resp = await resp.response;
-    functionCalls = (
-        functionCalls() || _resp.functionCalls() || []
-    ).map(x => ({ functionCall: x }));
     const { toolsResult, toolsResponse } = await handleToolsCall(
         { role: MODEL, parts: functionCalls },
         { ...options, result, flavor: GEMINI }
package/lib/manifest.mjs CHANGED
@@ -1,7 +1,7 @@
 const manifest = {
     "name": "utilitas",
     "description": "Just another common utility for JavaScript.",
-    "version": "1999.1.19",
+    "version": "1999.1.21",
     "private": false,
     "homepage": "https://github.com/Leask/utilitas",
     "main": "index.mjs",
@@ -19,7 +19,7 @@ const manifest = {
         "xlsx": "https://cdn.sheetjs.com/xlsx-0.20.1/xlsx-0.20.1.tgz"
     },
     "dependencies": {
-        "file-type": "^20.4.0",
+        "file-type": "^20.4.1",
         "mathjs": "^14.3.1",
         "uuid": "^11.1.0"
     },
@@ -28,14 +28,15 @@ const manifest = {
         "@anthropic-ai/vertex-sdk": "^0.7.0",
         "@ffmpeg-installer/ffmpeg": "^1.1.0",
         "@ffprobe-installer/ffprobe": "^2.1.2",
-        "@google-cloud/speech": "^6.7.1",
+        "@google-cloud/speech": "^7.0.0",
         "@google-cloud/storage": "^7.15.2",
-        "@google-cloud/text-to-speech": "^5.8.1",
-        "@google-cloud/vision": "^4.3.3",
+        "@google-cloud/text-to-speech": "^6.0.0",
+        "@google-cloud/vision": "^5.0.0",
+        "@google/genai": "^0.4.0",
         "@google/generative-ai": "^0.24.0",
         "@mozilla/readability": "github:mozilla/readability",
-        "@sentry/node": "^9.5.0",
-        "@sentry/profiling-node": "^9.5.0",
+        "@sentry/node": "^9.6.0",
+        "@sentry/profiling-node": "^9.6.0",
         "acme-client": "^5.4.0",
         "browserify-fs": "^1.0.0",
         "buffer": "^6.0.3",
@@ -53,9 +54,9 @@ const manifest = {
         "node-mailjet": "^6.0.8",
         "node-polyfill-webpack-plugin": "^4.1.0",
         "office-text-extractor": "^3.0.3",
-        "openai": "^4.87.3",
-        "pdfjs-dist": "^4.10.38",
-        "pg": "^8.14.0",
+        "openai": "^4.87.4",
+        "pdfjs-dist": "^5.0.375",
+        "pg": "^8.14.1",
         "pgvector": "^0.2.0",
         "ping": "^0.4.4",
         "process": "^0.11.10",
@@ -68,7 +69,7 @@ const manifest = {
         "url": "github:Leask/node-url",
         "webpack-cli": "^6.0.1",
         "whisper-node": "^1.1.1",
-        "wrangler": "^3.114.1",
+        "wrangler": "^4.1.0",
         "xlsx": "https://cdn.sheetjs.com/xlsx-0.20.1/xlsx-0.20.1.tgz",
         "youtube-transcript": "^1.2.1"
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
     "name": "utilitas",
     "description": "Just another common utility for JavaScript.",
-    "version": "1999.1.19",
+    "version": "1999.1.21",
     "private": false,
     "homepage": "https://github.com/Leask/utilitas",
     "main": "index.mjs",
@@ -30,7 +30,7 @@
         "xlsx": "https://cdn.sheetjs.com/xlsx-0.20.1/xlsx-0.20.1.tgz"
     },
     "dependencies": {
-        "file-type": "^20.4.0",
+        "file-type": "^20.4.1",
         "mathjs": "^14.3.1",
         "uuid": "^11.1.0"
     },
@@ -39,14 +39,15 @@
         "@anthropic-ai/vertex-sdk": "^0.7.0",
         "@ffmpeg-installer/ffmpeg": "^1.1.0",
         "@ffprobe-installer/ffprobe": "^2.1.2",
-        "@google-cloud/speech": "^6.7.1",
+        "@google-cloud/speech": "^7.0.0",
         "@google-cloud/storage": "^7.15.2",
-        "@google-cloud/text-to-speech": "^5.8.1",
-        "@google-cloud/vision": "^4.3.3",
+        "@google-cloud/text-to-speech": "^6.0.0",
+        "@google-cloud/vision": "^5.0.0",
+        "@google/genai": "^0.4.0",
         "@google/generative-ai": "^0.24.0",
         "@mozilla/readability": "github:mozilla/readability",
-        "@sentry/node": "^9.5.0",
-        "@sentry/profiling-node": "^9.5.0",
+        "@sentry/node": "^9.6.0",
+        "@sentry/profiling-node": "^9.6.0",
         "acme-client": "^5.4.0",
         "browserify-fs": "^1.0.0",
         "buffer": "^6.0.3",
@@ -64,9 +65,9 @@
         "node-mailjet": "^6.0.8",
         "node-polyfill-webpack-plugin": "^4.1.0",
         "office-text-extractor": "^3.0.3",
-        "openai": "^4.87.3",
-        "pdfjs-dist": "^4.10.38",
-        "pg": "^8.14.0",
+        "openai": "^4.87.4",
+        "pdfjs-dist": "^5.0.375",
+        "pg": "^8.14.1",
         "pgvector": "^0.2.0",
         "ping": "^0.4.4",
         "process": "^0.11.10",
@@ -79,7 +80,7 @@
         "url": "github:Leask/node-url",
         "webpack-cli": "^6.0.1",
         "whisper-node": "^1.1.1",
-        "wrangler": "^3.114.1",
+        "wrangler": "^4.1.0",
         "xlsx": "https://cdn.sheetjs.com/xlsx-0.20.1/xlsx-0.20.1.tgz",
         "youtube-transcript": "^1.2.1"
 }