utilitas 1999.1.97 → 1999.1.98

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/alan.mjs CHANGED
@@ -48,7 +48,7 @@ const [
  OPENAI, GEMINI, OLLAMA, GEMINI_25_FLASH, NOVA, DEEPSEEK_R1, MD_CODE,
  CLOUD_SONNET_45, AUDIO, WAV, ATTACHMENTS, OPENAI_VOICE,
  GPT_REASONING_EFFORT, THINK, THINK_STR, THINK_END, TOOLS_STR, TOOLS_END,
- TOOLS, TEXT, OK, FUNC, GPT_51, GPT_51_CODEX, GEMMA_3_27B, ANTHROPIC, v8k, ais,
+ TOOLS, TEXT, OK, FUNC, GPT_51, GPT_51_CODEX, GPT_5_IMAGE, GEMMA_3_27B, ANTHROPIC, v8k, ais,
  MAX_TOOL_RECURSION, LOG, name, user, system, assistant, MODEL, JSON_OBJECT,
  tokenSafeRatio, CONTENT_IS_REQUIRED, OPENAI_HI_RES_SIZE, k, kT, m, minute,
  hour, gb, trimTailing, GEMINI_25_FLASH_IMAGE, IMAGE, JINA, JINA_DEEPSEARCH,
@@ -59,11 +59,11 @@ const [
  'nova', 'deepseek-r1', '```', 'anthropic/claude-sonnet-4.5', 'audio',
  'wav', '[ATTACHMENTS]', 'OPENAI_VOICE', 'medium', 'think', '<think>',
  '</think>', '<tools>', '</tools>', 'tools', 'text', 'OK', 'function',
- 'gpt-5.1', 'gpt-5.1-codex', 'gemma3:27b', 'Anthropic', 7680 * 4320, [],
- 30, { log: true }, 'Alan', 'user', { role: 'system' }, 'assistant',
- 'model', 'json_object', 1.1, 'Content is required.', 2048 * 2048,
- x => 1024 * x, x => 1000 * x, x => 1024 * 1024 * x, x => 60 * x,
- x => 60 * 60 * x, x => 1024 * 1024 * 1024 * x,
+ 'gpt-5.1', 'gpt-5.1-codex', 'gpt-5-image', 'gemma3:27b', 'Anthropic',
+ 7680 * 4320, [], 30, { log: true }, 'Alan', 'user', { role: 'system' },
+ 'assistant', 'model', 'json_object', 1.1, 'Content is required.',
+ 2048 * 2048, x => 1024 * x, x => 1000 * x, x => 1024 * 1024 * x,
+ x => 60 * x, x => 60 * 60 * x, x => 1024 * 1024 * 1024 * x,
  x => x.replace(/[\.\s]*$/, ''), 'gemini-2.5-flash-image', 'image',
  'Jina', 'jina-deepsearch-v1', 'gemini-3-pro-preview', 'SiliconFlow',
  'Pro/deepseek-ai/DeepSeek-R1', 768 * 768,
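Both hunks above edit a paired destructuring: the first array binds constant names and the second supplies their values positionally, so the new GPT_5_IMAGE name and its 'gpt-5-image' string must land at the same index in each list, which is why the value lines after the insertion point rewrap. A minimal sketch of the pattern, trimmed to a few of the names involved (the real arrays are far longer):

    // positional pairing: each name binds to the value at the same index,
    // so a new entry must be inserted at the matching spot in both arrays
    const [GPT_51, GPT_51_CODEX, GPT_5_IMAGE, GEMMA_3_27B] = [
        'gpt-5.1', 'gpt-5.1-codex', 'gpt-5-image', 'gemma3:27b',
    ];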
@@ -126,20 +126,21 @@ const DEEPSEEK_R1_RULES = {
  // https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models
  // https://openrouter.ai/docs/features/multimodal/audio (only support input audio)
  const MODELS = {
- [GPT_51]: { ...OPENAI_RULES, fast: true },
- [GPT_51_CODEX]: { ...OPENAI_RULES },
- [GEMINI_25_FLASH_IMAGE]: {
- ...GEMINI_RULES, contextWindow: k(64), maxOutputTokens: k(32),
- fast: true, image: true,
+ [GEMINI_30_PRO]: {
+ ...GEMINI_RULES, contextWindow: m(1), maxOutputTokens: k(64),
+ reasoning: true, tools: true,
  },
  [GEMINI_25_FLASH]: {
  ...GEMINI_RULES, contextWindow: m(1), maxOutputTokens: k(64),
  fast: true, reasoning: true, tools: true,
  },
- [GEMINI_30_PRO]: {
- ...GEMINI_RULES, contextWindow: m(1), maxOutputTokens: k(64),
- reasoning: true, tools: true,
+ [GEMINI_25_FLASH_IMAGE]: {
+ ...GEMINI_RULES, contextWindow: k(64), maxOutputTokens: k(32),
+ fast: true, image: true,
  },
+ [GPT_51]: { ...OPENAI_RULES, fast: true },
+ [GPT_51_CODEX]: { ...OPENAI_RULES },
+ [GPT_5_IMAGE]: { ...OPENAI_RULES, image: true },
  [GEMMA_3_27B]: {
  contextWindow: kT(128), maxOutputTokens: k(8),
  imageCostTokens: 256, maxImageSize: 896 * 896,
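The unit helpers destructured earlier give these limits concrete values: k is x => 1024 * x, kT is x => 1000 * x, and m is x => 1024 * 1024 * x. So the reordered GEMINI_30_PRO entry gets a 1,048,576-token context window with 65,536 output tokens, while GEMMA_3_27B keeps 128,000 in and 8,192 out. The same arithmetic, spelled out:

    // the size helpers as defined in the destructured value array above
    const k = x => 1024 * x, kT = x => 1000 * x, m = x => 1024 * 1024 * x;
    m(1);    // 1048576 → GEMINI_30_PRO contextWindow
    k(64);   // 65536   → its maxOutputTokens
    kT(128); // 128000  → GEMMA_3_27B contextWindow
    k(8);    // 8192    → its maxOutputTokens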
@@ -255,7 +256,7 @@ const tokenRatioByCharacters = Math.max(
  );


- let tokeniser;
+ let tokeniser, _tools;

  const unifyProvider = provider => {
  assert(provider = (provider || '').trim(), 'AI provider is required.');
@@ -326,7 +327,7 @@ const tools = [
  },
  ];

- const toolsOpenAI = async () => {
+ const packTools = async () => {
  const _tools = [];
  for (const t of tools) {
  (t.depend ? await t.depend() : true) ? _tools.push(t) : log(
@@ -383,6 +384,7 @@ const init = async (options = {}) => {
  }
  assert(models.length,
  `Model name or description is required for provider: ${provider}.`);
+ _tools || (_tools = await packTools());
  switch (provider) {
  case JINA:
  assertApiKey(provider, options);
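Read together, the tokeniser/_tools, packTools, and init hunks move tool packing from per-request work to a lazily filled module-level cache: toolsOpenAI() used to be awaited inside every prompt call, while the renamed packTools() now runs at most once, during init(). A minimal sketch of the idiom, with the depend-gating simplified (the real loop also logs the tools it skips):

    let _tools; // module-level cache shared by every init() call

    const packTools = async () => {
        const packed = [];
        for (const t of tools) {
            // keep a tool only when its depend() probe passes or is absent
            (t.depend ? await t.depend() : true) && packed.push(t);
        }
        return packed;
    };

    // idempotent guard: only the first call performs the async work
    _tools || (_tools = await packTools());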
@@ -670,13 +672,17 @@ const packResp = async (resp, options) => {
  }
  txt = txt.join('\n');
  !options?.delta && !options?.processing && (txt = txt.trim());
+ print(options);
  return {
  ...text(txt), ...options?.jsonMode ? { json } : {},
  ...references ? { references } : {},
  ...referencesMarkdown ? { referencesMarkdown } : {},
  ...audio ? { audio } : {}, ...images?.length ? { images } : {},
  processing: !!options?.processing,
- model: options?.model,
+ model: [
+ options.provider, options?.router?.provider,
+ options?.router?.model || options?.model,
+ ].filter(x => x).join('/'),
  };
  };
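The reworked model field concatenates whichever identity segments are present, so a call routed through an aggregator reports its full path while a direct call falls back to the plain pair. A quick illustration with hypothetical values:

    // routed: configured provider, upstream provider, routed model
    ['openrouter', 'anthropic', 'claude-sonnet-4.5']
        .filter(x => x).join('/'); // 'openrouter/anthropic/claude-sonnet-4.5'
    // direct: router fields are undefined and drop out of the join
    ['openai', undefined, 'gpt-5.1']
        .filter(x => x).join('/'); // 'openai/gpt-5.1'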
@@ -790,11 +796,12 @@ const promptOpenAI = async (aiId, content, options = {}) => {
  let { provider, client, model } = await getAi(aiId);
  let [
  result, resultAudio, resultImages, resultReasoning, event, resultTools,
- responded, modalities, source, reasoningEnd
+ responded, modalities, source, reasoningEnd, reasoning_details,
  ] = [
  options.result ?? '', Buffer.alloc(0), [], '', null, [], false,
- options.modalities, model?.source, false
+ options.modalities, model?.source, false, []
  ];
+ options.provider = provider;
  options.model = options.model || model.name;
  const { history }
  = await buildPrompts(MODELS[options.model], content, options);
@@ -816,14 +823,16 @@ const promptOpenAI = async (aiId, content, options = {}) => {
  modalities?.find?.(x => x === AUDIO)
  && { voice: DEFAULT_MODELS[OPENAI_VOICE], format: 'pcm16' }
  ), ...model?.tools && !googleImageMode ? {
- tools: options.tools ?? (await toolsOpenAI()).map(x => x.def),
- tool_choice: 'auto',
+ tools: options.tools ?? _tools.map(x => x.def), tool_choice: 'auto',
  } : {},
  store: true, stream: true,
  reasoning_effort: options.reasoning_effort,
  });
  for await (event of resp) {
  // print(JSON.stringify(event, null, 2));
+ event?.provider && event?.model && (options.router = {
+ provider: event.provider, model: event.model,
+ });
  event = event?.choices?.[0] || {};
  const delta = event.delta || {};
  let [delteReasoning, deltaText] = [
@@ -836,6 +845,8 @@ const promptOpenAI = async (aiId, content, options = {}) => {
  const deltaAudio = delta.audio?.data ? await convert(
  delta.audio.data, { input: BASE64, expected: BUFFER }
  ) : Buffer.alloc(0);
+ delta?.reasoning_details?.length
+ && reasoning_details.push(...delta.reasoning_details);
  for (const x of delta.tool_calls || []) {
  let curFunc = resultTools.find(y => y.index === x.index);
  curFunc || (resultTools.push(curFunc = {}));
@@ -870,7 +881,9 @@ const promptOpenAI = async (aiId, content, options = {}) => {
  }, options);
  }
  event = {
- role: assistant, text: result, tool_calls: resultTools,
+ role: assistant, text: result,
+ ...reasoning_details?.length ? { reasoning_details } : {},
+ tool_calls: resultTools,
  ...resultImages.length ? { images: resultImages } : {},
  ...resultAudio.length ? { audio: { data: resultAudio } } : {},
  };
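Two streaming additions work in tandem here: OpenRouter-style events can carry top-level provider/model fields (captured into options.router and consumed by the model string in packResp above), and per-delta reasoning_details fragments are accumulated during the stream and replayed on the final assistant event. That final event uses a conditional spread so the key only appears when something was actually collected; a small sketch, with a hypothetical fragment shape:

    // hypothetical collected fragments; an empty stream attaches nothing
    const reasoning_details = [{ type: 'reasoning.text', text: '…' }];
    const event = {
        role: 'assistant', text: 'result',
        // spreads {} when the array is empty → the key is omitted entirely
        ...reasoning_details?.length ? { reasoning_details } : {},
    };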
package/lib/gen.mjs CHANGED
@@ -10,11 +10,11 @@ const _NEED = ['OpenAI', '@google/genai'];
  const log = (cnt, opt) => _log(cnt, import.meta.url, { time: 1, ...opt || {} });
  const [
  clients, OPENAI, GEMINI, BASE64, FILE, BUFFER, ERROR_GENERATING,
- IMAGEN_MODEL, OPENAI_MODEL, VEO_MODEL,
+ IMAGEN_MODEL, OPENAI_MODEL, VEO_MODEL, IMAGEN_UPSCALE_MODEL,
  ] = [
  {}, 'OPENAI', 'GEMINI', 'BASE64', 'FILE', 'BUFFER',
  'Error generating media.', 'imagen-4.0-ultra-generate-001',
- 'gpt-image-1', 'veo-3.1-generate-preview',
+ 'gpt-image-1', 'veo-3.1-generate-preview', 'imagen-4.0-upscale-preview',
  ];

  const init = async (options) => {
package/lib/manifest.mjs CHANGED
@@ -1,7 +1,7 @@
  const manifest = {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "1999.1.97",
+ "version": "1999.1.98",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "1999.1.97",
+ "version": "1999.1.98",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",