utilitas 2001.1.86 → 2001.1.88

package/lib/alan.mjs CHANGED
@@ -54,7 +54,8 @@ const [
  GEMINI_30_PRO, GEMINI_30_FLASH, IMAGEN_4_ULTRA, VEO_31, IMAGEN_4_UPSCALE,
  ERROR_GENERATING, GEMINI_25_FLASH_TTS, GEMINI_25_PRO_TTS, wav, PNG_EXT,
  GPT_4O_MIMI_TTS, GPT_4O_TRANSCRIBE, INVALID_AUDIO, OGG_EXT, ELLIPSIS,
- TOP_LIMIT, ATTACHMENT, PROCESSING, CURSOR, LN1, LN2, TOP, DEEPSEEK
+ TOP_LIMIT, ATTACHMENT, PROCESSING, CURSOR, LN1, LN2, TOP, DEEPSEEK,
+ DEEP_RESEARCH_PRO,
  ] = [
  'OpenAI', 'Google', 'Ollama', 'nova', 'deepseek-3.2-speciale', '```',
  'claude-opus-4.5', 'audio', 'wav', 'OPENAI_VOICE', 'medium', 'think',
@@ -73,7 +74,7 @@ const [
  'gemini-2.5-flash-preview-tts', 'gemini-2.5-pro-preview-tts', 'wav',
  'png', 'gpt-4o-mini-tts', 'gpt-4o-transcribe', 'Invalid audio data.',
  'ogg', '...', 3, 'ATTACHMENT', { processing: true }, ' █', '\n\n',
- '\n\n\n', 'top', 'DeepSeek',
+ '\n\n\n', 'top', 'DeepSeek', 'deep-research-pro-preview-12-2025',
  ];

  const [joinL1, joinL2]
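
Reviewer note: the two hunks above extend a parallel-array destructuring idiom — constant names on the left, string values on the right, matched by position — so the new name and its value must be appended at matching indexes. A minimal sketch of the idiom, with all other constants elided:

```js
// The n-th name on the left binds to the n-th literal on the right, so
// adding DEEP_RESEARCH_PRO to the name list requires appending its id at
// the same position in the value list (everything else elided for brevity).
const [TOP, DEEPSEEK, DEEP_RESEARCH_PRO] = [
    'top', 'DeepSeek', 'deep-research-pro-preview-12-2025',
];
console.log(DEEP_RESEARCH_PRO); // 'deep-research-pro-preview-12-2025'
```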
@@ -102,14 +103,14 @@ const MODEL_ICONS = {
  };

  const FEATURE_ICONS = {
- audio: '🔊', deepsearch: '🔍', fast: '⚡️', hearing: '👂', hidden: '🙈',
- image: '🎨', json: '📊', reasoning: '🧠', tools: '🧰', video: '🎬',
+ audio: '🔊', 'deep-research': '🔍', fast: '⚡️', hearing: '👂', hidden: '🙈',
+ image: '🎨', reasoning: '🧠', structured: '📊', tools: '🧰', video: '🎬',
  vision: '👁️', // finetune: '🔧',
  };

  const GEMINI_RULES = {
  source: GOOGLE, contextWindow: m(1.05), maxOutputTokens: k(65.5),
- hearing: true, json: true, reasoning: true, tools: true, vision: true,
+ hearing: true, reasoning: true, structured: true, tools: true, vision: true,
  supportedMimeTypes: [
  MIME_PNG, MIME_JPEG, MIME_MOV, MIME_MPEG, MIME_MP4, MIME_MPG, MIME_AVI,
  MIME_WMV, MIME_MPEGPS, MIME_FLV, MIME_PDF, MIME_AAC, MIME_FLAC,
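
Reviewer note: after renaming `json` to `structured` and `deepsearch` to `'deep-research'`, the icon table and the per-model capability flags share the same keys, so badges can be derived directly from a model's flags. A minimal sketch of that lookup; `featureBadges` is a hypothetical helper, not part of the package:

```js
// Hypothetical helper: render icon badges for a model by intersecting its
// boolean capability flags with the FEATURE_ICONS table above.
const featureBadges = (model, icons) => Object.keys(icons)
    .filter(key => model[key] === true) // keep only enabled capabilities
    .map(key => icons[key]).join(' ');

// e.g. a model with { reasoning: true, structured: true, 'deep-research': true }
// renders as '🔍 🧠 📊' (order follows the icon table's key order).
```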
@@ -120,7 +121,7 @@ const GEMINI_RULES = {

  const OPENAI_RULES = {
  source: OPENAI, contextWindow: k(400), maxOutputTokens: k(128),
- hearing: true, json: true, reasoning: true, tools: true, vision: true,
+ hearing: true, reasoning: true, structured: true, tools: true, vision: true,
  supportedMimeTypes: [
  MIME_PNG, MIME_JPEG, MIME_GIF, MIME_WEBP, MIME_PDF, MIME_WAV
  ], defaultProvider: OPENROUTER,
@@ -128,7 +129,7 @@ const OPENAI_RULES = {

  const DEEPSEEK_32_RULES = {
  source: DEEPSEEK, contextWindow: k(163.8), maxOutputTokens: k(65.5),
- json: true, tools: true, reasoning: true,
+ structured: true, tools: true, reasoning: true,
  };

  // https://platform.openai.com/docs/models
@@ -137,7 +138,7 @@ const DEEPSEEK_32_RULES = {
  const MODELS = {
  // fast and balanced models
  [GEMINI_30_FLASH]: { // https://gemini.google.com/app/c680748b3307790b
- ...GEMINI_RULES, fast: true, json: false, // issue with json output via OpenRouter
+ ...GEMINI_RULES, fast: true, structured: false, // issue with json output via OpenRouter
  },
  // strong and fast
  [GPT_52]: { ...OPENAI_RULES, fast: true },
@@ -169,7 +170,7 @@ const MODELS = {
  [GPT_51_CODEX]: { ...OPENAI_RULES },
  [CLOUD_OPUS_45]: {
  source: ANTHROPIC, contextWindow: k(200), maxOutputTokens: k(64),
- json: true, reasoning: true, tools: true, vision: true,
+ reasoning: true, structured: true, tools: true, vision: true,
  supportedMimeTypes: [
  MIME_TEXT, MIME_PNG, MIME_JPEG, MIME_GIF, MIME_WEBP, MIME_PDF,
  ], defaultProvider: OPENROUTER,
@@ -191,10 +192,14 @@ const MODELS = {
  source: OPENAI, maxInputTokens: 0,
  hearing: true, fast: true, hidden: true, defaultProvider: OPENAI,
  },
- // models with deepsearch capabilities
+ // agents with deep-research capabilities
+ [DEEP_RESEARCH_PRO]: {
+ source: GOOGLE, contextWindow: m(1.05), maxOutputTokens: k(65.5),
+ 'deep-research': true, reasoning: true, defaultProvider: GOOGLE,
+ },
  [JINA_DEEPSEARCH]: { // @todo: parse more details from results, eg: "reed urls".
  maxInputTokens: Infinity, attachmentTokenCost: 0,
- deepsearch: true, json: true, reasoning: true, vision: true,
+ 'deep-research': true, reasoning: true,
  supportedMimeTypes: [MIME_PNG, MIME_JPEG, MIME_TEXT, MIME_WEBP, MIME_PDF],
  defaultProvider: JINA,
  },
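
Reviewer note: because `'deep-research'` is now just another boolean flag in the MODELS table, the new Google agent and the existing Jina agent can be discovered with the same filter. A minimal sketch over the shape shown in the diff; `researchCapable` is a hypothetical helper:

```js
// Hypothetical helper: list every registered model/agent that can run a
// deep-research task, regardless of provider. Bracket access is required
// because the key contains a dash.
const researchCapable = models => Object.entries(models)
    .filter(([, m]) => m['deep-research'])
    .map(([name]) => name);

// With the entries above, this includes both
// 'deep-research-pro-preview-12-2025' and the Jina deepsearch model.
```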
@@ -204,7 +209,7 @@ const MODELS = {
  // best local model
  [GEMMA_3_27B]: {
  source: GOOGLE, contextWindow: k(128), maxOutputTokens: k(8),
- fast: true, json: true, vision: true,
+ fast: true, structured: true, vision: true,
  supportedMimeTypes: [MIME_PNG, MIME_JPEG, MIME_GIF],
  defaultProvider: OLLAMA,
  },
@@ -252,7 +257,7 @@ for (const n in MODELS) {
  // }
  // // for other features, if any model supports it, then AUTO supports it
  // for (const key of [
- // 'json', 'reasoning', 'tools', 'vision', 'fast', 'deepsearch', 'image',
+ // 'structured', 'reasoning', 'tools', 'vision', 'fast', 'deep-research', 'image',
  // ]) {
  // MODELS[AUTO][key] = MODELS[AUTO][key] || MODELS[n][key];
  // }
@@ -516,7 +521,7 @@ const packAi = (ais, options = {}) => {

  const getAi = async (id, options = {}) => {
  options?.select || (options.select = {});
- options?.jsonMode && (options.select.json = true);
+ options?.jsonMode && (options.select.structured = true);
  if (id) {
  const ai = ais.find(x => x.id === id);
  assert(ai, `AI not found: ${id}.`);
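
Reviewer note: the `jsonMode` shortcut survives the rename; it is now translated into the capability filter `{ structured: true }`, so the selector only considers models that advertise structured output. A minimal usage sketch inside an async context, assuming `getAi` behaves as shown in the diff:

```js
// jsonMode is a convenience flag: it becomes select.structured = true, so
// only structured-output models are eligible. Passing no id lets the
// selector choose the model.
const ai = await getAi(null, { jsonMode: true });
// Equivalent explicit form after this change:
const ai2 = await getAi(null, { select: { structured: true } });
```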
@@ -687,7 +692,7 @@ const packResp = async (resp, options) => {
  // "title": "在線時鐘- 目前時間- 線上時鐘- 時鐘線上 - 鬧鐘",
  // "url": "https://naozhong.tw/shijian/",
  // "content": "- [鬧鐘](https://naozhong.tw/)\n- [計時器](https://naozhong.tw/jishiqi/)\n- [碼錶](https://naozhong.tw/miaobiao/)\n- [時間](https://naozhong.tw/shijian/)\n\n# 現在時間\n\n加入\n\n- [編輯](javascript:;)\n- [移至頂端](javascript:;)\n- [上移](javascript:;)\n- [下移](javascript:;)\n- [刪除](javascript:;)\n\n# 最常用\n\n| | |\n| --- | --- |\n| [台北](https://naozhong.tw/shijian/%E5%8F%B0%E5%8C%97/) | 10:09:14 |\n| [北京，中國](https://naozhong.tw/shijian/%E5%8C%97%E4%BA%AC-%E4%B8%AD%E5%9C%8B/) | 10:09:14 |\n| [上海，中國](https://naozhong.tw/shijian/%E4%B8%8A%E6%B5%B7-%E4%B8%AD%E5%9C%8B/) | 10:09:14 |\n| [烏魯木齊，中國](https://naozhong.tw/shijian/%E7%83%8F%E9%AD%AF%",
- // "dateTime": "2025-03-13 06:48:01" // jina deepsearch only
+ // "dateTime": "2025-03-13 06:48:01" // jina deep-research only
  // }
  // },
  // ];
@@ -732,7 +737,7 @@ const packResp = async (resp, options) => {
  ...audio ? { audio } : {}, ...images?.length ? { images } : {},
  processing: !!options?.processing,
  model: packModelId([
- options.provider, options?.router?.provider,
+ options?.provider, options?.router?.provider,
  options?.router?.model || options?.model,
  ]),
  };
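
Reviewer note: the one-character fix above makes the `provider` read consistent with the surrounding `options?.` accesses; without it, calling `packResp` with no options object would throw before `packModelId` ever ran. A minimal illustration of why the optional chain matters:

```js
// With options undefined, a plain member access throws, while optional
// chaining short-circuits to undefined.
const options = undefined;
// options.provider             // TypeError: Cannot read properties of undefined
const provider = options?.provider; // undefined, no throw
```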
@@ -750,8 +755,8 @@ const packModelId = (model_reference, options = {}) => {
  };

  const buildPrompts = async (model, input, options = {}) => {
- assert(!(options.jsonMode && !model?.json),
- `This model does not support JSON output: ${model.name}`);
+ assert(!(options.jsonMode && !model?.structured),
+ `This model does not support structured output: ${model.name}`);
  assert(!(options.reasoning && !model?.reasoning),
  `This model does not support reasoning: ${model.name}`);
  options.attachments = (await Promise.all((
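
Reviewer note: the guard pattern here is `assert(!(requested && !supported), message)` — it fails only when the caller asks for a capability the model lacks. A minimal sketch of the same check in isolation (`assert` from node:assert; the model literal is illustrative, not a real entry):

```js
import assert from 'node:assert';

// Fails only when structured output is requested but not supported.
const model = { name: 'example-model', structured: false }; // illustrative
const options = { jsonMode: true };
assert(!(options.jsonMode && !model?.structured),
    `This model does not support structured output: ${model.name}`);
// => AssertionError here, since jsonMode is requested and structured is false.
```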
@@ -1117,6 +1122,60 @@ const promptGoogle = async (aiId, prompt, options = {}) => {
  }, model: packModelId([provider, M.source, M.name]),
  };
  }
+ } else if (M?.['deep-research']) {
+ const pkgOptions = { ...options || {}, provider, model: M.name };
+ let interactionId, last_event_id, isComplete = false;
+ let [thought, text, result] = ['', '', ''];
+ var resp;
+ // Helper to handle the event logic
+ const handleStream = async (stream) => {
+ for await (const chunk of stream) {
+ chunk.event_type === 'interaction.start'
+ && (interactionId = chunk.interaction.id);
+ chunk.event_id && (last_event_id = chunk.event_id);
+ let deltaThought = '', deltaText = '', delta = '';
+ if (chunk.event_type === 'content.delta') {
+ if (chunk.delta.type === 'text') {
+ thought && (deltaThought = `${THINK_END}\n\n`);
+ text += (deltaText = chunk.delta.text);
+ } else if (chunk.delta.type === 'thought_summary') {
+ deltaThought = chunk.delta.content.text;
+ thought || (deltaThought = `${THINK_STR}\n${deltaThought}`);
+ thought += deltaThought;
+ }
+ result += (delta = deltaThought + deltaText);
+ await streamResp({
+ text: options.delta ? delta : result,
+ }, pkgOptions);
+ } else if (chunk.event_type === 'interaction.complete') {
+ isComplete = true;
+ }
+ }
+ };
+ // 1. Start the task with streaming
+ resp = await client.interactions.create({
+ input: prompt, agent: M.name, background: true, store: true,
+ stream: true, // tools: [],
+ agent_config: { type: 'deep-research', thinking_summaries: 'auto' },
+ previous_interaction_id: options?.previous_interaction_id,
+ });
+ await handleStream(resp);
+ // 2. Reconnect Loop
+ while (!isComplete && interactionId) {
+ log(`[DRS] Reconnecting to interaction ${interactionId} from event ${last_event_id}...`);
+ try {
+ resp = await client.interactions.get(interactionId, {
+ stream: true, last_event_id,
+ });
+ await handleStream(resp);
+ } catch (e) {
+ log('[DRS] Reconnection failed, retrying in 2s...');
+ await timeout(2000);
+ }
+ }
+ // 3. Return response
+ options?.raw || (resp = await packResp({ text: result }, pkgOptions));
+ return resp;
  } else {
  throwError('Unsupported model.');
  }
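
Reviewer note: the new branch runs deep-research as a background interaction and survives dropped streams by resuming from `last_event_id`, so a long research task never restarts from scratch. A hypothetical caller sketch; only `delta` and `previous_interaction_id` appear in the diff, and the `interactionId` field on the response is an assumption, not a documented shape:

```js
// Hypothetical invocation of the deep-research branch above: delta: true
// asks for only the newly generated slice per streamed event.
let resp = await promptGoogle(aiId, 'Survey recent work on topic X.', {
    delta: true,
});
// Follow-up question chained onto the finished run (option name as used in
// the diff; the response field carrying the id is an assumption):
resp = await promptGoogle(aiId, 'Now compare the three strongest approaches.', {
    previous_interaction_id: resp?.interactionId,
});
```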
@@ -1499,7 +1558,7 @@ const analyzeSessions = async (sessionIds, options) => {
  if (sm.length) { sses[ids[i]] = sm; }
  }
  const ai = await getAi(options?.aiId, {
- jsonMode: true, simple: true, select: { json: true, fast: true },
+ jsonMode: true, simple: true, select: { structured: true, fast: true },
  ...options || {}
  });
  const pmt = options?.prompt || (
package/lib/manifest.mjs CHANGED
@@ -1,7 +1,7 @@
  const manifest = {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "2001.1.86",
+ "version": "2001.1.88",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "2001.1.86",
+ "version": "2001.1.88",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",