utilitas 1998.1.14 → 1998.1.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/alan.mjs CHANGED
@@ -17,15 +17,15 @@ const _NEED = [
  const [
  OPENAI, GEMINI, CHATGPT, OPENAI_EMBEDDING, GEMINI_EMEDDING, OPENAI_TRAINING,
  OLLAMA, CLAUDE, GPT_4O_MINI, GPT_4O, GPT_O1, GPT_O3_MINI, GEMINI_20_FLASH,
- GEMINI_20_FLASH_THINKING, NOVA, EMBEDDING_001, MISTRAL, CHATGPT_REASONING,
- TEXT_EMBEDDING_3_SMALL, TEXT_EMBEDDING_3_LARGE, CLAUDE_35_SONNET,
- CLAUDE_35_HAIKU, AUDIO, WAV, CHATGPT_MINI, ATTACHMENTS, CHAT, OPENAI_VOICE,
- MEDIUM, LOW, HIGH,
+ GEMINI_20_FLASH_THINKING, NOVA, EMBEDDING_001, DEEPSEEK_R1,
+ CHATGPT_REASONING, TEXT_EMBEDDING_3_SMALL, TEXT_EMBEDDING_3_LARGE,
+ CLAUDE_35_SONNET, CLAUDE_35_HAIKU, AUDIO, WAV, CHATGPT_MINI, ATTACHMENTS,
+ CHAT, OPENAI_VOICE, MEDIUM, LOW, HIGH,
  ] = [
  'OPENAI', 'GEMINI', 'CHATGPT', 'OPENAI_EMBEDDING', 'GEMINI_EMEDDING',
  'OPENAI_TRAINING', 'OLLAMA', 'CLAUDE', 'gpt-4o-mini', 'gpt-4o',
  'o1', 'o3-mini', 'gemini-2.0-flash-exp',
- 'gemini-2.0-flash-thinking-exp', 'nova', 'embedding-001', 'mistral',
+ 'gemini-2.0-flash-thinking-exp', 'nova', 'embedding-001', 'DeepSeek-R1',
  'CHATGPT_REASONING', 'text-embedding-3-small', 'text-embedding-3-large',
  'claude-3-5-sonnet-latest', 'claude-3-5-haiku-latest', 'audio', 'wav',
  'CHATGPT_MINI', '[ATTACHMENTS]', 'CHAT', 'OPENAI_VOICE', 'medium',
@@ -82,7 +82,7 @@ const DEFAULT_MODELS = {
  [CLAUDE]: CLAUDE_35_SONNET,
  [GEMINI_EMEDDING]: EMBEDDING_001,
  [GEMINI]: GEMINI_20_FLASH,
- [OLLAMA]: MISTRAL,
+ [OLLAMA]: DEEPSEEK_R1,
  [OPENAI_EMBEDDING]: TEXT_EMBEDDING_3_SMALL,
  [OPENAI_TRAINING]: GPT_4O_MINI, // https://platform.openai.com/docs/guides/fine-tuning
  [OPENAI_VOICE]: NOVA,
@@ -230,10 +230,14 @@ const MODELS = {
  png, jpeg,
  ],
  },
- [MISTRAL]: {
+ [DEEPSEEK_R1]: {
  contextWindow: 128000,
+ maxOutputTokens: 32768,
  requestLimitsRPM: Infinity,
  tokenLimitsTPM: Infinity,
+ json: false,
+ vision: false,
+ reasoning: true,
  },
  [TEXT_EMBEDDING_3_SMALL]: {
  contextWindow: 8191,
@@ -363,11 +367,9 @@ const init = async (options) => {
  }
  break;
  case OLLAMA:
- const Ollama = await need('ollama');
  clients[provider] = {
- // @todo: this feature does not work yet.
- // client: new Ollama({ host: options?.endpoint })
- client: Ollama,
+ client: new (await need('ollama', { raw: true })).Ollama(options),
+ model: options?.model || DEFAULT_MODELS[OLLAMA],
  };
  break;
  default:
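
For context, a minimal sketch of what the new OLLAMA branch amounts to when the ollama-js package is loaded directly; the explicit host and the standalone clients object are assumptions for illustration, and ollama-js falls back to its local default endpoint when no host is passed.

    // Hypothetical standalone equivalent of the OLLAMA branch above.
    import { Ollama } from 'ollama';

    const options = { host: 'http://127.0.0.1:11434' }; // assumed local endpoint
    const clients = {};
    clients.OLLAMA = {
        client: new Ollama(options),            // ollama-js client constructor
        model: options?.model || 'DeepSeek-R1', // DEFAULT_MODELS[OLLAMA] in this release
    };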
@@ -430,6 +432,14 @@ const buildGptMessage = (content, options) => {
  return message;
  };
 
+ const buildOllamaMessage = (content, options) => {
+ const message = String.isString(content) ? {
+ role: options?.role || user, content,
+ } : content;
+ assertContent(message.content);
+ return message;
+ };
+
  const buildGeminiParts = (text, attachments) => {
  // Gemini API does not allow empty text, even you prompt with attachments.
  const message = [...text?.length || attachments?.length ? [{
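
The new buildOllamaMessage helper normalizes its input into the plain { role, content } shape that the Ollama chat API expects. A hedged standalone sketch of the same idea, using plain checks in place of the utilitas String.isString and assertContent helpers and 'user' in place of the module's role constant:

    const buildOllamaMessage = (content, options = {}) => {
        const message = typeof content === 'string'
            ? { role: options.role || 'user', content }
            : content;
        if (!message?.content?.length) { throw new Error('Content is required.'); }
        return message;
    };

    buildOllamaMessage('Hello!');                     // => { role: 'user', content: 'Hello!' }
    buildOllamaMessage('Hi.', { role: 'assistant' }); // => { role: 'assistant', content: 'Hi.' }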
@@ -521,28 +531,38 @@ const packResp = async (resp, options) => {
  // ]
  // };
  let [richText, referencesMarkdown] = [null, null];
- if (!options?.jsonMode && references?.segments?.length
- && references?.links?.length && !options?.processing) {
- richText = txt;
- for (let i = references.segments.length - 1; i >= 0; i--) {
- let idx = richText.indexOf(references.segments[i].text);
- if (idx < 0) { continue; }
- idx += references.segments[i].text.length;
- richText = richText.slice(0, idx)
- + references.segments[i].indices.map(y => ` (${y + 1})`).join('')
- + richText.slice(idx);
+ if (!options?.jsonMode) {
+ if (!options?.processing
+ && references?.segments?.length && references?.links?.length) {
+ richText = txt;
+ for (let i = references.segments.length - 1; i >= 0; i--) {
+ let idx = richText.indexOf(references.segments[i].text);
+ if (idx < 0) { continue; }
+ idx += references.segments[i].text.length;
+ richText = richText.slice(0, idx)
+ + references.segments[i].indices.map(y => ` (${y + 1})`).join('')
+ + richText.slice(idx);
+ }
+ referencesMarkdown = 'References:\n\n' + references.links.map((x, i) => {
+ return `${i + 1}. [${x.title}](${x.uri})`;
+ }).join('\n');
+ }
+ const lines = (richText || txt).split('\n');
+ const indexOfEnd = lines.indexOf('</think>');
+ if (lines[0] === '<think>' && indexOfEnd !== -1) {
+ lines[0] = '```think';
+ lines[indexOfEnd] = '```';
+ richText = lines.join('\n');
  }
- referencesMarkdown = 'References:\n\n' + references.links.map((x, i) => {
- return `${i + 1}. [${x.title}](${x.uri})`;
- }).join('\n');
  }
  return {
  ...text(txt), ...options?.jsonMode && !(
  options?.delta && options?.processing
  ) ? { json: parseJson(txt) } : {},
- ...richText && referencesMarkdown ? { richText, referencesMarkdown } : {},
- ...audio ? { audio, audioMimeType: options?.audioMimeType } : {},
+ ...richText ? { richText } : {},
  ...references ? { references } : {},
+ ...referencesMarkdown ? { referencesMarkdown } : {},
+ ...audio ? { audio, audioMimeType: options?.audioMimeType } : {},
  model: options?.model,
  };
  };
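
The added branch in packResp rewrites a leading DeepSeek-R1 reasoning block, delimited by <think> and </think> lines, into a fenced ```think block inside richText. A self-contained sketch of that transformation, with a hypothetical response string as input:

    // Hypothetical text resembling DeepSeek-R1 output.
    const txt = '<think>\nWeigh the options first...\n</think>\nThe answer is 42.';
    const lines = txt.split('\n');
    const indexOfEnd = lines.indexOf('</think>');
    if (lines[0] === '<think>' && indexOfEnd !== -1) {
        lines[0] = '```think';      // open a fenced block in place of the raw tag
        lines[indexOfEnd] = '```';  // close the fence where </think> was
    }
    console.log(lines.join('\n'));
    // ```think
    // Weigh the options first...
    // ```
    // The answer is 42.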
@@ -629,13 +649,13 @@ const promptChatGPT = async (content, options = {}) => {
  };
 
  const promptOllama = async (content, options = {}) => {
- const { client } = await getOllamaClient(options);
+ const { client, model } = await getOllamaClient(options);
  // https://github.com/ollama/ollama-js
  // https://github.com/jmorganca/ollama/blob/main/examples/typescript-simplechat/client.ts
- options.model = options?.model || DEFAULT_MODELS[OLLAMA];
+ options.model = options?.model || model;
  const resp = await client.chat({
  model: options.model, stream: true,
- ...messages([...options?.messages || [], buildGptMessage(content)]),
+ ...messages([...options?.messages || [], buildOllamaMessage(content)]),
  })
  let [chunk, result] = [null, ''];
  for await (chunk of resp) {
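
promptOllama consumes the reply as a stream. A minimal sketch of the documented ollama-js call it wraps, with the model name and prompt as placeholders:

    import { Ollama } from 'ollama';

    const client = new Ollama();            // defaults to the local Ollama endpoint
    const resp = await client.chat({
        model: 'DeepSeek-R1',               // DEFAULT_MODELS[OLLAMA] in this release
        stream: true,
        messages: [{ role: 'user', content: 'Why is the sky blue?' }],
    });
    let result = '';
    for await (const chunk of resp) {
        result += chunk.message.content;    // accumulate streamed tokens
    }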
@@ -962,7 +982,7 @@ const talk = async (input, options) => {
  const session = await getSession(sessionId, { engine, ...options });
  let [resp, sys, messages, msgBuilder] = [null, [], [], null];
  switch (engine) {
- case CHATGPT: case OLLAMA:
+ case CHATGPT:
  sys.push(buildGptMessage(session.systemPrompt, { role: system }));
  msgBuilder = () => {
  messages = [];
@@ -995,6 +1015,17 @@ const talk = async (input, options) => {
  };
  msgBuilder()
  break;
+ case OLLAMA:
+ sys.push(buildOllamaMessage(session.systemPrompt, { role: system }));
+ msgBuilder = () => {
+ messages = [];
+ session.messages.map(x => {
+ messages.push(buildOllamaMessage(x.request, { role: user }));
+ messages.push(buildOllamaMessage(x.response, { role: assistant }));
+ });
+ };
+ msgBuilder()
+ break;
  default:
  throwError(`Invalid AI engine: '${engine}'.`);
  }
@@ -1160,7 +1191,7 @@ export {
  GPT_4O,
  GPT_O3_MINI,
  GPT_O1,
- MISTRAL,
+ DEEPSEEK_R1,
  MODELS,
  OPENAI_VOICE,
  RETRIEVAL,
package/lib/manifest.mjs CHANGED
@@ -1,7 +1,7 @@
  const manifest = {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "1998.1.14",
+ "version": "1998.1.16",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "1998.1.14",
+ "version": "1998.1.16",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",