halbot 1993.2.10 → 1993.2.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -3,7 +3,7 @@
  [![MIT licensed](https://img.shields.io/badge/license-MIT-blue)](./LICENSE)
  [![Node.js Package](https://github.com/Leask/halbot/actions/workflows/npm-publish.yml/badge.svg)](https://github.com/Leask/halbot/actions/workflows/npm-publish.yml)
 
- Just another `Gemini` / `ChatGPT` / `Claude` / `Mistral (by ollama)` Telegram bob, which is simple design, easy to use, extendable and fun.
+ Just another `Gemini` / `ChatGPT` / `Claude` / `Ollama` Telegram bob, which is simple design, easy to use, extendable and fun.
 
  Live demo, click to watch on YouTube:
 
@@ -29,7 +29,7 @@ alt="Halbot live demo" width="240" height="180" border="10" /></a>
  - [ChatGPT](https://openai.com/blog/chatgpt) (`OpenAI` API key required)
  - [Gemini](https://ai.google.dev/gemini-api/docs) (Google `Gemini` API Key required)
  - [Claude](https://www.anthropic.com/api) (`Anthropic` API Key required)
- - [Mistral](https://mistral.ai/) (Install [Ollama](https://github.com/jmorganca/ollama) and enable `Mistral`)
+ - [Ollama](https://github.com/jmorganca/ollama) (Install `Ollama` and serve your model)
  - Speech-to-Text (`OpenAI` or `Google Cloud` API key required, or your own engine)
  - Text-to-Speech (`OpenAI` or `Google Cloud` API key required, or your own engine)
  - Text-to-Image by DALL·E (`OpenAI` API key required, or your own engine)
@@ -105,15 +105,15 @@ All supported configuration fields:
      // OPTIONAL, integer, default: 2.
      "claudePriority": "[[Custom Claude Priority]]",
 
-     // Set some of these fields if you need Mistral features.
+     // Set some of these fields if you need Ollama features.
      // OPTIONAL, boolean.
-     "mistralEnabled": "[[Enable Mistral hosted by Ollama]]",
+     "ollamaEnabled": "[[Enable Ollama API]]",
      // OPTIONAL, string.
-     "mistralEndpoint": "[[Custom Mistral API endpoint]]",
-     // OPTIONAL, string, default: "Mistral" (Mistral 7B).
-     "mistralModel": "[[Custom Mistral Model ID]]",
+     "ollamaEndpoint": "[[Custom Ollama API endpoint]]",
+     // OPTIONAL, string, default: "DeepSeek-R1" (DeepSeek-R1 7B).
+     "ollamaModel": "[[Custom Ollama Model ID]]",
      // OPTIONAL, integer, default: 3.
-     "mistralPriority": "[[Custom Mistral Priority]]",
+     "ollamaPriority": "[[Custom Ollama Priority]]",
 
      // OPTIONAL, undefined || array of string.
      // To open the bot to PUBLIC, DO NOT set this field;
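For orientation, the renamed fields drop into a halbot config like the minimal sketch below. This is an illustrative excerpt only: `telegramToken` is assumed from the rest of the README's config block, the endpoint shown is merely Ollama's conventional local default rather than anything halbot requires, and the model ID is whichever model you have pulled into Ollama.

```jsonc
{
    "telegramToken": "[[Your Telegram Bot Token]]",
    // Turn on the Ollama provider (replaces the former mistral* fields).
    "ollamaEnabled": true,
    // OPTIONAL: only needed when Ollama is not on its usual local endpoint.
    "ollamaEndpoint": "http://localhost:11434",
    // OPTIONAL: any locally pulled model; the diff's default is "DeepSeek-R1".
    "ollamaModel": "deepseek-r1",
    // OPTIONAL: provider priority, default 3 per the diff.
    "ollamaPriority": 3
}
```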
package/index.mjs CHANGED
@@ -77,16 +77,16 @@ const init = async (options) => {
              model: options?.claudeModel,
          };
      }
-     if (options?.mistralEnabled || options?.mistralEndpoint) {
+     if (options?.ollamaEnabled || options?.ollamaEndpoint) {
          await alan.init({
-             provider: 'OLLAMA', endpoint: options?.mistralEndpoint,
+             provider: 'OLLAMA', endpoint: options?.ollamaEndpoint,
          });
-         ai['Mistral'] = {
-             engine: 'OLLAMA', priority: options?.mistralPriority || 3,
+         ai['Ollama'] = {
+             engine: 'OLLAMA', priority: options?.ollamaPriority || 3,
          };
          engines['OLLAMA'] = {
              // only support custom model while prompting
-             model: options?.mistralModel,
+             model: options?.ollamaModel || alan.DEFAULT_MODELS['OLLAMA'],
          };
      }
      assert(utilitas.countKeys(ai), 'No AI provider is configured.');
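Only the option names change here; the provider id handed to `alan.init` is still `'OLLAMA'`, and a missing `ollamaModel` now falls back to `alan.DEFAULT_MODELS['OLLAMA']`. Configs written against 1993.2.10 therefore need their keys renamed; a hypothetical helper (not shipped with halbot, shown only to make the mapping explicit) could look like:

```js
// Map pre-1993.2.12 option names to the renamed Ollama options.
// Purely illustrative; halbot does not include this function.
const migrateOllamaOptions = (options = {}) => ({
    ...options,
    ollamaEnabled: options.ollamaEnabled ?? options.mistralEnabled,
    ollamaEndpoint: options.ollamaEndpoint ?? options.mistralEndpoint,
    ollamaModel: options.ollamaModel ?? options.mistralModel,
    ollamaPriority: options.ollamaPriority ?? options.mistralPriority,
});
```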
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
      "name": "halbot",
-     "description": "Just another `ChatGPT` / `Gemini` / `Mistral (by ollama)` Telegram bob, which is simple design, easy to use, extendable and fun.",
-     "version": "1993.2.10",
+     "description": "Just another `ChatGPT` / `Gemini` / `Ollama` Telegram bob, which is simple design, easy to use, extendable and fun.",
+     "version": "1993.2.12",
      "private": false,
      "homepage": "https://github.com/Leask/halbot",
      "type": "module",
@@ -52,7 +52,7 @@
          "pgvector": "^0.2.0",
          "telegraf": "^4.16.3",
          "tesseract.js": "^6.0.0",
-         "utilitas": "^1998.1.15",
+         "utilitas": "^1998.1.16",
          "youtube-transcript": "^1.2.1"
      }
  }
@@ -1,7 +1,7 @@
  import { alan, bot, utilitas } from 'utilitas';
 
  const NAME_HACK = {
-     'ChatGPT': '⚛️', 'Gemini': '♊️', 'Claude': '✴️', 'Mistral': 'Ⓜ️',
+     'ChatGPT': '⚛️', 'Gemini': '♊️', 'Claude': '✴️', 'Ollama': '🦙',
  };
 
  const NAME_HACK_REVERSE = utilitas.reverseKeyValues(NAME_HACK);
@@ -92,7 +92,7 @@ export const { name, run, priority, func, help, args } = {
      },
      ai: {
          type: 'string', short: 'a', default: '',
-         desc: "`(ChatGPT, Gemini, Claude, Mistral, @)` Select AI engine.",
+         desc: "`(ChatGPT, Gemini, Claude, Ollama, @)` Select AI engine.",
          validate: validateAi,
      },
      render: {
@@ -29,11 +29,11 @@ const action = async (ctx, next) => {
              ctx.selectedAi = ['Claude'];
              ctx.hello(ctx.cmd.args);
              break;
-         case 'mistral':
-             if (!utilitas.insensitiveHas(allAi, 'mistral')) {
-                 return await ctx.er('Mistral is not available.');
+         case 'ollama':
+             if (!utilitas.insensitiveHas(allAi, 'ollama')) {
+                 return await ctx.er('Ollama is not available.');
              }
-             ctx.selectedAi = ['Mistral'];
+             ctx.selectedAi = ['Ollama'];
              ctx.hello(ctx.cmd.args);
              break;
      }
@@ -53,6 +53,6 @@ export const { name, run, priority, func, help, cmds } = {
          gemini: 'Use ♊️ Gemini temporary: /gemini Say hello to Gemini!',
          chatgpt: 'Use ⚛️ ChatGPT temporary: /chatgpt Say hello to ChatGPT!',
          claude: 'Use ✴️ Claude temporary: /claude Say hello to Claude!',
-         mistral: 'Use Ⓜ️ Mistral temporary: /mistral Say hello to Mistral!',
+         ollama: 'Use 🦙 Ollama temporary: /ollama Say hello to Ollama!',
      },
  };
@@ -2,12 +2,16 @@ import { alan, bot, utilitas } from 'utilitas';
 
  const onProgress = { onProgress: true };
  const [joinL1, joinL2] = [a => a.join(LN2), a => a.join(LN2)];
- const enrich = name => name; // Human readable name, eg: 'VERTEX' => 'Gemini'
  const log = content => utilitas.log(content, import.meta.url);
  const [BOT, BOTS, LN2] = [`${bot.EMOJI_BOT} `, {
-     ChatGPT: '⚛️', Gemini: '♊️', Claude: '✴️', Mistral: 'Ⓜ️',
+     ChatGPT: '⚛️', Gemini: '♊️', Claude: '✴️', Ollama: '🦙', 'DeepSeek-R1': '🐬',
  }, '\n\n'];
 
+ const enrich = (name, ctx) => {
+     const m = ctx._.ai[name]?.model;
+     return m ? ` | ${BOTS[m] ? `${BOTS[m]} ` : ''}${m}` : '';
+ };
+
  const action = async (ctx, next) => {
      if (!ctx.prompt && !ctx.carry.attachments.length) { return await next(); }
      const [YOU, msgs, tts, pms, extra]
@@ -25,7 +29,7 @@ const action = async (ctx, next) => {
          pure.push(content);
          packed.push(joinL2([
              ...(ctx.multiAi || !ctx.isDefaultAi(n) || said) && !options?.tts
-                 ? [`${BOTS[n]} ${enrich(n)}:`] : [], content
+                 ? [`${BOTS[n]} ${n}${enrich(n, ctx)}:`] : [], content
          ]));
      });
      return options?.tts && !pure.join('').trim().length ? '' : joinL1(packed);