halbot 1993.2.59 → 1993.2.61

This diff covers publicly available package versions released to one of the supported registries; it is provided for informational purposes only and reflects the changes between those versions as published.
package/index.mjs CHANGED
@@ -1,4 +1,4 @@
- import { alan, bot, image, shot, speech, utilitas } from 'utilitas';
+ import { alan, bot, image, web, speech, utilitas } from 'utilitas';
 
  await utilitas.locate(utilitas.__(import.meta.url, 'package.json'));
  const skillPath = utilitas.__(import.meta.url, 'skills');
@@ -10,7 +10,6 @@ const init = async (options = {}) => {
  const info = bot.lines([
  `[${bot.EMOJI_BOT} ${pkg.title}](${pkg.homepage})`, pkg.description
  ]);
- let embedding;
  // init ai engines
  // use AI vision, AI stt if ChatGPT or Gemini is enabled
  if (options.openaiApiKey || options.googleApiKey) {
@@ -21,55 +20,74 @@ const init = async (options = {}) => {
  // use openai embedding, dall-e, tts if openai is enabled
  if (options.openaiApiKey) {
  const apiKey = { apiKey: options.openaiApiKey };
- const ai = await alan.init({
- id: 'ChatGPT', provider: 'OPENAI', model: options?.chatGptModel,
- ...apiKey, priority: options?.chatGptPriority || 0, ...options
+ await alan.init({
+ provider: 'OPENAI', model: options.openaiModel || '*',
+ ...apiKey, priority: options.openaiPriority, ...options
  });
- embedding = ai.embedding;
  await image.init(apiKey);
  await speech.init({ ...apiKey, provider: 'OPENAI', ...speechOptions });
  _speech.tts = speech.tts;
  }
  // use gemini embedding if gemini is enabled and chatgpt is not enabled
  // use google tts if google api key is ready
- if (options?.googleApiKey) {
+ if (options.googleApiKey) {
  const apiKey = { apiKey: options.googleApiKey };
- const ai = await alan.init({
- id: 'Gemini', provider: 'GEMINI', model: options?.geminiModel,
- ...apiKey, priority: options?.geminiPriority || 1, ...options
+ await alan.init({
+ provider: 'GEMINI', model: options.geminiModel || '*',
+ ...apiKey, priority: options.geminiPriority, ...options
  });
- embedding || (embedding = ai.embedding);
  if (!_speech.tts) {
  await speech.init({
- ...apiKey, provider: 'GOOGLE', ...speechOptions,
+ provider: 'GOOGLE', ...apiKey, ...speechOptions,
  });
  _speech.tts = speech.tts;
  }
- options?.googleCx && await shot.initSearch({
- apiKey: options.googleApiKey, cx: options.googleCx
+ options.googleCx && await web.initSearch({
+ provider: 'GOOGLE', ...apiKey, cx: options.googleCx
+ });
+ }
+ if (options.anthropicApiKey) {
+ await alan.init({
+ provider: 'ANTHROPIC', model: options.anthropicModel || '*',
+ apiKey: options.anthropicApiKey,
+ priority: options.anthropicPriority, ...options
+ });
+ }
+ if (options.anthropicCredentials && options.anthropicProjectId) {
+ await alan.init({
+ provider: 'VERTEX ANTHROPIC', model: options.anthropicModel || '*',
+ credentials: options.anthropicCredentials,
+ projectId: options.anthropicProjectId,
+ priority: options.anthropicPriority, ...options
+ });
+ }
+ if (options.jinaApiKey) {
+ const apiKey = { apiKey: options.jinaApiKey };
+ await alan.init({
+ provider: 'JINA', model: options.jinaModel || '*',
+ ...apiKey, priority: options.jinaPriority, ...options
  });
+ await web.initSearch({ provider: 'Jina', ...apiKey });
  }
- if (options?.anthropicApiKey
- || (options?.anthropicCredentials && options?.anthropicProjectId)) {
+ if (options.azureApiKey && options.azureEndpoint) {
  await alan.init({
- id: 'Claude', provider: 'VERTEX ANTHROPIC', model: options?.anthropicModel,
- apiKey: options?.anthropicApiKey,
- credentials: options?.anthropicCredentials,
- projectId: options?.anthropicProjectId,
- priority: options?.anthropicPriority || 2, ...options
+ provider: 'AZURE', model: options.azureModel,
+ apiKey: options.azureApiKey, priority: options.azurePriority,
+ baseURL: options.azureEndpoint, ...options
  });
  }
- if (options?.azureApiKey && options?.azureEndpoint) {
+ if (options.azureOpenaiApiKey && options.azureOpenaiEndpoint) {
  await alan.init({
- id: 'Azure', provider: 'AZURE', model: options?.azureModel,
- apiKey: options?.azureApiKey, priority: options?.azurePriority || 3,
- baseURL: options?.azureEndpoint, ...options
+ provider: 'AZURE OPENAI', model: options.azureOpenaiModel,
+ apiKey: options.azureOpenaiApiKey,
+ priority: options.azureOpenaiPriority,
+ endpoint: options.azureOpenaiEndpoint, ...options
  });
  }
  if (options?.ollamaEnabled || options?.ollamaEndpoint) {
  await alan.init({
- id: 'Ollama', provider: 'OLLAMA', model: options?.ollamaModel,
- priority: options?.ollamaPriority || 99,
+ provider: 'OLLAMA', model: options?.ollamaModel || '*',
+ priority: options?.ollamaPriority,
  host: options?.ollamaEndpoint, ...options
  });
  }
@@ -88,7 +106,8 @@ const init = async (options = {}) => {
  chatType: options?.chatType,
  cmds: options?.cmds,
  database: options?.storage?.client && options?.storage,
- embedding, supportedMimeTypes,
+ embedding: ais.find(x => x.embedding)?.embedding,
+ supportedMimeTypes,
  hello: options?.hello,
  help: options?.help,
  homeGroup: options?.homeGroup,
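
Note on consuming this change: the visible effect of this file is a rename of the per-engine init options. The OpenAI engine now reads openaiModel / openaiPriority (previously chatGptModel / chatGptPriority), the hard-coded engine ids and priority defaults are gone, Azure OpenAI is split out from plain Azure into azureOpenai* options, and new Anthropic (direct and Vertex) and Jina blocks were added, with Jina also wiring up web search. Below is a minimal sketch of the options a caller would now pass to the init(options) function defined above; how the package exports init, and any option not visible in this diff (such as the Telegram token), are assumptions.

    // Options for the init(options) shown above. Names not visible in this diff
    // (e.g. telegramToken) are assumptions; values are placeholders.
    const options = {
        telegramToken: process.env.TELEGRAM_TOKEN,       // assumed, unchanged by this release
        // OpenAI: formerly chatGptModel / chatGptPriority
        openaiApiKey: process.env.OPENAI_API_KEY,
        openaiModel: '*',                                // '*' is now the fallback when no model is set
        // Gemini: the implicit priority default (1) is gone
        googleApiKey: process.env.GOOGLE_API_KEY,
        geminiModel: '*',
        // New engines in this release
        anthropicApiKey: process.env.ANTHROPIC_API_KEY,  // direct Anthropic; Vertex needs credentials + projectId
        jinaApiKey: process.env.JINA_API_KEY,            // also enables Jina web search
        // Azure OpenAI split out from plain Azure (azureApiKey / azureEndpoint)
        azureOpenaiApiKey: process.env.AZURE_OPENAI_API_KEY,
        azureOpenaiEndpoint: 'https://example.openai.azure.com/',  // illustrative endpoint
    };

Callers that still pass chatGptModel, chatGptPriority, or azureApiKey expecting Azure OpenAI appear to need updating for this release.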
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "halbot",
  "description": "Just another `ChatGPT` / `Gemini` / `Ollama` Telegram bob, which is simple design, easy to use, extendable and fun.",
- "version": "1993.2.59",
+ "version": "1993.2.61",
  "private": false,
  "homepage": "https://github.com/Leask/halbot",
  "type": "module",
@@ -33,10 +33,10 @@
  "@anthropic-ai/vertex-sdk": "^0.7.0",
  "@ffmpeg-installer/ffmpeg": "^1.1.0",
  "@ffprobe-installer/ffprobe": "^2.1.2",
- "@google-cloud/speech": "^6.7.1",
- "@google-cloud/text-to-speech": "^5.8.1",
- "@google-cloud/vision": "^4.3.3",
- "@google/generative-ai": "^0.24.0",
+ "@google-cloud/speech": "^7.0.0",
+ "@google-cloud/text-to-speech": "^6.0.0",
+ "@google-cloud/vision": "^5.0.0",
+ "@google/genai": "^0.4.0",
  "@mozilla/readability": "^0.6.0",
  "fluent-ffmpeg": "^2.1.3",
  "ioredis": "^5.6.0",
@@ -46,12 +46,12 @@
  "mime": "^4.0.6",
  "mysql2": "^3.13.0",
  "office-text-extractor": "^3.0.3",
- "openai": "^4.87.3",
- "pg": "^8.14.0",
+ "openai": "^4.87.4",
+ "pg": "^8.14.1",
  "pgvector": "^0.2.0",
  "telegraf": "^4.16.3",
  "tesseract.js": "^6.0.0",
- "utilitas": "^1999.1.10",
+ "utilitas": "^1999.1.24",
  "youtube-transcript": "^1.2.1"
  }
  }
@@ -1,6 +1,6 @@
  import { alan, bot, uoid, utilitas } from 'utilitas';
 
- const [EMIJI_FINISH, END, NEW, THREAD, CLR] = ['☑️', '❎', '✨', '🧵', '🆑'];
+ const [EMIJI_FINISH, END, NEW, THREAD] = ['☑️', '❎', '✨', '🧵'];
 
  const [CREATED, SWITCHED] = [
  `${NEW} Thread created: `, `${EMIJI_FINISH} Thread switched: `
@@ -8,14 +8,14 @@ const [CREATED, SWITCHED] = [
 
  // moved to help and configs
  const keyboards = [[
- { text: `/clear ${CLR}` },
+ { text: `/ai ${bot.EMOJI_BOT}` },
+ { text: `/new ${NEW}` },
  { text: `/end ${END}` },
  { text: `/list ${THREAD}` },
- { text: `/new ${NEW}` },
  ], [
  { text: '/polish ❇️' },
- { text: '/translate 🇨🇳' },
- { text: '/translate 🇺🇸' },
+ { text: '/to 🇨🇳' },
+ { text: '/to 🇺🇸' },
  ], [
  { text: '/help 🛟' },
  { text: '/set --tts=🔇' },
@@ -91,7 +91,7 @@ const action = async (ctx, next) => {
  ...options || {},
  ...options?.buttons ? {} : (options?.keyboards || { keyboards }),
  });
- const sendList = async (names, lastMsgId) => {
+ const listThreads = async (names, lastMsgId) => {
  lastMsgId = lastMsgId || ctx.update?.callback_query?.message?.message_id;
  const message = `${THREAD} Thread${ctx.session.sessions.length > 0 ? 's' : ''}:`;
  const buttons = ctx.session.sessions.map((x, i) => {
@@ -121,7 +121,7 @@ const action = async (ctx, next) => {
  + `${getLabel(findSession(ctx.session.sessionId))}\``);
  await ctx.clear();
  break;
- case 'clearall':
+ case 'endall':
  ctx.carry.threadInfo.push(`🔄 All threads have been cleared.`);
  resetSessions();
  break;
@@ -129,14 +129,14 @@ const action = async (ctx, next) => {
  resetSession();
  break;
  case 'list':
- const resp = await sendList();
+ const resp = await listThreads();
  utilitas.ignoreErrFunc(async () => {
  const sNames = await alan.analyzeSessions(
  ctx.session.sessions.filter(
  x => (x.labelUpdatedAt || 0) < x.touchedAt
  ).map(x => x.id), { ignoreRequest: bot.HELLO }
  );
- return await sendList(sNames, resp[0]?.message_id);
+ return await listThreads(sNames, resp[0]?.message_id);
  }, { log: true });
  return resp;
  case 'end':
@@ -157,7 +157,7 @@ const action = async (ctx, next) => {
  case 'switch':
  ctx.session.sessionId = utilitas.trim(ctx.cmd.args);
  await switchSession();
- await sendList();
+ await listThreads();
  return await switched();
  case 'factory':
  case 'reset':
@@ -168,18 +168,18 @@ const action = async (ctx, next) => {
  await next();
  };
 
- export const { name, run, priority, func, help, cmds, cmdx } = {
+ export const { name, run, priority, func, help, cmdx } = {
  name: 'Thread',
  run: true,
  priority: -8845,
  func: action,
  help: 'Thread management.',
  cmdx: {
- new: 'Create a new thread.',
- end: 'End current thread.',
- switch: 'Switch to a thread. Usage: /switch `THREAD_ID`.',
  clear: 'Clear current thread.',
- clearall: 'Clear all threads.',
+ end: 'End current thread.',
+ endall: 'End all threads.',
  list: 'List all threads.',
+ new: 'Create a new thread.',
+ switch: 'Switch to a thread. Usage: /switch `THREAD_ID`.',
  },
  };
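
This file also shows the module shape halbot's skills follow in this release: an object destructured into name, run, priority, func, help and a cmdx map of per-command help strings (the separate cmds export is dropped here), with func receiving (ctx, next) and calling next() to pass control on. Below is a minimal sketch of a custom skill written against that shape; it is hypothetical, and assumes the same ctx.cmd dispatch and ctx.ok reply helper used by the built-in skills, since the loader's exact contract is not part of this diff.

    // skills/echo.mjs — hypothetical example skill, not part of halbot.
    const action = async (ctx, next) => {
        if (ctx.cmd?.cmd === 'echo') {
            // Read the command arguments the same way the built-in skills do
            // (ctx.cmd.args) and reply via ctx.ok(...).
            return await ctx.ok(ctx.cmd.args || '(nothing to echo)');
        }
        await next(); // hand over to the next skill in the chain
    };

    export const { name, run, priority, func, help, cmdx } = {
        name: 'Echo',
        run: true,
        priority: 100,
        func: action,
        help: 'Echo back whatever follows the command.',
        cmdx: { echo: 'Echo back the given text. Usage: /echo TEXT.' },
    };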
@@ -1,20 +1,26 @@
  import { alan, bot, utilitas } from 'utilitas';
 
- const NAME_HACK = {
- 'ChatGPT': '⚛️', 'Gemini': '♊️', 'Claude': '✴️', 'Ollama': '🦙', 'Azure': '☁️',
- };
-
- const NAME_HACK_REVERSE = utilitas.reverseKeyValues(NAME_HACK);
- const AI_CMD = '/set --ai=';
+ const EMIJI_FINISH = '☑️';
 
  const action = async (ctx, next) => {
  const ais = await alan.getAi(null, { all: true });
- if (ctx.carry?.keyboards?.length && !ctx.carry.keyboards.find(
- x => x.find(y => y.text.includes(AI_CMD))
- )) {
- ctx.carry.keyboards.unshift(ais.slice(0, 3).map(
- x => ({ text: `${AI_CMD}${NAME_HACK[x.id] || x.id}` })
- ));
+ const listAIs = async () => {
+ const lastMessageId = ctx.update?.callback_query?.message?.message_id;
+ const message = `${bot.EMOJI_BOT} AI${ais.length > 0 ? 's' : ''}:`;
+ const buttons = ais.map(x => ({
+ label: `${ctx.session.config?.ai === x.id
+ ? `${EMIJI_FINISH} ` : ''}${x.name}: ${x.features}`,
+ text: `/ai ${x.id}`,
+ }));
+ return await ctx.ok(message, { lastMessageId, buttons });
+ };
+ switch (ctx.cmd?.cmd) {
+ case 'ai':
+ const aiId = utilitas.trim(ctx.cmd.args);
+ if (!aiId || aiId === bot.EMOJI_BOT) { return await listAIs(); }
+ assert(ais.find(x => x.id === aiId), 'No AI engine matched.');
+ ctx.session.config.ai = aiId;
+ return await listAIs();
  }
  switch (ctx.session.config?.ai) {
  case '@': ctx.selectedAi = ais.map(x => x.id); break;
@@ -52,37 +58,22 @@ const action = async (ctx, next) => {
  await next();
  };
 
- const validateAi = async val => {
- NAME_HACK_REVERSE[val] && (val = NAME_HACK_REVERSE[val]);
- for (let name of [...(
- await alan.getAi(null, { all: true })
- ).map(x => x.id), '', '@']) {
- if (utilitas.insensitiveCompare(val, name)) { return name; }
- }
- utilitas.throwError('No AI engine matched.');
- };
-
- export const { name, run, priority, func, help, args } = {
- name: 'Engine',
+ export const { name, run, priority, func, help, args, cmdx } = {
+ name: 'AI',
  run: true,
  priority: 10,
  func: action,
  help: bot.lines([
  '¶ Set initial prompt to the AI engine.',
  "Tip 1: Set `hello=''` to reset to default initial prompt.",
- '¶ Select between AI engines.',
- "Tip 2: Set `ai=''` to use default AI engine.",
- 'Tip 3: Set `ai=@` to use all AI engines simultaneously.',
  ]),
  args: {
  hello: {
  type: 'string', short: 's', default: 'You are a helpful assistant.',
  desc: "Change initial prompt: /set --hello 'Bonjour!'",
  },
- ai: {
- type: 'string', short: 'a', default: '',
- desc: "`(ChatGPT, Gemini, Claude, Azure, Ollama, @)` Select AI engine.",
- validate: validateAi,
- },
  },
+ cmdx: {
+ ai: 'List all available AIs.',
+ }
  };
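
In effect, engine selection moves from /set --ai=... (with the emoji aliases of the removed NAME_HACK map) to a dedicated /ai command: /ai with no argument lists the engines returned by alan.getAi(null, { all: true }) as buttons, and /ai followed by an engine id stores it in ctx.session.config.ai, rejecting unknown ids with 'No AI engine matched.'. A small hypothetical helper, shown only to illustrate how other code could read that selection using the same calls and fields (id, name) this diff relies on:

    import { alan } from 'utilitas';

    // Hypothetical helper, not part of halbot: resolve the engine picked via /ai.
    const currentAiName = async ctx => {
        const ais = await alan.getAi(null, { all: true });
        const picked = ais.find(x => x.id === ctx.session.config?.ai);
        return picked ? picked.name : 'default engine';
    };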
@@ -37,7 +37,7 @@ const action = async (ctx, next) => {
  ctx.result = bot.map(cnf);
  ctx.hello();
  break;
- case 'translate': promptTranslate(ctx, ctx.cmd.args || ctx.session.config?.lang || ctx._.lang); break;
+ case 'to': promptTranslate(ctx, ctx.cmd.args || ctx.session.config?.lang || ctx._.lang); break;
  case 'polish': promptPolish(ctx); break;
  case 'toen': promptTranslate(ctx, 'English'); break;
  case 'tofr': promptTranslate(ctx, 'French'); break;
@@ -1,16 +1,12 @@
- import { alan, bot, storage, utilitas } from 'utilitas';
+ import { alan, utilitas } from 'utilitas';
 
  const onProgress = { onProgress: true };
+ const LN2 = '\n\n';
  const [joinL1, joinL2] = [a => a.join(LN2), a => a.join(LN2)];
  const log = content => utilitas.log(content, import.meta.url);
- const enrich = m => m ? ` ${BOTS[m.split(':')[0]]
- ? `| ${BOTS[m.split(':')[0]]} ${m}` : `(${m})`}` : '';
- const [BOT, BOTS, LN2] = [`${bot.EMOJI_BOT} `, {
- ChatGPT: '⚛️', Gemini: '♊️', Claude: '✴️', Ollama: '🦙', 'deepseek-r1': '🐳',
- Azure: '☁️',
- }, '\n\n'];
 
  const action = async (ctx, next) => {
+ const ais = await alan.getAi(null, { all: true });
  if (!ctx.prompt && !ctx.carry.attachments.length) { return await next(); }
  const [YOU, msgs, tts, rsm, pms, extra, lock]
  = [`${ctx.avatar} You:`, {}, {}, {}, [], { buttons: [] }, 1000 * 5];
@@ -26,7 +22,7 @@ const action = async (ctx, next) => {
  const content = source[n] || '';
  pure.push(content);
  packed.push(joinL2([...options?.tts ? [] : [
- `${BOTS[n]} ${n}${enrich(rsm[n])}:`
+ `${ais.find(x => x.id === n).name}:`
  ], content]));
  });
  return pure.join('').trim().length ? joinL1(packed) : '';
@@ -69,7 +65,6 @@ const action = async (ctx, next) => {
  tts[ai] = ctx.selectedAi.length === 1
  && !msgs[ai].split('\n').some(x => /^\s*```/.test(x))
  ? resp.spoken : '';
- rsm[ai] = resp.model;
  for (let img of resp?.images || []) {
  await ctx.image(img.data, { caption: `🎨 by ${resp.model}` });
  await ctx.timeout();