halbot 1992.1.6 → 1992.1.8

This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in their respective public registries.
package/index.mjs CHANGED
@@ -37,38 +37,52 @@ const init = async (options) => {
37
37
  let embedding;
38
38
  // init ai engines
39
39
  if (options?.openaiApiKey || options?.chatGptApiKey) {
40
- const apiKey = { apiKey: options?.openaiApiKey || options?.chatGptApiKey };
41
- await alan.init({ ...apiKey, ...options, provider: 'OPENAI' });
40
+ await alan.init({
41
+ provider: 'OPENAI',
42
+ apiKey: options?.openaiApiKey || options?.chatGptApiKey,
43
+ ...options || {},
44
+ });
42
45
  ai['ChatGPT'] = {
43
- engine: 'CHATGPT', priority: options?.chatGptPriority || 0, // ASSISTANT
46
+ engine: 'CHATGPT', priority: options?.chatGptPriority || 0,
44
47
  };
45
- engines['CHATGPT'] = { // ASSISTANT
48
+ engines['CHATGPT'] = {
46
49
  // only support custom model while prompting
47
50
  model: options?.chatGptModel,
48
51
  };
49
52
  }
50
- if (options?.googleCredentials && options?.googleProject) {
53
+ if (options?.googleApiKey) {
51
54
  await alan.init({
52
- provider: 'VERTEX',
53
- credentials: options.googleCredentials,
54
- project: options.googleProject,
55
- // only support custom model while initiating
56
- model: options?.geminiModel,
55
+ provider: 'GEMINI', apiKey: options?.googleApiKey,
56
+ model: options?.geminiModel, // only support custom model while initiating
57
+ ...options || {},
57
58
  });
58
59
  ai['Gemini'] = {
59
- engine: 'VERTEX', priority: options?.geminiPriority || 1,
60
+ engine: 'GEMINI', priority: options?.geminiPriority || 1,
60
61
  };
61
- engines['VERTEX'] = {
62
+ engines['GEMINI'] = {
62
63
  // save for reference not for prompting
63
64
  model: options?.geminiModel,
64
65
  };
65
66
  }
67
+ if (options?.claudeApiKey) {
68
+ await alan.init({
69
+ provider: 'CLAUDE', apiKey: options?.claudeApiKey,
70
+ ...options || {},
71
+ });
72
+ ai['Claude'] = {
73
+ engine: 'CLAUDE', priority: options?.claudePriority || 2,
74
+ };
75
+ engines['CLAUDE'] = {
76
+ // only support custom model while prompting
77
+ model: options?.claudeModel,
78
+ };
79
+ }
66
80
  if (options?.mistralEnabled || options?.mistralEndpoint) {
67
81
  await alan.init({
68
82
  provider: 'OLLAMA', endpoint: options?.mistralEndpoint,
69
83
  });
70
84
  ai['Mistral'] = {
71
- engine: 'OLLAMA', priority: options?.mistralPriority || 2,
85
+ engine: 'OLLAMA', priority: options?.mistralPriority || 3,
72
86
  };
73
87
  engines['OLLAMA'] = {
74
88
  // only support custom model while prompting
@@ -86,7 +100,7 @@ const init = async (options) => {
86
100
  } else if (options?.googleApiKey) {
87
101
  const apiKey = { apiKey: options.googleApiKey };
88
102
  await speech.init({ ...apiKey, provider: 'GOOGLE', ...speechOptions });
89
- embedding = alan.createVertexEmbedding;
103
+ embedding = alan.createGeminiEmbedding;
90
104
  }
91
105
  // init vision engine
92
106
  const supportedMimeTypes = new Set(Object.values(engines).map(
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "halbot",
3
3
  "description": "Just another `ChatGPT` / `Gemini` / `Mistral (by ollama)` Telegram bob, which is simple design, easy to use, extendable and fun.",
4
- "version": "1992.1.6",
4
+ "version": "1992.1.8",
5
5
  "private": false,
6
6
  "homepage": "https://github.com/Leask/halbot",
7
7
  "type": "module",
@@ -29,29 +29,29 @@
29
29
  "url": "https://github.com/Leask/halbot.git"
30
30
  },
31
31
  "dependencies": {
32
+ "@anthropic-ai/sdk": "^0.33.1",
32
33
  "@ffmpeg-installer/ffmpeg": "^1.1.0",
33
34
  "@ffprobe-installer/ffprobe": "^2.1.2",
34
- "@google-cloud/aiplatform": "^3.26.0",
35
35
  "@google-cloud/speech": "^6.7.0",
36
- "@google-cloud/text-to-speech": "^5.3.0",
37
- "@google-cloud/vertexai": "^1.4.1",
38
- "@google-cloud/vision": "^4.3.1",
36
+ "@google-cloud/text-to-speech": "^5.7.0",
37
+ "@google-cloud/vision": "^4.3.2",
38
+ "@google/generative-ai": "^0.21.0",
39
39
  "@mozilla/readability": "^0.5.0",
40
- "csv-parse": "^5.5.6",
40
+ "csv-parse": "^5.6.0",
41
41
  "fluent-ffmpeg": "^2.1.3",
42
- "ioredis": "^5.4.1",
43
- "js-tiktoken": "^1.0.14",
44
- "jsdom": "^24.1.1",
45
- "mime": "^4.0.4",
46
- "mysql2": "^3.11.0",
42
+ "ioredis": "^5.4.2",
43
+ "js-tiktoken": "^1.0.16",
44
+ "jsdom": "^25.0.1",
45
+ "mime": "^4.0.6",
46
+ "mysql2": "^3.12.0",
47
47
  "office-text-extractor": "^3.0.3",
48
- "ollama": "^0.5.8",
49
- "openai": "^4.56.0",
50
- "pg": "^8.12.0",
48
+ "ollama": "^0.5.11",
49
+ "openai": "^4.77.0",
50
+ "pg": "^8.13.1",
51
51
  "pgvector": "^0.2.0",
52
52
  "telegraf": "^4.16.3",
53
- "tesseract.js": "^5.1.0",
54
- "utilitas": "^1997.1.7",
53
+ "tesseract.js": "^5.1.1",
54
+ "utilitas": "^1997.1.32",
55
55
  "youtube-transcript": "^1.2.1"
56
56
  }
57
57
  }
@@ -48,7 +48,7 @@ export const { name, run, priority, func, help, args } = {
48
48
  },
49
49
  ai: {
50
50
  type: 'string', short: 'a', default: '',
51
- desc: "`(ChatGPT, Gemini, Mistral, @)` Select AI engine.",
51
+ desc: "`(ChatGPT, Gemini, Claude, Mistral, @)` Select AI engine.",
52
52
  validate: validateAi,
53
53
  },
54
54
  render: {
@@ -22,6 +22,13 @@ const action = async (ctx, next) => {
22
22
  ctx.selectedAi = ['Gemini'];
23
23
  ctx.hello(ctx.cmd.args);
24
24
  break;
25
+ case 'claude':
26
+ if (!utilitas.insensitiveHas(allAi, 'claude')) {
27
+ return await ctx.er('Claude is not available.');
28
+ }
29
+ ctx.selectedAi = ['Claude'];
30
+ ctx.hello(ctx.cmd.args);
31
+ break;
25
32
  case 'mistral':
26
33
  if (!utilitas.insensitiveHas(allAi, 'mistral')) {
27
34
  return await ctx.er('Mistral is not available.');
@@ -45,6 +52,7 @@ export const { name, run, priority, func, help, cmds } = {
45
52
  all: 'Use all AI engines simultaneously: /all Say hello to all AIs!',
46
53
  chatgpt: 'Use ⚛️ ChatGPT temporary: /chatgpt Say hello to ChatGPT!',
47
54
  gemini: 'Use ♊️ Gemini temporary: /gemini Say hello to Gemini!',
55
+ claude: 'Use ✴️ Claude temporary: /claude Say hello to Claude!',
48
56
  mistral: 'Use Ⓜ️ Mistral temporary: /mistral Say hello to Mistral!',
49
57
  },
50
58
  };
@@ -2,15 +2,17 @@ import { alan, bot, utilitas } from 'utilitas';
2
2
 
3
3
  const onProgress = { onProgress: true };
4
4
  const [joinL1, joinL2] = [a => a.join(LN2), a => a.join(LN2)];
5
- const enrich = name => name === 'VERTEX' ? 'Gemini' : name;
5
+ const enrich = name => name; // Human readable name, eg: 'VERTEX' => 'Gemini'
6
6
  const log = content => utilitas.log(content, import.meta.url);
7
- const [BOT, BOTS, LN2]
8
- = [`${bot.EMOJI_BOT} `, { ChatGPT: '⚛️', Gemini: '♊️', Mistral: 'Ⓜ️' }, '\n\n'];
7
+ const [BOT, BOTS, LN2] = [`${bot.EMOJI_BOT} `, {
8
+ ChatGPT: '⚛️', Gemini: '♊️', Claude: '✴️', Mistral: 'Ⓜ️',
9
+ }, '\n\n'];
9
10
 
10
11
  const action = async (ctx, next) => {
11
12
  if (!ctx.prompt) { return await next(); }
12
- const [YOU, msgs, tts, pms, extra] = [`${ctx.avatar} You:`, {}, {}, [], {}];
13
- let [lastMsg, lastSent] = [null, 0];
13
+ const [YOU, msgs, tts, pms, extra]
14
+ = [`${ctx.avatar} You:`, {}, {}, [], { buttons: [] }];
15
+ let [lastMsg, lastSent, references] = [null, 0, null];
14
16
  const packMsg = options => {
15
17
  const said = !options?.tts && ctx.result ? ctx.result : '';
16
18
  const packed = [
@@ -35,9 +37,14 @@ const action = async (ctx, next) => {
35
37
  )) { return; }
36
38
  [lastSent, lastMsg] = [curTime, curMsg];
37
39
  const cmd = ctx.session.context?.cmd;
38
- options?.final && cmd && (extra.buttons = [{
39
- label: `❎ End context: \`${cmd}\``, text: '/clear',
40
- }]);
40
+ if (options?.final) {
41
+ (references?.links || []).map((x, i) => extra.buttons.push({
42
+ label: `${i + 1}. ${x.title}`, url: x.uri,
43
+ }));
44
+ cmd && (extra.buttons.push({
45
+ label: `❎ End context: \`${cmd}\``, text: '/clear',
46
+ }));
47
+ }
41
48
  return await ctx.ok(curMsg, {
42
49
  ...ctx.carry.keyboards ? { keyboards: ctx.carry.keyboards } : {},
43
50
  md: true, ...extra, ...options || {},
@@ -54,8 +61,9 @@ const action = async (ctx, next) => {
54
61
  ctx.carry.threadInfo.length || await ok(onProgress);
55
62
  },
56
63
  });
57
- msgs[n] = ctx.session.config?.render === true
58
- ? resp.rendered : resp.text;
64
+ references = resp.references;
65
+ msgs[n] = ctx.session.config?.render === false
66
+ ? resp.text : resp.rendered;
59
67
  tts[n] = ctx.selectedAi.length === 1
60
68
  && !msgs[n].split('\n').some(x => /^\s*```/.test(x))
61
69
  ? resp.spoken : '';