halbot 1994.1.7 → 1995.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -133,10 +133,6 @@ All supported configuration fields:
     // To restrict the bot to PRIVATE, set chat/group/channel ids in this array.
     "private": ["[[CHAT_ID]]", "[[GROUP_ID]]", "[[CHANNEL_ID]]", ...],
 
-    // OPTIONAL, string.
-    // Set some of these fields if you want to use a `magic word` to authenticate the bot.
-    "magicWord": "[[Your Magic Word here]]",
-
     // OPTIONAL, string.
     // Use a HOME GROUP to authentication users.
     // Anyone in this group can access the bot.
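The only README change is the removal of the optional `magicWord` authentication field. Configs that still set it should move to one of the remaining access controls visible in the surrounding context: the `private` allow-list or the home group. A minimal post-upgrade sketch, assuming the config field matches the `homeGroup` option name used in index.mjs (all values are placeholders):

```js
{
    // Restrict the bot to PRIVATE chats/groups/channels.
    "private": ["[[CHAT_ID]]", "[[GROUP_ID]]"],
    // Anyone in this group can access the bot.
    "homeGroup": "[[GROUP_ID]]",
    // "magicWord" is no longer supported as of 1995.1.2.
}
```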
package/bin/halbot.mjs CHANGED
@@ -10,7 +10,7 @@ const _getConfig = async () => await _storage.getConfig();
 const getConfig = async key => (await _getConfig())?.config?.[key];
 
 let storage = {
-    provider: 'FILE',
+    provider: _storage.FILE,
     get: async key => (await getConfig(MEMORY))?.[key],
     set: async (k, v) => await _storage.setConfig({ [MEMORY]: { [k]: v } }),
 };
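In bin/halbot.mjs, the storage provider is now referenced through the `_storage.FILE` constant instead of the bare string `'FILE'`. The benefit of the pattern in general (a generic sketch, not utilitas' actual code): a misspelled named import fails at module load, while a misspelled string fails silently at runtime.

```js
// provider.mjs — generic sketch of the constant-over-string pattern
export const FILE = 'FILE';

// caller.mjs
import { FILE } from './provider.mjs'; // a typo in the name throws at load time
const storage = { provider: FILE };    // with the 'FILE' literal, a typo would
                                       // only surface much later, downstream
```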
package/index.mjs CHANGED
@@ -1,63 +1,65 @@
-import { alan, bot, embedding, gen, web, speech, utilitas } from 'utilitas';
+import { alan, bot, rag, web, utilitas } from 'utilitas';
 import * as hal from './lib/hal.mjs';
 
 await utilitas.locate(utilitas.__(import.meta.url, 'package.json'));
-const skillPath = utilitas.__(import.meta.url, 'skills');
+const pipelinePath = utilitas.__(import.meta.url, 'pipeline');
 
 const init = async (options = {}) => {
     assert(options.telegramToken, 'Telegram Bot API Token is required.');
-    let [pkg, _speech, _embedding, speechOptions, vision, opts] = [
-        await utilitas.which(), options?.speech || {}, options?.embedding,
-        { tts: true, stt: true }, {}, null,
-    ];
+    let [pkg, _embed, _rerank, opts] =
+        [await utilitas.which(), options?.embed, options?.rerank, null];
     const info = bot.lines([
-        `[${hal.EMOJI_BOT} ${pkg.title}](${pkg.homepage})`, pkg.description
+        `[${bot.BOT} ${pkg.title}](${pkg.homepage})`, pkg.description
     ]);
-    // use AI vision, AI stt if OpenRouter, Gemini or OpenAI is enabled
-    if (options.openrouterApiKey
-        || options.openaiApiKey || options.googleApiKey) {
-        vision.read = alan.distillFile;
-        vision.see = alan.distillFile;
-        _speech?.stt || (_speech.stt = alan.distillFile);
-    }
-    // use embedding if OpenRouter is enabled
-    if (options.openrouterApiKey && !_embedding) {
-        await embedding.init({
-            provider: 'OPENROUTER', apiKey: options.openrouterApiKey,
-        });
-        _embedding = embedding.embed;
+    // use google's search if google is enabled
+    options.googleApiKey && options.googleCx && await web.initSearch({
+        provider: 'GOOGLE', apiKey: options.googleApiKey, cx: options.googleCx,
+    });
+    // use openrouter's AI models, embedding if OpenRouter is enabled
+    if (options.openrouterApiKey) {
+        opts = { provider: 'OPENROUTER', apiKey: options.openrouterApiKey };
+        await alan.init({
+            ...opts,
+            model: options.openrouterModel || '*',
+            priority: options.openrouterPriority, ...options,
+        })
+        if (!_embed) {
+            await rag.initEmbedding(opts);
+            _embed = rag.embed;
+        }
     }
-    // use google's imagen, veo, search, tts if google is enabled
+    // use google's imagen, veo, tts if google is enabled
     if (options.googleApiKey) {
         opts = { provider: 'GOOGLE', apiKey: options.googleApiKey };
-        await gen.init(opts);
-        options.googleCx && await web.initSearch({
-            ...opts, cx: options.googleCx,
+        await alan.init({
+            ...opts, model: options.googleModel || '*',
+            priority: options.googlePriority, ...options,
         });
-        if (!_speech.tts) {
-            await speech.init({ ...opts, ...speechOptions });
-            _speech.tts = speech.tts;
-        }
     }
-    // use openai's dall-e, embedding, tts if openai is enabled, and google is not
+    // use openai's embedding, tts if openai is enabled, and google is not
     if (options.openaiApiKey) {
         opts = { provider: 'OPENAI', apiKey: options.openaiApiKey };
-        await gen.init(opts);
-        if (!_embedding) {
-            await embedding.init(opts);
-            _embedding = embedding.embed;
+        await alan.init({
+            ...opts, model: options.openaiModel || '*',
+            priority: options.openaiPriority, ...options,
+        });
+        if (!_embed) {
+            await rag.initEmbedding(opts);
+            _embed = rag.embed;
         }
-        if (!_speech.tts) {
-            await speech.init({ ...opts, ...speechOptions });
-            _speech.tts = speech.tts;
+    }
+    // use google rerank if google is enabled
+    if (options?.googleCredentials && options.googleProjectId) {
+        opts = {
+            provider: 'GOOGLE', credentials: options.googleCredentials,
+            projectId: options.googleProjectId,
+        };
+        if (!_rerank) {
+            await rag.initReranker(opts);
+            _rerank = rag.rerank;
         }
     }
-    // init ai providers
-    options.openrouterApiKey && await alan.init({
-        provider: 'OPENROUTER', apiKey: options.openrouterApiKey,
-        model: options.openrouterModel || '*',
-        priority: options.openrouterPriority, ...options,
-    });
+    // init other ai providers
     options.siliconflowApiKey && await alan.init({
         provider: 'SILICONFLOW', apiKey: options.siliconflowApiKey,
         model: options.siliconflowModel || '*',
@@ -70,6 +72,10 @@ const init = async (options = {}) => {
             priority: options.jinaPriority, ...options
         });
         await web.initSearch(opts);
+        if (!_rerank) {
+            await rag.initReranker(opts);
+            _rerank = rag.rerank;
+        }
     }
     if (options?.ollamaEnabled || options?.ollamaEndpoint) {
         await alan.init({
@@ -78,41 +84,26 @@ const init = async (options = {}) => {
             host: options?.ollamaEndpoint, ...options
         });
     }
-    const { ais } = await alan.initChat({ sessions: options?.storage });
-    const cmds = options?.cmds || [];
-    // config multimodal engines
-    const supportedMimeTypes = new Set(Object.values(ais).map(x => {
-        // init instant ai selection
-        cmds.push(hal.newCommand(`ai_${x.id}`, `${x.name}: ${x.features}`));
-        return x.model;
-    }).map(x => [
-        ...x.supportedMimeTypes,
-        ...x.supportedDocTypes,
-        ...x.supportedAudioTypes,
-    ]).flat().map(x => x.toLowerCase()));
     // init hal
     const _hal = await hal.init({
         args: options?.args,
         auth: options?.auth,
         botToken: options?.telegramToken,
         chatType: options?.chatType,
-        cmds,
-        database: options?.storage?.client && options?.storage,
-        supportedMimeTypes,
+        cmds: options?.cmds,
+        embed: _embed,
         hello: options?.hello,
         help: options?.help,
         homeGroup: options?.homeGroup,
         info: options?.info || info,
-        magicWord: options?.magicWord,
+        lang: options?.lang || 'English',
+        pipeline: options?.pipeline,
+        pipelinePath: options?.pipelinePath || pipelinePath,
         private: options?.private,
         provider: 'telegram',
-        session: options?.storage,
-        skillPath: options?.skillPath || skillPath,
-        embedding: _embedding, speech: _speech, vision,
+        rerank: _rerank,
+        storage: options?.storage,
     });
-    _hal._.lang = options?.lang || 'English';
-    _hal._.gen = options?.gen
-        || (options?.openaiApiKey || geminiGenReady ? gen : null);
     return _hal;
 };
 
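Taken together, the index.mjs changes collapse the separate `embedding`, `gen`, and `speech` modules into two: `alan` now handles per-provider model initialization (with new `googleModel`, `openaiModel`, and per-provider priority options), and `rag` supplies embedding (`rag.initEmbedding`/`rag.embed`) and reranking (`rag.initReranker`/`rag.rerank`). The extension hook is renamed from `skillPath` to `pipelinePath`, `session` becomes `storage`, and the `magicWord` option plus the internally built `speech`/`vision` plumbing are gone; `embed` and `rerank` are the remaining injection points. A hypothetical caller after the upgrade, using only option names that appear in this diff (the default-export entry point is assumed; values are placeholders):

```js
import halbot from 'halbot'; // assumption: the default export wraps init()

await halbot({
    telegramToken: '[[TELEGRAM_BOT_TOKEN]]',    // still required
    openrouterApiKey: '[[OPENROUTER_KEY]]',     // models + default embedding
    googleApiKey: '[[GOOGLE_KEY]]',             // imagen/veo/tts models
    googleCx: '[[SEARCH_ENGINE_ID]]',           // with googleApiKey: web search
    googleCredentials: '[[CREDENTIALS]]',       // with googleProjectId: reranker
    googleProjectId: '[[PROJECT_ID]]',
    pipelinePath: './pipeline',                 // replaces the old skillPath
    // magicWord is gone; use `private` or `homeGroup` for access control
});
```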