halbot 1990.1.118 → 1990.1.120

package/README.md CHANGED
@@ -67,26 +67,41 @@ All supported configuration fields:
     // REQUIRED, string.
     "telegramToken": "[[Telegram Bot API Token]]",
 
-    // OPTIONAL, string.
     // Set some of these fields if you need ChatGPT features.
+    // OPTIONAL, string.
     "openaiApiKey": "[[OpenAI API Key]]",
+    // OPTIONAL, string.
     "openaiEndpoint": "[[Custom OpenAI API endpoint]]",
+    // OPTIONAL, string, default: "gpt-3.5-turbo".
     "chatGptModel": "[[Custom ChatGPT Model ID]]",
+    // OPTIONAL, integer, default: 0.
+    "chatGptPriority": "[[Custom ChatGPT Priority]]",
 
-    // OPTIONAL, string.
     // Set some of these fields if you need to use custom ChatGPT API.
+    // OPTIONAL, string.
     "chatGptApiKey": "[[Custom ChatGPT API Key]]",
+    // OPTIONAL, string.
     "chatGptEndpoint": "[[Custom ChatGPT API endpoint]]",
 
-    // OPTIONAL, string.
     // Set this field if you need Gemini features.
+    // OPTIONAL, string.
     "googleCredentials": "[[Google Cloud Credentials]]",
+    // OPTIONAL, string.
     "googleProject": "[[Google Cloud Project ID]]",
+    // OPTIONAL, string, default: "gemini-pro-vision".
+    "geminiModel": "[[Custom Gemini Model ID]]",
+    // OPTIONAL, integer, default: 1.
+    "geminiPriority": "[[Custom Gemini Priority]]",
 
-    // OPTIONAL, boolean.
     // Set this field if you need Mistral features.
+    // OPTIONAL, boolean.
     "mistralEnabled": "[[Enable Mistral hosted by Ollama]]",
+    // OPTIONAL, string.
     "mistralEndpoint": "[[Custom Mistral API endpoint]]",
+    // OPTIONAL, string, default: "Mistral" (Mistral 7B).
+    "mistralModel": "[[Custom Mistral Model ID]]",
+    // OPTIONAL, integer, default: 2.
+    "mistralPriority": "[[Custom Mistral Priority]]",
 
     // OPTIONAL, string.
     // Set this field if you need TTS/STT/OCR/OBJECT_DETECT features.
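For reference, here is a minimal sketch of a config that exercises the new fields documented above (`chatGptPriority`, `geminiModel`, `geminiPriority`, `mistralModel`, `mistralPriority`). The field names come from the README hunk; every value is an illustrative placeholder, not something pulled from the package.

```js
// Illustrative config sketch only; replace every value with your own.
const config = {
    telegramToken: '123456:ABC-DEF...',     // REQUIRED
    openaiApiKey: 'sk-...',                 // enables ChatGPT features
    chatGptModel: 'gpt-3.5-turbo',          // optional; documented default
    chatGptPriority: 0,                     // lower number = preferred engine
    googleCredentials: '/path/to/credentials.json',
    googleProject: 'my-gcp-project',
    geminiModel: 'gemini-pro-vision',       // optional; documented default
    geminiPriority: 1,
    mistralEnabled: true,                   // Mistral hosted by Ollama
    mistralModel: 'Mistral',                // optional; "Mistral" (Mistral 7B) by default
    mistralPriority: 2,
};
export default config;
```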
package/index.mjs CHANGED
@@ -1,6 +1,5 @@
 import { alan, bot, image, shot, speech, utilitas, vision } from 'utilitas';
 import { parse } from 'csv-parse/sync';
-import { end } from 'utilitas/lib/event.mjs';
 
 await utilitas.locate(utilitas.__(import.meta.url, 'package.json'));
 const log = content => utilitas.log(content, 'halbot');
@@ -35,47 +34,69 @@ const init = async (options) => {
     const info = bot.lines([
         `[${bot.EMOJI_BOT} ${pkg.title}](${pkg.homepage})`, pkg.description
     ]);
+    // init ai engines
     if (options?.openaiApiKey || options?.chatGptApiKey) {
         const apiKey = { apiKey: options?.chatGptApiKey || options?.openaiApiKey };
         await alan.init({
             provider: 'openai', ...apiKey,
             baseURL: options?.chatGptEndpoint || options?.openaiEndpoint,
         });
-        ai['ChatGPT'] = { engine: 'CHATGPT' };
-        engines['CHATGPT'] = { model: options?.chatGptModel };
-    }
-    if (options?.openaiApiKey) {
-        const apiKey = { apiKey: options.openaiApiKey };
-        await speech.init({ ...apiKey, provider: 'OPENAI', ...speechOptions });
-        await image.init(apiKey);
-    }
-    if (options?.googleApiKey) {
-        const apiKey = { apiKey: options.googleApiKey };
-        await vision.init(apiKey);
-        options?.openaiApiKey || await speech.init({
-            ...apiKey, provider: 'GOOGLE', ...speechOptions,
-        });
+        ai['ChatGPT'] = {
+            engine: 'CHATGPT', priority: options?.chatGptPriority || 0,
+        };
+        engines['CHATGPT'] = {
+            // only support custom model while prompting
+            model: options?.chatGptModel,
+        };
     }
     if (options?.googleCredentials && options?.googleProject) {
         await alan.init({
             provider: 'VERTEX',
             credentials: options.googleCredentials,
             project: options.googleProject,
+            // only support custom model while initiating
+            model: options?.geminiModel,
         });
-        ai['Gemini'] = { engine: 'VERTEX' };
-        engines['VERTEX'] = {};
+        ai['Gemini'] = {
+            engine: 'VERTEX', priority: options?.geminiPriority || 1,
+        };
+        engines['VERTEX'] = {
+            // save for reference not for prompting
+            model: options?.geminiModel,
+        };
     }
     if (options?.mistralEnabled || options?.mistralEndpoint) {
         await alan.init({
-            provider: 'OLLAMA',
-            endpoint: options?.mistralEndpoint,
-            model: options?.mistralModel,
+            provider: 'OLLAMA', endpoint: options?.mistralEndpoint,
         });
-        ai['Mistral'] = { engine: 'OLLAMA' };
-        engines['OLLAMA'] = {};
+        ai['Mistral'] = {
+            engine: 'OLLAMA', priority: options?.mistralPriority || 2,
+        };
+        engines['OLLAMA'] = {
+            // only support custom model while prompting
+            model: options?.mistralModel,
+        };
     }
-    await alan.initChat({ engines, sessions: options?.storage });
     assert(utilitas.countKeys(ai), 'No AI provider is configured.');
+    await alan.initChat({ engines, sessions: options?.storage });
+    // init image, speech engines
+    if (options?.openaiApiKey) {
+        const apiKey = { apiKey: options.openaiApiKey };
+        await image.init(apiKey);
+        await speech.init({ ...apiKey, provider: 'OPENAI', ...speechOptions });
+    } else if (options?.googleApiKey) {
+        const apiKey = { apiKey: options.googleApiKey };
+        await speech.init({ ...apiKey, provider: 'GOOGLE', ...speechOptions });
+    }
+    // init vision engine
+    const supportedMimeTypes = new Set(Object.values(engines).map(
+        x => alan.MODELS[x.model]
+    ).map(x => x.supportedMimeTypes || []).flat().map(x => x.toLowerCase()));
+    if (options?.googleApiKey) {
+        const apiKey = { apiKey: options.googleApiKey };
+        await vision.init(apiKey);
+    }
+    // init bot
     const _bot = await bot.init({
         args: options?.args,
         auth: options?.auth,
@@ -93,6 +114,7 @@ const init = async (options) => {
         skillPath: options?.skillPath || skillPath,
         speech: (options?.openaiApiKey || options?.googleApiKey) && speech,
         vision: options?.googleApiKey && vision,
+        supportedMimeTypes,
     });
     _bot._.ai = ai; // Should be an array of a map of AIs.
     _bot._.lang = options?.lang || 'English';
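The most subtle part of the rewritten `init` above is the new `supportedMimeTypes` set, which is collected from the models registered in `engines` and then handed to `bot.init`. Below is a minimal sketch of that derivation, using a stand-in `MODELS` table in place of the real `alan.MODELS` from utilitas; the model names and MIME lists are illustrative only.

```js
// Stand-in for utilitas' alan.MODELS; entries and MIME lists are illustrative.
const MODELS = {
    'gpt-4-vision-preview': { supportedMimeTypes: ['image/PNG', 'image/jpeg'] },
    'gemini-pro-vision': { supportedMimeTypes: ['image/png', 'video/mp4'] },
};
// Shape matches what init() builds: one entry per configured engine.
const engines = {
    CHATGPT: { model: 'gpt-4-vision-preview' },
    VERTEX: { model: 'gemini-pro-vision' },
};
// Same pipeline as the diff: look up each engine's model, merge the MIME lists,
// lowercase them, and dedupe via a Set (optional chaining added here to cover
// engines whose model field is unset).
const supportedMimeTypes = new Set(Object.values(engines).map(
    x => MODELS[x.model]
).map(x => x?.supportedMimeTypes || []).flat().map(x => x.toLowerCase()));
console.log(supportedMimeTypes); // Set(3) { 'image/png', 'image/jpeg', 'video/mp4' }
```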
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
     "name": "halbot",
     "description": "Just another ChatGPT/Bing Chat Telegram bot, which is simple design, easy to use, extendable and fun.",
-    "version": "1990.1.118",
+    "version": "1990.1.120",
     "private": false,
     "homepage": "https://github.com/Leask/halbot",
     "type": "module",
@@ -4,7 +4,10 @@ let configuredAi;
 
 const action = async (ctx, next) => {
     ctx.isDefaultAi = name => name === ctx.firstAi;
-    ctx.firstAi = (configuredAi = Object.keys(ctx._.ai))[0];
+    const arrSort = (configuredAi = Object.keys(ctx._.ai)).map(
+        k => [k, ctx._.ai[k].priority]
+    ).sort((x, y) => x[1] - y[1]);
+    ctx.firstAi = arrSort[0][0];
     switch (ctx.session.config?.ai) {
         case '': ctx.selectedAi = [ctx.firstAi]; break;
         case '@': ctx.selectedAi = configuredAi; break;
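The hunk above replaces "first configured key wins" with an explicit sort on each engine's `priority` (lower wins). Here is a self-contained sketch of the same selection logic, with made-up `ctx._.ai` contents whose priorities mirror the defaults set in index.mjs.

```js
// Hypothetical ctx._.ai contents; priorities follow the index.mjs defaults
// (ChatGPT 0, Gemini 1, Mistral 2), but any integers would work.
const ai = {
    Mistral: { engine: 'OLLAMA', priority: 2 },
    ChatGPT: { engine: 'CHATGPT', priority: 0 },
    Gemini: { engine: 'VERTEX', priority: 1 },
};
// Same sort as the skill: pair each name with its priority, order ascending,
// and take the name of the first pair as the default engine.
const arrSort = Object.keys(ai).map(k => [k, ai[k].priority])
    .sort((x, y) => x[1] - y[1]);
const firstAi = arrSort[0][0];
console.log(firstAi); // 'ChatGPT', even though Mistral was listed first
```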
@@ -4,7 +4,7 @@ const action = async (ctx, next) => {
     if (!ctx.cmd.args) {
         return await ctx.ok('Please input your prompt.');
     }
-    const objMsg = (await ctx.ok('✍️'))[0];
+    const objMsg = (await ctx.ok('💭'))[0];
     const images = await ctx._.image.generate(ctx.cmd.args, { expected: 'URL' });
     await ctx.deleteMessage(objMsg.message_id);
     for (let image of images) {
@@ -14,15 +14,15 @@ const action = async (ctx, next) => {
 };
 
 export const { name, run, priority, func, cmds, help } = {
-    name: 'Image',
+    name: 'Dream',
     run: true,
     priority: 40,
     func: action,
     help: bot.lines([
         'Use DALL-E to generate images.',
-        'Example: /image a cat',
+        'Example: /dream a cat',
     ]),
     cmds: {
-        image: 'Use DALL-E to generate images: /image `PROMPT`',
+        image: 'Use DALL-E to generate images: /dream `PROMPT`',
     },
 };
@@ -17,15 +17,16 @@ const action = async (ctx, next) => {
     }
     // prompt
    const maxInputTokens = alan.getMaxChatPromptLimit();
-    const additionInfo = ctx.collected.length ? ctx.collected.map(
-        x => x.content
-    ).join('\n').split(' ') : [];
+    const additionInfo = ctx.collected.filter(
+        x => String.isString(x.content)
+    ).map(x => x.content).join('\n').split(' ').filter(x => x);
     ctx.prompt = (ctx.text || '') + '\n\n';
     while (alan.countTokens(ctx.prompt) < maxInputTokens
         && additionInfo.length) {
         ctx.prompt += ` ${additionInfo.shift()}`;
     }
     ctx.prompt = utilitas.trim(ctx.prompt);
+    ctx.carry.attachments = ctx.collected.filter(x => x.type === 'PROMPT').map(x => x.content);
     additionInfo.filter(x => x).length && (ctx.prompt += '...');
     // next
     await next();
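The prompt hunk above now splits `ctx.collected` two ways: string items are folded into the text prompt, while items of type `PROMPT` pass through untouched as `ctx.carry.attachments`. A rough sketch of that split with hypothetical collected items follows; the real code uses utilitas' `String.isString`, for which `typeof` stands in here.

```js
// Hypothetical collected items; the item shapes are assumptions for illustration.
const collected = [
    { type: 'TEXT', content: 'caption extracted from an earlier message' },
    { type: 'PROMPT', content: { mimeType: 'image/png', data: '<base64>' } },
];
// Text-ish items become extra words for the prompt (trimmed to the token budget
// in the skill itself); typeof replaces utilitas' String.isString in this sketch.
const additionInfo = collected.filter(x => typeof x.content === 'string')
    .map(x => x.content).join('\n').split(' ').filter(x => x);
// PROMPT items ride along unchanged as attachments.
const attachments = collected.filter(x => x.type === 'PROMPT').map(x => x.content);
console.log(additionInfo.length, attachments.length); // 6 1
```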