halbot 1993.2.53 → 1993.2.55
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/index.mjs +31 -73
- package/package.json +2 -4
- package/skills/-8845_thread.mjs +1 -1
- package/skills/10_engine.mjs +22 -29
- package/skills/20_instant.mjs +3 -2
- package/skills/50_prompt.mjs +0 -18
- package/skills/60_prepare.mjs +5 -3
- package/skills/70_chat.mjs +6 -6
package/README.md
CHANGED
@@ -35,7 +35,7 @@ alt="Halbot live demo" width="240" height="180" border="10" /></a>
 - Text-to-Image by DALL·E (`OpenAI` API key required, or your own engine)
 - OCR/OBJECT_DETECT (`OpenAI` or `Google Cloud` API key required, or your own engine)
 - Feeding webpage and [YouTube](https://www.youtube.com/) to enhance your prompt
-- Custom prompt
+- Custom prompt at your fingertips
 - Support `private` and `public` mode, with multiple authenticate methods.
 - `Middleware` style workflow, easy to extend.
 - Built-in support parsing webpages, `YouTube` videos, PDFs, images, Office documents, code files, text files...
package/index.mjs
CHANGED
@@ -1,56 +1,31 @@
-import { parse } from 'csv-parse/sync';
 import { alan, bot, image, shot, speech, utilitas } from 'utilitas';
 
 await utilitas.locate(utilitas.__(import.meta.url, 'package.json'));
-const log = content => utilitas.log(content, 'halbot');
 const skillPath = utilitas.__(import.meta.url, 'skills');
 
-const
-
-
-]
-
-const fetchPrompts = async () => {
-    const prompts = {};
-    for (let source of promptSource) {
-        try {
-            const resp = (await shot.get(source)).content;
-            const pmts = parse(resp, { columns: true, skip_empty_lines: true });
-            assert(pmts?.length, `Failed to load external prompts: ${source}.`);
-            pmts.filter(x => x.act && x.prompt).map(x => {
-                const { command, description } = bot.newCommand(x.act, x.act);
-                prompts[command] = { ...x, command, act: description };
-            });
-        } catch (err) { log(err?.message || err); }
-    }
-    log(`Awesome ChatGPT Prompts: fetch ${utilitas.countKeys(prompts)} items.`);
-    return prompts;
-};
-
-const init = async (options) => {
-    assert(options?.telegramToken, 'Telegram Bot API Token is required.');
-    const [pkg, ai, _speech, speechOptions, engines, vision]
-        = [await utilitas.which(), {}, {}, { tts: true, stt: true }, {}, {}];
+const init = async (options = {}) => {
+    assert(options.telegramToken, 'Telegram Bot API Token is required.');
+    const [pkg, _speech, speechOptions, vision]
+        = [await utilitas.which(), {}, { tts: true, stt: true }, {}];
     const info = bot.lines([
         `[${bot.EMOJI_BOT} ${pkg.title}](${pkg.homepage})`, pkg.description
     ]);
     let embedding;
     // init ai engines
     // use AI vision, AI stt if ChatGPT or Gemini is enabled
-    if (options
+    if (options.openaiApiKey || options.googleApiKey) {
         vision.read = alan.distillFile;
         vision.see = alan.distillFile;
         _speech.stt = alan.distillFile;
     }
     // use openai embedding, dall-e, tts if openai is enabled
-    if (options
+    if (options.openaiApiKey) {
         const apiKey = { apiKey: options.openaiApiKey };
-        await alan.init({
-
-
-        };
-
-        embedding = alan.createOpenAIEmbedding;
+        const ai = await alan.init({
+            id: 'ChatGPT', provider: 'OPENAI', model: options?.chatGptModel,
+            ...apiKey, priority: options?.chatGptPriority || 0, ...options
+        });
+        embedding = ai.embedding;
         await image.init(apiKey);
         await speech.init({ ...apiKey, provider: 'OPENAI', ...speechOptions });
         _speech.tts = speech.tts;
@@ -59,15 +34,11 @@ const init = async (options) => {
     // use google tts if google api key is ready
     if (options?.googleApiKey) {
         const apiKey = { apiKey: options.googleApiKey };
-        await alan.init({
-            provider: 'GEMINI',
-
+        const ai = await alan.init({
+            id: 'Gemini', provider: 'GEMINI', model: options?.geminiModel,
+            ...apiKey, priority: options?.geminiPriority || 1, ...options
         });
-
-            engine: 'GEMINI', priority: options?.geminiPriority || 1,
-        }; // save for reference not for prompting:
-        engines['GEMINI'] = { model: options?.geminiModel };
-        embedding || (embedding = alan.createGeminiEmbedding);
+        embedding || (embedding = ai.embedding);
         if (!_speech.tts) {
             await speech.init({
                 ...apiKey, provider: 'GOOGLE', ...speechOptions,
@@ -78,45 +49,34 @@ const init = async (options) => {
             apiKey: options.googleApiKey, cx: options.googleCx
         });
     }
-    if (options?.
+    if (options?.anthropicApiKey
+        || (options?.anthropicCredentials && options?.anthropicProjectId)) {
         await alan.init({
-            provider: '
-
-
+            id: 'Claude', provider: 'VERTEX ANTHROPIC', model: options?.anthropicModel,
+            apiKey: options?.anthropicApiKey,
+            credentials: options?.anthropicCredentials,
+            projectId: options?.anthropicProjectId,
+            priority: options?.anthropicPriority || 2, ...options
         });
-        ai['Claude'] = {
-            engine: 'CLAUDE', priority: options?.claudePriority || 2,
-        }; // only support custom model while prompting:
-        engines['CLAUDE'] = { model: options?.claudeModel };
     }
     if (options?.azureApiKey && options?.azureEndpoint) {
         await alan.init({
-
-
+            id: 'Azure', provider: 'AZURE', model: options?.azureModel,
+            apiKey: options?.azureApiKey, priority: options?.azurePriority || 3,
+            baseURL: options?.azureEndpoint, ...options
         });
-        ai['Azure'] = {
-            engine: 'AZURE', priority: options?.azurePriority || 3,
-        }; // only support custom model while prompting:
-        engines['AZURE'] = { model: options?.azureModel };
     }
     if (options?.ollamaEnabled || options?.ollamaEndpoint) {
         await alan.init({
-            provider: 'OLLAMA',
+            id: 'Ollama', provider: 'OLLAMA', model: options?.ollamaModel,
+            priority: options?.ollamaPriority || 99,
+            host: options?.ollamaEndpoint, ...options
         });
-        ai['Ollama'] = {
-            engine: 'OLLAMA', priority: options?.ollamaPriority || 3,
-        };
-        engines['OLLAMA'] = {
-            // only support custom model while prompting
-            model: options?.ollamaModel || alan.DEFAULT_MODELS['OLLAMA'],
-        };
     }
-
-    await alan.initChat({ engines, sessions: options?.storage });
-    for (const i in ai) { ai[i].model = engines[ai[i].engine].model; }
+    const { ais } = await alan.initChat({ sessions: options?.storage });
     // config multimodal engines
-    const supportedMimeTypes = new Set(Object.values(
-        x =>
+    const supportedMimeTypes = new Set(Object.values(ais).map(
+        x => x.model
     ).map(x => [
         ...x.supportedMimeTypes || [], ...x.supportedAudioTypes || [],
     ]).flat().map(x => x.toLowerCase()));
@@ -140,10 +100,8 @@ const init = async (options) => {
         skillPath: options?.skillPath || skillPath,
         speech: _speech, vision,
     });
-    _bot._.ai = ai; // Should be an array of a map of AIs.
     _bot._.lang = options?.lang || 'English';
     _bot._.image = options?.openaiApiKey && image;
-    _bot._.prompts = await fetchPrompts();
     return _bot;
 };
 
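The heart of this release is the `init()` rewrite above: the hand-maintained `ai` and `engines` maps are gone, and each provider is registered through a single `alan.init({ id, provider, model, priority, ... })` call whose return value is the engine handle itself. The sketch below restates that flow in isolation; the option names come from the diff, while the shape of the returned handle and of `initChat()`'s result is inferred from this code, not from documented `utilitas` API.

```js
// Sketch only: provider registration flow as implied by the new index.mjs.
import { alan } from 'utilitas';

const options = {
    openaiApiKey: 'YOUR_OPENAI_API_KEY',  // as read by init() above
    chatGptModel: undefined,              // optional model override
    storage: undefined,                   // optional session store
};

// Each enabled provider gets a stable id and a numeric priority;
// alan.init() returns the engine, so embeddings now come from
// ai.embedding instead of the old alan.createOpenAIEmbedding.
const ai = await alan.init({
    id: 'ChatGPT', provider: 'OPENAI', model: options.chatGptModel,
    apiKey: options.openaiApiKey, priority: 0, ...options,
});
const embedding = ai.embedding;

// initChat() no longer receives an `engines` map; it hands back the
// registered engines (keyed by id) with their resolved model metadata.
const { ais } = await alan.initChat({ sessions: options.storage });
console.log(Object.keys(ais), typeof embedding);
```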
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
     "name": "halbot",
     "description": "Just another `ChatGPT` / `Gemini` / `Ollama` Telegram bot, which is simple design, easy to use, extendable and fun.",
-    "version": "1993.2.53",
+    "version": "1993.2.55",
     "private": false,
     "homepage": "https://github.com/Leask/halbot",
     "type": "module",
@@ -38,7 +38,6 @@
         "@google-cloud/vision": "^4.3.3",
         "@google/generative-ai": "^0.24.0",
         "@mozilla/readability": "^0.6.0",
-        "csv-parse": "^5.6.0",
         "fluent-ffmpeg": "^2.1.3",
         "ioredis": "^5.6.0",
         "js-tiktoken": "^1.0.19",
@@ -47,13 +46,12 @@
         "mime": "^4.0.6",
         "mysql2": "^3.13.0",
         "office-text-extractor": "^3.0.3",
-        "ollama": "^0.5.14",
         "openai": "^4.87.3",
         "pg": "^8.14.0",
         "pgvector": "^0.2.0",
         "telegraf": "^4.16.3",
         "tesseract.js": "^6.0.0",
-        "utilitas": "^
+        "utilitas": "^1999.1.6",
         "youtube-transcript": "^1.2.1"
     }
 }
package/skills/-8845_thread.mjs
CHANGED
package/skills/10_engine.mjs
CHANGED
@@ -7,63 +7,56 @@ const NAME_HACK = {
 const NAME_HACK_REVERSE = utilitas.reverseKeyValues(NAME_HACK);
 const AI_CMD = '/set --ai=';
 
-let configuredAi;
-
 const action = async (ctx, next) => {
-
-    const arrSort = (configuredAi = Object.keys(ctx._.ai)).map(
-        k => [k, ctx._.ai[k].priority]
-    ).sort((x, y) => x[1] - y[1]);
-    ctx.firstAi = arrSort[0][0];
+    const ais = await alan.getAi(null, { all: true });
     if (ctx.carry?.keyboards?.length && !ctx.carry.keyboards.find(
         x => x.find(y => y.text.includes(AI_CMD))
     )) {
-        ctx.carry.keyboards.unshift(
-            x => ({ text: `${AI_CMD}${NAME_HACK[x] || x}` })
+        ctx.carry.keyboards.unshift(ais.slice(0, 3).map(
+            x => ({ text: `${AI_CMD}${NAME_HACK[x.id] || x.id}` })
         ));
     }
     switch (ctx.session.config?.ai) {
-        case '@': ctx.selectedAi =
+        case '@': ctx.selectedAi = ais.map(x => x.id); break;
         default:
             ctx.selectedAi = [ctx.session.config?.ai];
-            const foundAi =
+            const foundAi = ais.map(x => x.id).includes(ctx.session.config?.ai);
             if (foundAi) {
             } else if (!ctx.collected?.length) {
-                ctx.selectedAi = [
+                ctx.selectedAi = [ais[0].id];
             } else {
                 const supported = {};
-                for (const
+                for (const x of ais) {
                     const supportedMimeTypes = [
-                        ...
-                        ...
+                        ...x.model.supportedMimeTypes || [],
+                        ...x.model.supportedAudioTypes || [],
                     ];
-                    for (const
-                        supported[
-                        if (supportedMimeTypes.includes(
-                            supported[
+                    for (const i of ctx.collected) {
+                        supported[x.id] || (supported[x.id] = 0);
+                        if (supportedMimeTypes.includes(i?.content?.mime_type)) {
+                            supported[x.id]++;
                         }
-                        if (ctx.checkSpeech() && (
-
-
+                        if (ctx.checkSpeech() && (
+                            x.model.supportedAudioTypes || []
+                        ).includes(i?.content?.mime_type)) {
                             ctx.carry.audioMode = true;
-
-                            supported[i]++; // Priority for audio models
-                        }
+                            x.model.audio && (supported[x.id]++); // Priority for audio models
                         }
                     }
                 }
                 ctx.selectedAi = [Object.keys(supported).sort(
                     (x, y) => supported[y] - supported[x]
-                )?.[0] ||
+                )?.[0] || ais[0].id];
             }
     }
     await next();
 };
 
-const validateAi = val => {
-    assert(configuredAi, 'Preparing data for this option. Please try later.');
+const validateAi = async val => {
     NAME_HACK_REVERSE[val] && (val = NAME_HACK_REVERSE[val]);
-    for (let name of [...
+    for (let name of [...(
+        await alan.getAi(null, { all: true })
+    ).map(x => x.id), '', '@']) {
         if (utilitas.insensitiveCompare(val, name)) { return name; }
     }
     utilitas.throwError('No AI engine matched.');
package/skills/20_instant.mjs
CHANGED
@@ -1,7 +1,8 @@
-import { bot, utilitas } from 'utilitas';
+import { alan, bot, utilitas } from 'utilitas';
 
 const action = async (ctx, next) => {
-    const
+    const ais = await alan.getAi(null, { all: true });
+    const allAi = ais.map(x => x.id);
     switch (ctx.cmd.cmd) {
         case 'all':
             ctx.selectedAi = allAi;
package/skills/50_prompt.mjs
CHANGED
@@ -1,7 +1,5 @@
 import { bot, utilitas } from 'utilitas';
 
-const ACP = '[🧠 Awesome ChatGPT Prompts](https://github.com/f/awesome-chatgpt-prompts)';
-
 const action = async (ctx, next) => {
     ctx.session.prompts || (ctx.session.prompts = {});
     const cmd = ctx.cmd?.cmd;
@@ -35,19 +33,6 @@ const action = async (ctx, next) => {
                 await ctx.ok('Prompt not found.');
             }
             return;
-        case 'acplist':
-            const list = bot.uList(Object.keys(ctx._.prompts || {}).map(
-                x => `/${ctx._.prompts[x].command}: ${ctx._.prompts[x].act}`
-            ));
-            return await ctx.ok(list || 'Data not found.');
-        case 'acpdetail':
-            const details = bot.lines2(Object.keys(ctx._.prompts || {}).map(
-                x => bot.lines([
-                    `- /${ctx._.prompts[x].command}: ${ctx._.prompts[x].act}`,
-                    ctx._.prompts[x].prompt
-                ])
-            ));
-            return await ctx.ok(details || 'Data not found.');
         default:
             const prompt = ctx.session.prompts?.[cmd] || ctx._.prompts?.[cmd]?.prompt;
             !ctx.context && prompt && (ctx.context = { cmd, prompt });
@@ -65,14 +50,11 @@ export const { name, run, priority, func, help, cmds, cmdx } = {
         '¶ Maintain custom prompts.',
         'Example 1: /add `code` > `Code with me.`',
         'Example 2: /del `code`',
-        `¶ Get interesting prompts from ${ACP}.`,
     ]),
     cmds: {
         prompts: 'List all custom prompts.',
         add: 'Add or edit a custom prompt: /add `COMMAND` > `PROMPT`.',
         del: 'Delete a custom prompt: /del `COMMAND`.',
-        acplist: `List prompts from ${ACP}.`,
-        acpdetail: `Show details of ${ACP}.`,
     },
     cmdx: {},
 };
package/skills/60_prepare.mjs
CHANGED
@@ -2,13 +2,15 @@ import { alan, bot, utilitas } from 'utilitas';
 
 const checkUnsupportedMimeType = async ctx => {
     ctx.carry.attachments = [];
+    const ais = await alan.getAi(null, { all: true });
     for (const x of ctx.collected.filter(x => x.type === 'PROMPT')) {
         let notSupported = false;
         ctx.selectedAi.map(y => {
+            const ai = ais.find(z => z.id === y);
             if (![
-                ...
-                ...
-            ].includes(
+                ...ai.model.supportedMimeTypes || [],
+                ...ai.model.supportedAudioTypes || [],
+            ].includes(x?.content?.mime_type)) { notSupported = true; }
         });
         notSupported ? await x.content.analyze() : ctx.carry.attachments.push({
             ...x.content, analyze: undefined,
package/skills/70_chat.mjs
CHANGED
@@ -33,9 +33,10 @@ const action = async (ctx, next) => {
     };
     const ok = async options => {
         const [curTime, curMsg] = [Date.now(), packMsg(options)];
-        if (options?.onProgress && (
-            ctx.limit * (curTime - firstResp > 1000 * 60 ? 2 : 1)
-
+        if (options?.onProgress && (
+            (curTime - lastSent) < (ctx.limit * (curTime - firstResp > 1000 * 60 ? 2 : 1))
+            || lastMsg === curMsg
+        )) { return; }
         [lastSent, lastMsg] = [curTime, curMsg];
         const cmd = ctx.session.context?.cmd;
         if (options?.final) {
@@ -56,10 +57,9 @@ const action = async (ctx, next) => {
         pms.push((async ai => {
             try {
                 const resp = await alan.talk(ctx.prompt, {
-
-                    stream: async r => {
+                    id: ai, ...ctx.carry, stream: async r => {
                         msgs[ai] = r.text;
-                        ctx.carry.threadInfo.length ||
+                        ctx.carry.threadInfo.length || ok(onProgress);
                     },
                 });
                 references = resp.references;