halbot 1993.2.48 → 1993.2.50

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.mjs CHANGED
@@ -74,6 +74,9 @@ const init = async (options) => {
  });
  _speech.tts = speech.tts;
  }
+ options?.googleCx && await shot.initSearch({
+ apiKey: options.googleApiKey, cx: options.googleCx
+ });
  }
  if (options?.claudeApiKey || (options?.claudeCredentials && options?.claudeProjectId)) {
  await alan.init({
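
The added block wires up Google Programmable Search when both `googleApiKey` and `googleCx` are present in the options passed to `init`. A minimal usage sketch follows; it assumes the package's default export accepts the same options object that `init` receives above, and only `googleApiKey` and `googleCx` are taken from this diff, so everything else is a placeholder to be replaced with real values:

    import halbot from 'halbot';

    // Hypothetical setup sketch, not the project's documented config:
    // only googleApiKey and googleCx come from this release's change.
    await halbot({
        // ...existing bot options (Telegram token, AI keys, etc.)...
        googleApiKey: 'YOUR_GOOGLE_API_KEY',  // key for Google's Custom Search JSON API
        googleCx: 'YOUR_SEARCH_ENGINE_ID',    // Programmable Search Engine ID (cx)
    });

With both values set, `init` calls `shot.initSearch` so the bot can issue web searches; leaving `googleCx` unset skips the initialization entirely.
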
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "halbot",
  "description": "Just another `ChatGPT` / `Gemini` / `Ollama` Telegram bob, which is simple design, easy to use, extendable and fun.",
- "version": "1993.2.48",
+ "version": "1993.2.50",
  "private": false,
  "homepage": "https://github.com/Leask/halbot",
  "type": "module",
@@ -29,31 +29,31 @@
  "url": "https://github.com/Leask/halbot.git"
  },
  "dependencies": {
- "@anthropic-ai/sdk": "^0.37.0",
+ "@anthropic-ai/sdk": "^0.39.0",
  "@anthropic-ai/vertex-sdk": "^0.7.0",
  "@ffmpeg-installer/ffmpeg": "^1.1.0",
  "@ffprobe-installer/ffprobe": "^2.1.2",
- "@google-cloud/speech": "^6.7.0",
- "@google-cloud/text-to-speech": "^5.8.0",
- "@google-cloud/vision": "^4.3.2",
- "@google/generative-ai": "^0.22.0",
- "@mozilla/readability": "^0.5.0",
+ "@google-cloud/speech": "^6.7.1",
+ "@google-cloud/text-to-speech": "^5.8.1",
+ "@google-cloud/vision": "^4.3.3",
+ "@google/generative-ai": "^0.24.0",
+ "@mozilla/readability": "^0.6.0",
  "csv-parse": "^5.6.0",
  "fluent-ffmpeg": "^2.1.3",
- "ioredis": "^5.5.0",
+ "ioredis": "^5.6.0",
  "js-tiktoken": "^1.0.19",
  "jsdom": "^26.0.0",
  "lorem-ipsum": "^2.0.8",
  "mime": "^4.0.6",
- "mysql2": "^3.12.0",
+ "mysql2": "^3.13.0",
  "office-text-extractor": "^3.0.3",
  "ollama": "^0.5.14",
- "openai": "^4.85.4",
+ "openai": "^4.86.2",
  "pg": "^8.13.3",
  "pgvector": "^0.2.0",
  "telegraf": "^4.16.3",
  "tesseract.js": "^6.0.0",
- "utilitas": "^1998.2.21",
+ "utilitas": "^1998.2.50",
  "youtube-transcript": "^1.2.1"
  }
  }
@@ -39,7 +39,7 @@ const action = async (ctx, next) => {
  ctx.clear = async context => {
  await alan.resetSession(
  ctx.session.sessionId,
- // { systemPrompt: context?.prompt } // @todo: switch to real system prompt
+ { systemPrompt: context?.prompt } // @todo: switch to real system prompt
  );
  resetContext(context);
  const id = findSession(ctx.session.sessionId);
@@ -47,7 +47,7 @@ const action = async (ctx, next) => {
  ctx.session.sessions?.[id] && (
  ctx.session.sessions[id].context = ctx.session.context
  );
- ctx.hello(context?.prompt);
+ ctx.hello();
  };
  const switchSession = async () => {
  let resp;
@@ -80,9 +80,6 @@ export const { name, run, priority, func, help, args } = {
  '¶ Select between AI engines.',
  "Tip 2: Set `ai=''` to use default AI engine.",
  'Tip 3: Set `ai=@` to use all AI engines simultaneously.',
- '¶ Tweak enhanced output rendering.',
- 'Example 1: /set --render on',
- 'Example 2: /set --render off',
  ]),
  args: {
  hello: {
@@ -94,10 +91,5 @@ export const { name, run, priority, func, help, args } = {
  desc: "`(ChatGPT, Gemini, Claude, Azure, Ollama, @)` Select AI engine.",
  validate: validateAi,
  },
- render: {
- type: 'string', short: 'r', default: bot.BINARY_STRINGS[0],
- desc: `\`(${bot.BINARY_STRINGS.join(', ')})\` Enable/Disable enhanced output rendering.`,
- validate: utilitas.humanReadableBoolean,
- },
  },
  };
@@ -12,8 +12,8 @@ const [BOT, BOTS, LN2] = [`${bot.EMOJI_BOT} `, {

  const action = async (ctx, next) => {
  if (!ctx.prompt && !ctx.carry.attachments.length) { return await next(); }
- const [YOU, msgs, tts, rsm, pms, extra]
- = [`${ctx.avatar} You:`, {}, {}, {}, [], { buttons: [] }];
+ const [YOU, msgs, tts, rsm, pms, extra, firstResp]
+ = [`${ctx.avatar} You:`, {}, {}, {}, [], { buttons: [] }, Date.now()];
  let [lastMsg, lastSent, references, audio] = [null, 0, null, null];
  const packMsg = options => {
  const said = !options?.tts && ctx.result ? ctx.result : '';
@@ -33,9 +33,9 @@ const action = async (ctx, next) => {
  };
  const ok = async options => {
  const [curTime, curMsg] = [Date.now(), packMsg(options)];
- if (options?.onProgress && (
- curTime - lastSent < ctx.limit || lastMsg === curMsg
- )) { return; }
+ if (options?.onProgress && (curTime - lastSent < (
+ ctx.limit * (curTime - firstResp > 1000 * 60 ? 2 : 1)
+ ) || lastMsg === curMsg)) { return; }
  [lastSent, lastMsg] = [curTime, curMsg];
  const cmd = ctx.session.context?.cmd;
  if (options?.final) {
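
The rewritten guard keeps the duplicate-message check but makes the progress-edit throttle adaptive: once a streamed reply has been running for more than 60 seconds (measured from `firstResp`, which the earlier hunk initializes to `Date.now()` when the handler starts), the minimum interval between Telegram edits doubles from `ctx.limit` to `ctx.limit * 2`. A standalone sketch of that predicate, with hypothetical local names in place of the values the real code reads from its enclosing closure:

    // Returns true when a streaming progress update should be skipped.
    // limitMs mirrors ctx.limit; all names are local to this sketch.
    const shouldSkipProgress = (curTime, lastSent, firstResp, limitMs, lastMsg, curMsg) => {
        // After one minute of streaming, require twice the usual gap between edits.
        const minGap = limitMs * (curTime - firstResp > 1000 * 60 ? 2 : 1);
        return curTime - lastSent < minGap || lastMsg === curMsg;
    };

    // Example: 5s since the last edit, stream started 90s ago, 3s base limit:
    // the effective gap is 6s, so this update is skipped.
    shouldSkipProgress(Date.now(), Date.now() - 5000, Date.now() - 90000, 3000, 'a', 'b'); // true

The real guard also requires `options?.onProgress`, so final (non-streaming) sends are never throttled by this rule.
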
@@ -64,8 +64,7 @@ const action = async (ctx, next) => {
  });
  references = resp.references;
  audio = resp.audio;
- msgs[ai] = ctx.session.config?.render === false
- ? resp.text : resp.richText;
+ msgs[ai] = resp.text;
  tts[ai] = ctx.selectedAi.length === 1
  && !msgs[ai].split('\n').some(x => /^\s*```/.test(x))
  ? resp.spoken : '';