utilitas 1998.2.50 → 1998.2.52
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -1
- package/dist/utilitas.lite.mjs +1 -1
- package/dist/utilitas.lite.mjs.map +1 -1
- package/lib/alan.mjs +0 -1
- package/lib/bot.mjs +18 -12
- package/lib/manifest.mjs +1 -1
- package/lib/speech.mjs +10 -9
- package/package.json +1 -1
package/lib/alan.mjs
CHANGED
@@ -861,7 +861,6 @@ const buildPrompts = async (model, input, options = {}) => {
 content = trimTailing(trimTailing(content).slice(0, -1)) + '...';
 }
 }, model.maxInputTokens - options.attachments?.length * ATTACHMENT_TOKEN_COST);
-print(JSON.stringify(history));
 return { systemPrompt, history, prompt };
 };

package/lib/bot.mjs
CHANGED
@@ -18,6 +18,7 @@ import { isPrimary, on, report } from './callosum.mjs';
 import { cleanSql, encodeVector, MYSQL, POSTGRESQL } from './dbio.mjs';
 import { convertAudioTo16kNanoPcmWave } from './media.mjs';
 import { get } from './shot.mjs';
+import { OPENAI_TTS_MAX_LENGTH } from './speech.mjs';
 import { BASE64, BUFFER, convert, FILE, isTextFile, tryRm } from './storage.mjs';
 import { fakeUuid } from './uoid.mjs';
 import { parseOfficeFile } from './vision.mjs';
@@ -44,7 +45,7 @@ const SEARCH_LIMIT = 10;
 const [ // https://limits.tginfo.me/en
 BOT_SEND, provider, HELLO, GROUP, PRIVATE, CHANNEL, MENTION, CALLBACK_LIMIT,
 API_ROOT, jsonOptions, signals, sessions, HALBOT, COMMAND_REGEXP,
-MESSAGE_LENGTH_LIMIT,
+MESSAGE_LENGTH_LIMIT, COMMAND_LENGTH, COMMAND_LIMIT,
 COMMAND_DESCRIPTION_LENGTH, bot_command, EMOJI_SPEECH, EMOJI_LOOK,
 EMOJI_BOT, logOptions, ON, OFF, EMOJI_THINKING, PARSE_MODE_MD,
 PARSE_MODE_MD_V2,
@@ -52,11 +53,12 @@ const [ // https://limits.tginfo.me/en
 'BOT_SEND', 'TELEGRAM', 'Hello!', 'group', 'private', 'channel',
 'mention', 30, 'https://api.telegram.org/',
 { code: true, extraCodeBlock: 1 }, ['SIGINT', 'SIGTERM'], {}, 'HALBOT',
-/^\/([a-z0-9_]+)(@([a-z0-9_]*))?\ ?(.*)$/sig, 4096,
+/^\/([a-z0-9_]+)(@([a-z0-9_]*))?\ ?(.*)$/sig, 4096, 32, 100, 256,
 'bot_command', '👂', '👀', '🤖', { log: true }, 'on', 'off', '💬',
 'Markdown', 'MarkdownV2',
 ];

+const MESSAGE_SOFT_LIMIT = parseInt(MESSAGE_LENGTH_LIMIT * 0.95);
 const parse_mode = PARSE_MODE_MD;
 const [BUFFER_ENCODE, BINARY_STRINGS] = [{ encode: BUFFER }, [OFF, ON]];

@@ -215,6 +217,8 @@ const sessionSet = async chatId => {
 };

 const paging = (message, options) => {
+options?.onProgress
+&& (message = message?.length ? `${message} █` : EMOJI_THINKING)
 const [pages, page, size] = [[], [], ~~options?.size || MESSAGE_SOFT_LIMIT];
 const submit = () => {
 const content = trim(lines(page));
@@ -366,7 +370,8 @@ const subconscious = [{
 ctx.shouldSpeech = async text => {
 text = isSet(text, true) ? (text || '') : ctx.tts;
 const should = ctx._.speech?.tts && ctx.checkSpeech();
-should && text &&
+should && text && text.length <= OPENAI_TTS_MAX_LENGTH
+&& await ctx.speech(text);
 return should;
 };
 ctx.collect = (content, type, options) => type ? ctx.collected.push(
@@ -380,8 +385,7 @@ const subconscious = [{
 ctx.skipMemorize();
 };
 ctx.ok = async (message, options) => {
-let pages = paging(message);
-pages = !pages.length && options?.onProgress ? [''] : pages;
+let pages = paging(message, options);
 const extra = getExtra(ctx, options);
 const [pageIds, pageMap] = [[], {}];
 options?.pageBreak || ctx.done.map(x => {
@@ -391,16 +395,15 @@ const subconscious = [{
 for (let i in pages) {
 const lastPage = ~~i === pages.length - 1;
 const shouldExtra = options?.lastMessageId || lastPage;
-if (!options?.lastMessageId
+if (options?.onProgress && !options?.lastMessageId
 && pageMap[pageIds[~~i]]?.text === pages[i]) { continue; }
 if (options?.onProgress && !pageIds[~~i]) { // progress: new page, reply text
 ctx.done.push(await reply(
-ctx, false, pages[i]
+ctx, false, pages[i], extra
 ));
 } else if (options?.onProgress) { // progress: ongoing, edit text
-lastPage && (pages[i] += ' █');
 ctx.done.push(await editMessageText(
-ctx,
+ctx, false, pageIds[~~i],
 pages[i], shouldExtra ? extra : {}
 ));
 } else if (options?.lastMessageId || pageIds[~~i]) { // progress: final, edit markdown
@@ -411,7 +414,7 @@ const subconscious = [{
 } else { // never progress, reply markdown
 ctx.done.push(await reply(ctx, true, pages[i], extra));
 }
-
+await ctx.timeout();
 }
 return ctx.done;
 };
@@ -433,8 +436,11 @@ const subconscious = [{
 if (Buffer.isBuffer(text)) {
 file = await convert(text, { input: BUFFER, expected: FILE });
 } else {
-file = await ctx._.speech.tts(
+file = await ignoreErrFunc(async () => await ctx._.speech.tts(
+text, { expected: 'file' }
+), logOptions);
 }
+if (!file) { return; }
 const resp = await ctx.audio(file, options);
 await tryRm(file);
 return resp;
@@ -1059,7 +1065,7 @@ const init = async (options) => {
 const pkg = await which();
 mime = await need('mime');
 lorem = new (await need('lorem-ipsum')).LoremIpsum;
-bot = new Telegraf(options?.botToken);
+bot = new Telegraf(options?.botToken, { handlerTimeout: 1000 * 60 * 10 }); // 10 minutes
 bot.use(useNewReplies());
 bot._ = {
 args: { ...options?.args || {} },
package/lib/manifest.mjs
CHANGED
package/lib/speech.mjs
CHANGED
@@ -1,17 +1,18 @@
-import { convert, getTempPath } from './storage.mjs';
 import { DEFAULT_MODELS, OPENAI_VOICE } from './alan.mjs';
-import { ensureString } from './utilitas.mjs';
-import { get } from './shot.mjs';
 import { getApiKeyCredentials, hash } from './encryption.mjs';
 import { getFfmpeg } from './media.mjs';
+import { get } from './shot.mjs';
+import { convert, getTempPath } from './storage.mjs';
+import { ensureString } from './utilitas.mjs';

 import {
-call, countKeys, ignoreErrFunc, inBrowser,
+call, countKeys, ignoreErrFunc, inBrowser,
+need, throwError
 } from './utilitas.mjs';

 import {
-convertAudioTo16kNanoPcmWave,
 convertAudioTo16kNanoOpusOgg,
+convertAudioTo16kNanoPcmWave,
 } from './media.mjs';

 const _NEED = [
@@ -28,6 +29,7 @@ const [BUFFER, STREAM, BASE64, FILE, clients, languageCode, audioEncoding, suffi

 // https://platform.openai.com/account/limits
 const [TTS_1, TTS_1_HD] = ['tts-1', 'tts-1-hd']; // [7500 RPM, 7500 RPM]
+const OPENAI_TTS_MAX_LENGTH = 4096;
 const defaultOpenAITtsModel = TTS_1;

 const WHISPER_MODELS = [
@@ -147,6 +149,7 @@ const checkWhisper = async (options) => {
 const ttsOpenAI = async (input, options) => {
 assert(clients.tts, 'Text-to-Speech API has not been initialized.', 500);
 assert(input, 'Text is required.', 400);
+assert(input.length <= OPENAI_TTS_MAX_LENGTH, 'Text is too long.', 400);
 // https://platform.openai.com/docs/api-reference/audio/createSpeech
 const content = await clients.tts.create({
 model: defaultOpenAITtsModel, voice: DEFAULT_MODELS[OPENAI_VOICE],
@@ -274,13 +277,11 @@ export {
 _NEED,
 checkSay,
 checkWhisper,
-init,
-stt,
-sttGoogle,
+init, OPENAI_TTS_MAX_LENGTH, stt, sttGoogle,
 sttOpenAI,
 sttWhisper,
 tts,
 ttsGoogle,
 ttsOpenAI,
-ttsSay
+ttsSay
 };
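A usage sketch for the newly exported limit. Assumptions beyond the diff: the module is imported by its in-package path, tts accepts (text, { expected: 'file' }) and resolves to a temp file as bot.mjs treats it above, speech has already been initialized via init(...), and the helper name is hypothetical.

import { OPENAI_TTS_MAX_LENGTH, tts } from './lib/speech.mjs';

// Hypothetical caller: check the limit up front instead of hitting the new
// "Text is too long." (400) assertion inside ttsOpenAI.
const speakIfShortEnough = async (text) => {
    if (!text || text.length > OPENAI_TTS_MAX_LENGTH) { return null; }
    return await tts(text, { expected: 'file' }); // temp audio file, per the bot.mjs usage above
};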