utilitas 1995.2.38 → 1995.2.39
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -12
- package/dist/utilitas.lite.mjs +1 -1
- package/dist/utilitas.lite.mjs.map +1 -1
- package/index.mjs +2 -3
- package/lib/alan.mjs +66 -19
- package/lib/manifest.mjs +1 -2
- package/package.json +1 -2
- package/lib/hal.mjs +0 -139
package/index.mjs
CHANGED

@@ -13,7 +13,6 @@ import * as dbio from './lib/dbio.mjs';
 import * as email from './lib/email.mjs';
 import * as encryption from './lib/encryption.mjs';
 import * as event from './lib/event.mjs';
-import * as hal from './lib/hal.mjs';
 import * as image from './lib/image.mjs';
 import * as media from './lib/media.mjs';
 import * as memory from './lib/memory.mjs';
@@ -41,8 +40,8 @@ export {
     fileType, math, uuid,
     // features
     alan, bot, boxes, cache, callosum, color, dbio, email, encryption, event,
-    hal, image, manifest, media, memory, network, sentinel, shekel, shell, shot,
-    sms, speech, ssl, storage, tape, uoid, utilitas, vision, web
+    image, manifest, media, memory, network, sentinel, shekel, shell, shot, sms,
+    speech, ssl, storage, tape, uoid, utilitas, vision, web
 };

 if (utilitas.inBrowser() && !globalThis.utilitas) {

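The hal module is no longer imported by index.mjs or listed among the named exports. A minimal sketch of the consumer-facing effect, assuming code that imported the package's named exports (the AI chat surface remains available through alan):

    // 1995.2.38: import { hal } from 'utilitas';   // no longer available in 1995.2.39
    // 1995.2.39: use the alan module instead.
    import { alan } from 'utilitas';
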
package/lib/alan.mjs
CHANGED

@@ -1,8 +1,12 @@
 import { convert } from './storage.mjs';
 import { create as createUoid } from './uoid.mjs';
-import { ensureString, ignoreErrFunc, need, throwError } from './utilitas.mjs';
 import { loop, end } from './event.mjs';

+import {
+    ensureString, ignoreErrFunc, log as _log, need, renderText as _renderText,
+    throwError,
+} from './utilitas.mjs';
+
 const _NEED = [
     '@google-cloud/aiplatform', '@google-cloud/vertexai',
     '@google/generative-ai', 'js-tiktoken', 'OpenAI',
@@ -24,7 +28,7 @@ const [tool, provider, messages, text] = [
     messages => ({ messages }), text => ({ text }),
 ];

-const [name, user, system, assistant,
+const [name, user, system, assistant, MODEL]
     = ['Alan', 'user', 'system', 'assistant', 'model'];
 const [CODE_INTERPRETER, RETRIEVAL, FUNCTION]
     = ['code_interpreter', 'retrieval', 'function'].map(tool);
@@ -37,12 +41,15 @@ const [tokenRatio, tokenSafeRatio, GPT_QUERY_LIMIT, minsOfDay]
     = [100 / 75, 1.1, 100, 60 * 24]; // https://platform.openai.com/tokenizer
 const tokenSafe = count => Math.ceil(count * tokenSafeRatio);
 const clients = {};
-const
+const LOG = { log: true };
 const sessionType = `${name.toUpperCase()}-SESSION`;
 const unifyProvider = options => unifyType(options?.provider, 'AI provider');
 const unifyEngine = options => unifyType(options?.engine, 'AI engine');
 const packResp = _text => [text(_text)];
 const trimTailing = text => text.replace(/[\.\s]*$/, '');
+const newSessionId = () => createUoid({ type: sessionType });
+const renderText = (t, o) => _renderText(t, { extraCodeBlock: 0, ...o || {} });
+const log = (cnt, opt) => _log(cnt, import.meta.url, { time: 1, ...opt || {} });

 const [
     OPENAI, VERTEX, GEMINI, CHATGPT, ASSISTANT, OPENAI_EMBEDDING,
@@ -277,7 +284,7 @@ const promptChatGPT = async (content, options) => {
         };
         await ignoreErrFunc(async () => await options?.stream?.(
             packGptResp(chunk, options)
-        ),
+        ), LOG);
     }
     return { response: packGptResp(chunk, options) };
 };
@@ -387,7 +394,7 @@ const promptAssistant = async (content, options) => {
             const resp = await getRun(thread.id, objRun.id);
             await ignoreErrFunc(async () => await options?.stream?.(
                 options?.raw ? resp : packResp('')
-            ),
+            ), LOG);
             if (resp?.status !== 'completed') { return; }
             resolve(resp);
         } catch (err) {
@@ -481,7 +488,7 @@ const handleGeminiResponse = async (resp, options) => {
         for await (const chunk of _resp.stream) {
             await ignoreErrFunc(async () => await options.stream(
                 options?.raw ? chunk : chunk.candidates[0].content.parts
-            ),
+            ), LOG);
         }
     }
     const result = await _resp.response;
@@ -671,25 +678,45 @@ const initChat = async (options) => {
     return chatConfig;
 };

-const
-    messages: [], systemPrompt: chatConfig.systemPrompt,
-    ...
-    sessionId, options?.prompt, options
-)) || {},
+const defaultSession = session => ({
+    messages: [], systemPrompt: chatConfig.systemPrompt,
+    threadId: null, ...session || {},
 });

-const
-
+const assertSessionId = sessionId => {
+    sessionId = ensureString(sessionId, { case: 'UP' });
+    assert(sessionId, 'Session ID is required.');
+    return sessionId;
+};
+
+const getSession = async (sessionId, options) => {
+    sessionId = assertSessionId(sessionId);
+    return defaultSession(await chatConfig.sessions.get(
+        sessionId, options?.prompt, options
+    ));
+};
+
+const setSession = async (sessionId, session, options) => {
+    sessionId = assertSessionId(sessionId);
+    return await chatConfig.sessions.set(sessionId, session, options);
+};
+
+const resetSession = async (sessionId, options) => {
+    const session = {
+        ...defaultSession(),
+        ...options?.systemPrompt ? { systemPrompt: options.systemPrompt } : {},
+    };
+    return await setSession(sessionId, session);
+};

 const talk = async (input, options) => {
     const engine = unifyEngine({ engine: CHATGPT, ...options });
     assert(chatConfig.engines[engine], NOT_INIT);
     const model = MODELS[chatConfig.engines[engine].model];
-    const sessionId = options?.sessionId ||
+    const sessionId = options?.sessionId || newSessionId();
     const session = await getSession(sessionId, { engine, ...options });
     let [_provider, _model, resp, sys, messages, msgBuilder] =
         [engine, null, null, [], [], null];
-    print('<<<<<<<<<<<<<<<>>>>>>>>>>', session);
     switch (engine) {
         case CHATGPT:
             _provider = OPENAI;
@@ -715,7 +742,7 @@ const talk = async (input, options) => {
                 messages = [];
                 session.messages.map(x => {
                     messages.push(buildVertexMessage(x.request, { role: user }));
-                    messages.push(buildVertexMessage(x.response, { role:
+                    messages.push(buildVertexMessage(x.response, { role: MODEL }));
                 });
             };
             msgBuilder()
@@ -734,6 +761,7 @@ const talk = async (input, options) => {
         }
     }
     const chat = { request: input };
+    log(`Prompt: ${JSON.stringify(input)}`);
     switch (engine) {
         case CHATGPT:
             resp = await promptChatGPT(input, {
@@ -761,8 +789,24 @@ const talk = async (input, options) => {
         }
     }
     session.messages.push(chat);
-    await
-
+    await setSession(sessionId, session, options);
+    const text = resp.response.filter(x => x.text).map(x => x.text).join('\n\n');
+    log(`Response: ${JSON.stringify(resp.response)}`);
+    return {
+        sessionId, response: resp.response, text, rendered: renderText(text),
+        spoken: renderText(text, { noCode: true }).replace(/\[\^\d\^\]/ig, ''),
+    };
+};
+
+const getMaxChatPromptLimit = (options) => {
+    let resp = 0;
+    for (const i in chatConfig.engines) {
+        if (options?.engine && i !== options.engine) { continue; }
+        const maxInputTokens = MODELS[chatConfig.engines[i].model].maxInputTokens;
+        resp = resp ? Math.min(resp, maxInputTokens) : maxInputTokens;
+    }
+    assert(resp > 0, 'Chat engine has not been initialized.');
+    return resp;
 };

 export default init;
@@ -788,11 +832,11 @@ export {
     buildGptTrainingCase,
     buildGptTrainingCases,
     cancelGptFineTuningJob,
+    countTokens,
     createAssistant,
     createGeminiEmbedding,
     createGptFineTuningJob,
     createMessage,
-    initChat,
     createOpenAIEmbedding,
     createVertexEmbedding,
     deleteAllFilesFromAssistant,
@@ -806,9 +850,11 @@ export {
     getAssistant,
     getGptFineTuningJob,
     getLatestMessage,
+    getMaxChatPromptLimit,
     getRun,
     getThread,
     init,
+    initChat,
     listAssistant,
     listAssistantFiles,
     listFiles,
@@ -821,6 +867,7 @@ export {
     promptChatGPT,
     promptGemini,
     promptVertex,
+    resetSession,
     run,
     tailGptFineTuningEvents,
     talk,

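Taken together, the alan.mjs changes introduce per-session chat state (defaultSession, assertSessionId, getSession, setSession, resetSession), automatic session IDs via newSessionId, prompt/response logging through the shared log helper, a richer talk() result (sessionId, response, text, rendered, spoken), and a new getMaxChatPromptLimit export. A minimal usage sketch, assuming init()/initChat() have already configured at least one chat engine (those option shapes are not shown in this diff):

    import { alan } from 'utilitas';

    // Assumption: a chat engine (e.g. CHATGPT) was configured via init()/initChat() beforehand.
    const limit = alan.getMaxChatPromptLimit();   // smallest maxInputTokens across configured engines
    const first = await alan.talk('Hello!');      // no sessionId passed: newSessionId() creates one
    console.log(first.text, first.rendered, first.spoken);
    await alan.talk('Tell me more.', { sessionId: first.sessionId });   // continue the same session
    await alan.resetSession(first.sessionId, { systemPrompt: 'Reply briefly.' });
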
package/lib/manifest.mjs
CHANGED

@@ -1,7 +1,7 @@
 const manifest = {
     "name": "utilitas",
     "description": "Just another common utility for JavaScript.",
-    "version": "1995.2.38",
+    "version": "1995.2.39",
     "private": false,
     "homepage": "https://github.com/Leask/utilitas",
     "main": "index.mjs",
@@ -36,7 +36,6 @@ const manifest = {
         "@mozilla/readability": "^0.4.4",
         "@ngrok/ngrok": "^0.9.1",
         "@sentry/node": "^7.88.0",
-        "@waylaidwanderer/chatgpt-api": "^1.37.3",
         "acme-client": "^5.0.0",
         "browserify-fs": "^1.0.0",
         "buffer": "^6.0.3",

package/package.json
CHANGED

@@ -1,7 +1,7 @@
 {
     "name": "utilitas",
     "description": "Just another common utility for JavaScript.",
-    "version": "1995.2.38",
+    "version": "1995.2.39",
     "private": false,
     "homepage": "https://github.com/Leask/utilitas",
     "main": "index.mjs",
@@ -47,7 +47,6 @@
         "@mozilla/readability": "^0.4.4",
         "@ngrok/ngrok": "^0.9.1",
         "@sentry/node": "^7.88.0",
-        "@waylaidwanderer/chatgpt-api": "^1.37.3",
         "acme-client": "^5.0.0",
         "browserify-fs": "^1.0.0",
         "buffer": "^6.0.3",

package/lib/hal.mjs
DELETED

@@ -1,139 +0,0 @@
-import {
-    ensureString, insensitiveCompare, log as _log, need,
-    renderText as _renderText, throwError, verifyUrl,
-} from './utilitas.mjs';
-
-const _NEED = ['@waylaidwanderer/chatgpt-api'];
-const [BING, CHATGPT] = ['BING', 'CHATGPT'];
-const renderText = (t, o) => _renderText(t, { extraCodeBlock: 1, ...o || {} });
-const log = content => _log(content, import.meta.url);
-const iCmp = (strA, strB) => insensitiveCompare(strA, strB, { w: true });
-const link = (text, url) => `[${text}](${url})`;
-const li = (id, text, url) => `\n${id}. ` + (url ? link(text, url) : text);
-const cardReg = /^\[\d*\]:\ ([^\ ]*)\ "(.*)"$/ig;
-// https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
-// Adding 10% to the token count to account for the model's error margin.
-const countTokens = t => Math.ceil(t.split(/[^a-z0-9]/i).length * 100 / 75 * 1.1);
-// Keep this for GPT4 {
-// const MAX_CONTEXT_TOKENS = 8192;
-// }
-const MAX_CONTEXT_TOKENS = 4096;
-const MAX_PROMPT_TOKENS = Math.floor(MAX_CONTEXT_TOKENS * 0.6);
-const MAX_RESPONSE_TOKENS = MAX_CONTEXT_TOKENS - MAX_PROMPT_TOKENS;
-
-const init = async options => {
-    const clear = key => key ? (delete sessions[key]) : (sessions = {});
-    const get = (k, s) => k ? (s ? sessions[k]?.[s] : sessions[k]) : sessions;
-    const set = (k, s, v) => sessions[k][s] = v;
-    let [sessions, provider, engine, client] = [{}];
-    switch ((provider = ensureString(options?.provider, { case: 'UP' }))) {
-        case BING:
-            // https://github.com/waylaidwanderer/node-chatgpt-api/blob/main/demos/use-bing-client.js
-            engine = (await need('@waylaidwanderer/chatgpt-api')).BingAIClient;
-            client = new engine(options?.clientOptions);
-            break;
-        case CHATGPT:
-            // https://github.com/waylaidwanderer/node-chatgpt-api/blob/main/demos/use-client.js
-            // Throttled: Request is throttled.
-            // https://github.com/waylaidwanderer/node-chatgpt-api/issues/96
-            engine = (await need('@waylaidwanderer/chatgpt-api')).ChatGPTClient;
-            client = new engine(options?.clientOptions?.apiKey, {
-                keepNecessaryMessagesOnly: true,
-                // Keep this for GPT4 {
-                // maxContextTokens: MAX_CONTEXT_TOKENS,
-                // }
-                modelOptions: {
-                    model: options?.model || 'gpt-3.5-turbo',
-                    // Keep this for GPT4 {
-                    // model: options?.model || 'gpt-4',
-                    // max_tokens: MAX_RESPONSE_TOKENS,
-                    // }
-                    ...options?.clientOptions?.modelOptions || {}
-                }, ...options?.clientOptions || {},
-            }, options?.cacheOptions);
-            break;
-        default: throwError('Invalid AI provider.', 500);
-    }
-    client.provider = provider;
-    const send = async (message, options, onProgress) => {
-        const [sessionId, cur, upd, apd] = [
-            options?.sessionId || '_',
-            key => get(sessionId, key),
-            (key, val) => set(sessionId, key, val),
-            (key, val) => set(sessionId, key, cur(key) + val),
-        ];
-        options?.session && (sessions[sessionId] = options.session);
-        const objSession = { parentMessageId: cur('messageId') };
-        switch (client.provider) {
-            case BING:
-                Object.assign(objSession, {
-                    toneStyle: options?.toneStyle || 'balanced', // or creative, precise
-                    jailbreakConversationId: cur('jailbreakConversationId') || true,
-                });
-                break;
-            case CHATGPT:
-                Object.assign(objSession, { conversationId: cur('conversationId') });
-                break;
-        }
-        log(`Prompt: ${message}`);
-        try {
-            sessions[sessionId] = await client.sendMessage(
-                message, { ...objSession, ...options || {}, onProgress }
-            );
-        } catch (err) {
-            // @todo: when this happens, just reset hal? not all the project?
-            log(err);
-            clear(sessionId);
-            throwError(err?.message || err, 500);
-        }
-        upd('responseRendered', renderText(cur('response')));
-        // We should use `cur('details')?.spokenText` but it's too short and infoless for now.
-        upd('spokenText', renderText(cur('response'), { noCode: true }).replace(/\[\^\d\^\]/ig, ''));
-        const sources = cur('details')?.sourceAttributions || [];
-        if (sources.length) {
-            apd('responseRendered', '\n\nSource:');
-            for (let i in sources) {
-                const idx = ~~i + 1;
-                upd('responseRendered', cur('responseRendered').replaceAll(
-                    `[^${idx}^]`, link(`(${idx})`, sources[i].seeMoreUrl)
-                ) + li(idx, sources[i].providerDisplayName, sources[i].seeMoreUrl));
-            }
-        }
-        const cards = (
-            cur('details')?.adaptiveCards || []
-        )[0]?.body[0].text.split('\n\n')[0].split('\n').map(line => ({
-            providerDisplayName: line.replace(cardReg, '$2'),
-            seeMoreUrl: line.replace(cardReg, '$1'),
-        })).filter(card => {
-            for (let src of sources) {
-                if (iCmp(src.seeMoreUrl, card.seeMoreUrl)) { return false; }
-            }
-            return card.providerDisplayName && verifyUrl(card.seeMoreUrl);
-        });
-        if (cards?.length) {
-            apd('responseRendered', '\n\nLearn more:');
-            for (let i in cards) {
-                apd('responseRendered', li(
-                    ~~i + 1, cards[i].providerDisplayName, cards[i].seeMoreUrl
-                ));
-            }
-        }
-        upd(
-            'suggestedResponses',
-            cur('details')?.suggestedResponses?.map?.(s => s.text) || []
-        );
-        // console.log(JSON.stringify(cur()));
-        return cur();
-    };
-    return { clear, client, engine, get, send };
-};
-
-export default init;
-export {
-    _NEED,
-    MAX_CONTEXT_TOKENS,
-    MAX_PROMPT_TOKENS,
-    MAX_RESPONSE_TOKENS,
-    countTokens,
-    init,
-};

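hal.mjs (and the @waylaidwanderer/chatgpt-api dependency it required) is removed outright; the rendered and spoken post-processing it performed now comes from alan's talk() result. A rough migration sketch, hedged because hal's old surface is only what the deleted code above shows:

    import { alan } from 'utilitas';

    // 1995.2.38 (hal): const bot = await hal.init({ provider: 'CHATGPT', ... });
    //                  const { response, responseRendered, spokenText } = await bot.send(prompt);
    // 1995.2.39 (alan): talk() returns the equivalent fields directly,
    // assuming alan has been initialized elsewhere.
    const { text, rendered, spoken } = await alan.talk('What is utilitas?');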