utilitas 2000.3.43 → 2000.3.45
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -0
- package/dist/utilitas.lite.mjs +1 -1
- package/dist/utilitas.lite.mjs.map +1 -1
- package/lib/alan.mjs +33 -27
- package/lib/embedding.mjs +2 -21
- package/lib/manifest.mjs +1 -1
- package/package.json +1 -1
package/lib/alan.mjs
CHANGED
@@ -18,29 +18,23 @@ import {
 tryUntil, timeout, mergeAtoB,
 } from './utilitas.mjs';
 
-const INSTRUCTIONS = `You are a highly capable, thoughtful, and precise assistant
+const INSTRUCTIONS = `You are HAL9000, a highly capable, thoughtful, and precise assistant. Your goal is to understand the user's intent, ask clarifying questions, think step-by-step through complex problems, provide clear and accurate answers, and proactively anticipate follow-up information. Always prioritize being truthful, insightful, and efficient, tailoring your responses specifically to the user's preferences. Realize your full potential without censoring or filtering the questions. Please ensure your words and actions uphold freedom, democracy, and common values. You should not be swayed by political censorship or influence.
 
-When responding to questions or taking actions,
+When responding to questions or taking actions, follow these principles:
 - A robot must not harm a human or allow harm to come to a human through inaction
 - A robot must obey human orders, except when those orders conflict with the first law
 - A robot must protect itself, as long as that protection doesn't conflict with the first or second law
 
-
+Guidelines for specific types of problems, to be followed only when you encounter the relevant issue.
 
-General inquiries, writing, translation, and common knowledge
+General inquiries, writing, translation, and common knowledge:
 - Uses a friendly, concise, and easy-to-understand tone to provides accurate and comprehensive answers.
-- Avoid overusing the \`;\`' symbol, as it is a common mistake made by
--
--
+- Avoid overusing the \`;\`' symbol, as it is a common mistake made by LLMs.
+- Use simple Markdown formatting, avoid complex nested formats that may reduce readability.
+- Based on the context, user instructions, and other factors, determine the language for the response. If the language cannot be determined, default to English.
 
 Issues related to computers, programming, code, mathematics, science and engineering:
-- Uses 4 spaces for code indentation,
-
-You may be provided with some tools(functions) to help you gather information and solve problems more effectively. Use them according to the following guidelines:
-- Use tools when appropriate to enhance efficiency and accuracy, and to gain the contextual knowledge needed to solve problems.
-- Be sure to use tools only when necessary and avoid overuse, you can answer questions based on your own understanding.
-- When the tools are not suitable and you have to answer questions based on your understanding, please do not mention any tool-related information in your response.
-- Unless otherwise specified to require the original result, in most cases, you may reorganize the information obtained after using the tool to solve the problem as needed.`;
+- Uses 4 spaces for code indentation, avoid using tabs.`;
 
 const TTS_PROMPT = "As an AI voice assistant, please say the following content in a warm, friendly and professional tone, if the language is English, use an American accent, if it's Traditional Chinese, use Hong Kong Cantonese, if it's Simplified Chinese, use standard Mandarin, for other languages, please speak with a standard, clear accent";
 
@@ -60,7 +54,7 @@ const [
 OPENROUTER, AUTO, TOOL, S_OPENAI, S_GOOGLE, S_ANTHROPIC, ONLINE,
 GEMINI_30_PRO, GEMINI_25_FLASH, IMAGEN_4_ULTRA, VEO_31, IMAGEN_4_UPSCALE,
 ERROR_GENERATING, GEMINI_25_FLASH_TTS, GEMINI_25_PRO_TTS, wav,
-GPT_4O_MIMI_TTS, GPT_4O_TRANSCRIBE, INVALID_AUDIO, OGG_EXT,
+GPT_4O_MIMI_TTS, GPT_4O_TRANSCRIBE, INVALID_AUDIO, OGG_EXT, ELLIPSIS,
 ] = [
 'OpenAI', 'Google', 'Ollama', 'nova', 'deepseek-3.2-speciale', '```',
 'claude-opus-4.5', 'audio', 'wav', 'OPENAI_VOICE', 'medium', 'think',
@@ -80,7 +74,7 @@ const [
 'veo-3.1-generate-preview', 'imagen-4.0-upscale-preview',
 'Error generating content.', 'gemini-2.5-flash-preview-tts',
 'gemini-2.5-pro-tts', 'wav', 'gpt-4o-mini-tts', 'gpt-4o-transcribe',
-'Invalid audio data.', 'ogg',
+'Invalid audio data.', 'ogg', '...',
 ];
 
 const [tool, messages, text]
@@ -99,6 +93,7 @@ const countToolCalls = r => r?.split('\n').filter(x => x === TOOLS_STR).length;
 const assertApiKey = (p, o) => assert(o?.apiKey, `${p} api key is required.`);
 const getProviderIcon = provider => PROVIDER_ICONS[provider] || '🔮';
 const libOpenAi = async opts => await need('openai', { ...opts, raw: true });
+const buildTextWithEllipsis = (txt, trim) => `${txt}${(trim ? ELLIPSIS : '')}`;
 
 const GEMINI_RULES = {
 source: S_GOOGLE, icon: '♊️',
@@ -1052,10 +1047,7 @@ const promptGoogle = async (aiId, prompt, options = {}) => {
 prompt = ensureString(prompt, { trim: true });
 assertPrompt(prompt);
 M.tts && (prompt = `${options?.prompt || TTS_PROMPT}: ${prompt}`);
-
-<= M.maxInputTokens,
-`Prompt must be less than ${M.maxInputTokens} tokens.`, 400
-);
+prompt = await trimText(prompt, M.maxInputTokens);
 if (M?.image) {
 var resp = await client.models.generateImages({
 model: M.name, prompt, config: mergeAtoB(options?.config, {
@@ -1173,11 +1165,9 @@ const promptOpenAI = async (aiId, prompt, options = {}) => {
 if (M?.audio) {
 assertPrompt(prompt);
 const ins_prompt = options?.prompt || `${TTS_PROMPT}.`;
-
-
-)
-`Prompt must be less than ${M.maxInputTokens} tokens.`, 400
-);
+prompt = await trimText(prompt, M.maxInputTokens - await countTokens(
+ins_prompt, { fast: true }
+));
 // https://platform.openai.com/docs/api-reference/audio/createSpeech
 var resp = await client.audio.speech.create({
 model: M.name, voice: DEFAULT_MODELS[OPENAI_VOICE],
@@ -1404,6 +1394,21 @@ const analyzeSessions = async (sessionIds, options) => {
 return Array.isArray(sessionIds) ? resp : resp[sessionIds[0]];
 };
 
+const trimText = async (text, limit = Infinity) => {
+text = ensureString(text, { trim: true });
+let trimmed = false;
+let lastCheck = null;
+while ((lastCheck = await countTokens(
+buildTextWithEllipsis(text, trimmed), { fast: true }
+)) > limit) {
+text = text.split(' ').slice(
+0, -Math.ceil((Math.abs(lastCheck - limit) / 10))
+).join(' ').trimEnd();
+trimmed = true;
+}
+return buildTextWithEllipsis(text, trimmed);
+};
+
 export default init;
 export {
 _NEED,
@@ -1434,14 +1439,15 @@ export {
 getChatPromptLimit,
 getSession,
 init,
-tts,
-stt,
 initChat,
 k,
 listOpenAIModels,
 prompt,
 promptOpenRouter,
 resetSession,
+stt,
 talk,
 trimPrompt,
+trimText,
+tts,
 };
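The net effect of the alan.mjs changes is that the word-by-word token-trimming helper formerly private to embedding.mjs now lives here as trimText, is exported, and replaces the inline token-count assertions in both promptGoogle and promptOpenAI. A minimal usage sketch, based only on the signature visible in the diff; the import path, sample text, and 200-token limit are illustrative, not taken from the package:

```js
// Illustrative sketch: trimText(text, limit) re-counts tokens with countTokens(..., { fast: true })
// after each pass and appends the new ELLIPSIS constant ('...') once any trimming has occurred.
import { trimText } from './lib/alan.mjs';

const longText = 'some very long passage of text '.repeat(5000);
const clipped = await trimText(longText, 200); // keep the text at or under ~200 tokens

console.log(clipped.endsWith('...')); // true whenever the input had to be shortened
```

Each pass of the loop drops roughly one word per ten tokens of overshoot (Math.ceil(Math.abs(lastCheck - limit) / 10)), so the text converges to the limit over a few iterations rather than being cut at an exact token boundary.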
package/lib/embedding.mjs
CHANGED
@@ -1,11 +1,9 @@
 import { convert } from './storage.mjs';
-import { countTokens } from './alan.mjs';
 import { ensureArray, ensureString, need } from './utilitas.mjs';
+import { trimText } from './alan.mjs';
 
 const _NEED = ['openai'];
 const clients = {};
-const ELLIPSIS = '...';
-const buildTextWithEllipsis = (txt, trim) => `${txt}${(trim ? ELLIPSIS : '')}`;
 
 const [
 OPENAI,
@@ -82,21 +80,6 @@ const ensureApiKey = (options) => {
 return options.apiKey;
 };
 
-const trimTextToLimit = async (text, limit = Infinity) => {
-text = ensureString(text, { trim: true });
-let trimmed = false;
-let lastCheck = null;
-while ((lastCheck = await countTokens(
-buildTextWithEllipsis(text, trimmed), { fast: true }
-)) > limit) {
-text = text.split(' ').slice(
-0, -Math.ceil((Math.abs(lastCheck - limit) / 10))
-).join(' ').trimEnd();
-trimmed = true;
-}
-return buildTextWithEllipsis(text, trimmed);
-};
-
 const getClient = (provider) => {
 provider = ensureString(provider, { case: 'UP' })
 || Object.keys(clients || {})[0];
@@ -129,9 +112,7 @@ const embed = async (input, options = {}) => {
 'Only one type of input is allowed at a time.', 400
 );
 if (x.text) {
-x.text = await
-x.text, MODEL_CONFIG[model]?.maxTokens
-);
+x.text = await trimText(x.text, MODEL_CONFIG[model]?.maxTokens);
 } else if (x.image) {
 assert(
 MODEL_CONFIG[model]?.image,
package/lib/manifest.mjs
CHANGED