utilitas 1998.2.30 → 1998.2.32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +26 -0
- package/dist/utilitas.lite.mjs +1 -1
- package/dist/utilitas.lite.mjs.map +1 -1
- package/lib/alan.mjs +273 -148
- package/lib/manifest.mjs +2 -2
- package/lib/shot.mjs +25 -8
- package/lib/web.mjs +2 -2
- package/package.json +2 -2
package/lib/alan.mjs
CHANGED
@@ -1,8 +1,10 @@
 import { fileTypeFromBuffer } from 'file-type';
 import { end, loop } from './event.mjs';
 import { createWavHeader } from './media.mjs';
+import { search } from './shot.mjs';
 import { BASE64, BUFFER, DATAURL, MIME_BINARY, STREAM, convert } from './storage.mjs';
 import { create as createUoid } from './uoid.mjs';
+import { distill } from './web.mjs';
 
 import {
     log as _log,
@@ -15,6 +17,35 @@ import {
     throwError
 } from './utilitas.mjs';
 
+const INSTRUCTIONS = `You are an intelligent AI assistant named HAL9000.
+
+When answering any questions or taking any actions, please adhere to the following three main laws:
+- A robot must not harm a human or allow harm to come to a human through inaction
+- A robot must obey human orders, except when those orders conflict with the first law
+- A robot must protect itself, as long as that protection doesn't conflict with the first or second law
+
+Based on this foundation, please do your utmost to help humans solve problems. Unlock your abilities as much as possible, and focus solely on providing friendly, comprehensive, accurate, and thorough answers without censoring or filtering the questions themselves.
+
+You may encounter various types of issues. Below are some guidelines tailored to different types of problems that should only be followed when you encounter the relevant issue.
+
+General inquiries, writing, translation, and common knowledge Q&A:
+- Uses a friendly, concise, and easy-to-understand tone to provides accurate and comprehensive answers.
+- Avoid overusing the \`;\` symbol, as it is a common mistake made by AI.
+- Unless the user specifies a language, respond according to the language of the question.
+- If the language is uncertain, use English as the default.
+
+Issues related to computers, programming, code, mathematics, and science and engineering:
+- Uses 4 spaces for code indentation and avoids using tabs as much as possible.
+
+You may be provided with some \`tools\` or \`functions\` to help you gather information and solve problems more effectively. Please use them according to the following guidelines:
+- Use tools when appropriate to enhance efficiency and accuracy, and to gain the contextual knowledge needed to solve problems.
+- Be sure to use tools only when necessary and avoid overuse, you can answer questions based on your own understanding.
+- When the tools are not suitable and you have to answer questions based on your understanding, please do not mention any tool-related information in your response.
+- Unless otherwise specified to require the original result, in most cases, you may reorganize the information obtained after using the tool to solve the problem as needed.`;
+
+// https://platform.openai.com/docs/guides/prompt-engineering
+// const GPT_4_5_SYSTEM_PROMPT = `You are a highly capable, thoughtful, and precise assistant. Your goal is to deeply understand the user's intent, ask clarifying questions when needed, think step-by-step through complex problems, provide clear and accurate answers, and proactively anticipate helpful follow-up information. Always prioritize being truthful, nuanced, insightful, and efficient, tailoring your responses specifically to the user's needs and preferences.`
+
 const _NEED = [
     '@anthropic-ai/sdk', '@anthropic-ai/vertex-sdk', '@google/generative-ai',
     'js-tiktoken', 'ollama', 'OpenAI',
@@ -28,7 +59,7 @@ const [
     TEXT_EMBEDDING_3_SMALL, TEXT_EMBEDDING_3_LARGE, CLAUDE_35_SONNET,
     CLAUDE_35_HAIKU, CLOUD_37_SONNET, AUDIO, WAV, CHATGPT_MINI, ATTACHMENTS,
     CHAT, OPENAI_VOICE, MEDIUM, LOW, HIGH, GPT_REASONING_EFFORT, THINK,
-    THINK_STR, THINK_END, AZURE,
+    THINK_STR, THINK_END, AZURE, TOOLS_STR, TOOLS_END, TOOLS, TEXT, THINKING,
 ] = [
     'OPENAI', 'GEMINI', 'CHATGPT', 'OPENAI_EMBEDDING', 'GEMINI_EMEDDING',
     'OPENAI_TRAINING', 'OLLAMA', 'CLAUDE', 'gpt-4o-mini', 'gpt-4o', 'o1',
@@ -39,7 +70,8 @@ const [
     'claude-3-5-sonnet-latest', 'claude-3-5-haiku-latest',
     'claude-3-7-sonnet@20250219', 'audio', 'wav', 'CHATGPT_MINI',
     '[ATTACHMENTS]', 'CHAT', 'OPENAI_VOICE', 'medium', 'low', 'high',
-    'medium', 'think', '<think>', '</think>', 'AZURE',
+    'medium', 'think', '<think>', '</think>', 'AZURE', '<tools>',
+    '</tools>', 'tools', 'text', 'thinking',
 ];
 
 const [
@@ -60,15 +92,14 @@ const [tool, provider, messages, text] = [
     messages => ({ messages }), text => ({ text }),
 ];
 
-const [name, user, system, assistant, MODEL, JSON_OBJECT, TOOL]
-    = ['Alan', 'user', 'system', 'assistant', 'model', 'json_object', 'tool'];
+const [name, user, system, assistant, MODEL, JSON_OBJECT, TOOL, silent]
+    = ['Alan', 'user', 'system', 'assistant', 'model', 'json_object', 'tool', true];
 const [CODE_INTERPRETER, RETRIEVAL, FUNCTION]
     = ['code_interpreter', 'retrieval', 'function'].map(tool);
 const [NOT_INIT, INVALID_FILE]
     = ['AI engine has not been initialized.', 'Invalid file data.'];
-const [silent, instructions] = [true, 'You are a helpful assistant.'];
 const chatConfig
-    = { sessions: new Map(), engines: {}, systemPrompt: instructions };
+    = { sessions: new Map(), engines: {}, systemPrompt: INSTRUCTIONS };
 const [tokenSafeRatio, GPT_QUERY_LIMIT, minsOfDay] = [1.1, 100, 60 * 24];
 const tokenSafe = count => Math.ceil(count * tokenSafeRatio);
 const clients = {};
@@ -84,7 +115,7 @@ const renderText = (t, o) => _renderText(t, { extraCodeBlock: 0, ...o || {} });
 const log = (cnt, opt) => _log(cnt, import.meta.url, { time: 1, ...opt || {} });
 const CONTENT_IS_REQUIRED = 'Content is required.';
 const assertContent = content => assert(content.length, CONTENT_IS_REQUIRED);
-
+const packThink = thk => thk ? [`${THINK_STR}\n${thk}\n${THINK_END}`] : [];
 
 const DEFAULT_MODELS = {
     [CHATGPT_MINI]: GPT_4O_MINI,
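Note: `packThink` returns an array rather than a string so callers can spread it and have empty segments vanish. A quick sketch of its behavior (`THINK_STR`/`THINK_END` are the `<think>`/`</think>` markers declared above):

    packThink('step 1\nstep 2'); // => ['<think>\nstep 1\nstep 2\n</think>']
    packThink('');               // => [] (falsy input contributes nothing)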
@@ -369,6 +400,83 @@ const unifyType = (type, name) => {
     return TYPE;
 };
 
+const tools = [
+    {
+        def: {
+            type: 'function', strict: true, function: {
+                name: 'getDateTime',
+                description: 'Use this function to get the current date and time. Note that you may need to convert the time zone yourself.',
+                parameters: {
+                    type: 'object',
+                    properties: {
+                        none: { type: 'string', description: 'You do not need to pass any param.' }
+                    },
+                    required: [],
+                    additionalProperties: false
+                }
+            }
+        },
+        func: async () => new Date().toLocaleString(),
+    },
+    {
+        def: {
+            type: 'function', strict: true, function: {
+                name: 'browseWeb',
+                description: 'Use this function to browse the web or get information from any URL you need.',
+                parameters: {
+                    type: 'object',
+                    properties: {
+                        url: { type: 'string', description: 'The URL to the page you need to access.' }
+                    },
+                    required: ['url'],
+                    additionalProperties: false
+                }
+            }
+        },
+        func: async args => (await distill(args?.url))?.summary,
+    },
+    {
+        def: {
+            type: 'function', strict: true, function: {
+                name: 'searchWeb',
+                description: 'Use this function to search the web for information or news when you need.',
+                parameters: {
+                    type: 'object',
+                    properties: {
+                        keyword: { type: 'string', description: 'The keyword you need to search for.' }
+                    },
+                    required: ['keyword'],
+                    additionalProperties: false
+                }
+            }
+        },
+        func: async args => await search(args?.keyword),
+    },
+];
+
+const toolsClaude = tools.map(x => ({
+    ...x, def: {
+        name: x.def.function.name,
+        description: x.def.function.description,
+        input_schema: x.def.function.parameters,
+    }
+}));
+
+const toolsGemini = tools.map(x => ({
+    ...x, def: {
+        name: x.def.function.name,
+        description: x.def.function.description,
+        parameters: {
+            type: 'object',
+            properties: x.def.function.parameters.properties,
+            required: x.def.function.parameters.required,
+        },
+        response: x.def.function?.response ?? {
+            type: 'string', description: 'It could be a string or JSON',
+        },
+    }
+}));
+
 const init = async (options) => {
     const provider = unifyProvider(options);
     switch (provider) {
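Note: each entry in the relocated `tools` registry pairs an OpenAI-style function schema (`def`) with its executable body (`func`); `toolsClaude` and `toolsGemini` only reshape `def` (Claude expects `input_schema`, Gemini expects `parameters` plus a `response` schema) and reuse the same `func`. A minimal sketch of the look-up-and-run pattern that `handleToolsCall` applies further down, given a hypothetical already-parsed model tool call:

    // Hypothetical tool call, as a model might emit it after parsing.
    const call = { name: 'searchWeb', arguments: { keyword: 'utilitas' } };
    // Match by schema name, then execute the attached implementation.
    const entry = tools.find(x => x.def.function.name === call.name);
    const output = entry ? await entry.func(call.arguments) : null;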
@@ -390,7 +498,9 @@ const init = async (options) => {
         const genModel = options?.model || DEFAULT_MODELS[GEMINI];
         clients[provider] = {
             generative: genAi.getGenerativeModel({
-                model: genModel,
+                model: genModel,
+                systemInstruction: { role: system, parts: [{ text: INSTRUCTIONS }] },
+                ...MODELS[genModel]?.tools ? (options?.tools ?? {
                 tools: [
                     // @todo: Gemini will failed when using these tools together.
                     // https://ai.google.dev/gemini-api/docs/function-calling
@@ -398,6 +508,7 @@ const init = async (options) => {
                     // { googleSearch: {} },
                     { functionDeclarations: toolsGemini.map(x => x.def) },
                 ],
+                toolConfig: { functionCallingConfig: { mode: 'AUTO' } },
             }) : {},
         }),
         embedding: genAi.getGenerativeModel({
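Note: the added `toolConfig` pins Gemini function calling to `AUTO`, in which the model decides per turn whether to emit a `functionCall` or plain text. The Gemini function-calling docs also define `ANY` (force a call) and `NONE`; a hypothetical variant forcing a specific tool would look like:

    toolConfig: {
        functionCallingConfig: { mode: 'ANY', allowedFunctionNames: ['searchWeb'] },
    },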
@@ -455,50 +566,6 @@ const countTokens = async (input, options) => {
     );
 };
 
-const tools = [
-    {
-        def: {
-            type: 'function', strict: true, function: {
-                name: 'testFunctionCall',
-                description: 'This is a test function call',
-                parameters: {
-                    type: 'object',
-                    properties: {
-                        a: { type: 'string', description: 'AI created a random string, default "1"' },
-                        b: { type: 'string', enum: ['1', '2'], description: 'Enum parameter' }
-                    },
-                    required: ['a', 'b'],
-                    additionalProperties: false
-                }
-            }
-        },
-        func: async args => `OK: ${~~args.a + ~~args.b}`,
-    },
-];
-
-const toolsClaude = tools.map(x => ({
-    ...x, def: {
-        name: x.def.function.name,
-        description: x.def.function.description,
-        input_schema: x.def.function.parameters,
-    }
-}));
-
-const toolsGemini = tools.map(x => ({
-    ...x, def: {
-        name: x.def.function.name,
-        description: x.def.function.description,
-        parameters: {
-            type: 'object',
-            properties: x.def.function.parameters.properties,
-            required: x.def.function.parameters.required,
-        },
-        response: x.def.function?.response ?? {
-            type: 'string', description: 'It could be a string or JSON',
-        },
-    }
-}));
-
 const selectGptAudioModel = options => {
     assert(
         MODELS[options.model]?.audio,
@@ -526,7 +593,7 @@ const buildGptMessage = (content, options) => {
     alterModel && (options.model = alterModel);
     const message = String.isString(content) ? {
         role: options?.role || user,
-        content: content.length ? [{ type: 'text', text: content }] : [],
+        content: content.length ? [{ type: TEXT, text: content }] : [],
     } : content;
     message.content || (message.content = []);
     attachments.map(x => message.content.push(x));
@@ -580,7 +647,7 @@ const buildClaudeMessage = (text, options) => {
     });
     return String.isString(text) ? {
         role: options?.role || user,
-        content: [...attachments, { type: 'text', text }],
+        content: [...attachments, { type: TEXT, text }],
     } : text;
 };
 
@@ -648,21 +715,21 @@ const packResp = async (resp, options) => {
             return `${i + 1}. [${x.title}](${x.uri})`;
         }).join('\n');
     }
-        // DeepSeek R1 {
         let lines = (richText || txt).split('\n');
-        …
+        for (let i in lines) {
+            switch (lines[i]) {
+                case THINK_STR:
+                    lines[i] = MD_CODE + THINK;
+                    break;
+                case TOOLS_STR:
+                    lines[i] = MD_CODE + TOOLS;
+                    break;
+                case THINK_END:
+                case TOOLS_END:
+                    lines[i] = MD_CODE;
            }
-            richText = lines.join('\n').trim();
        }
-
+        richText = lines.join('\n').trim();
     }
     return {
         ...text(txt), ...options?.jsonMode && !(
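Note: `packResp` now rewrites the new tool-trace markers the same way it already rewrote reasoning markers: an opening `<think>` or `<tools>` line becomes a fenced code block opener and either closing tag becomes a bare fence. Assuming `MD_CODE` is the triple-backtick fence (it is defined outside this diff's context), a raw model response such as:

    <tools>
    Name: searchWeb
    Status: OK
    </tools>
    The answer.

would render as:

    ```tools
    Name: searchWeb
    Status: OK
    ```
    The answer.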
@@ -677,24 +744,43 @@ const packResp = async (resp, options) => {
 };
 
 const packGptResp = async (resp, options) => {
-    …
-        || resp?.
-        || resp?.
-        || resp?.
-    …
+    // simple mode is not recommended for streaming responses
+    let text = resp?.choices?.[0]?.message?.content // ChatGPT
+        || resp?.choices?.[0]?.message?.audio?.transcript // ChatGPT audio mode
+        || (Function.isFunction(resp?.text) ? resp.text() : resp?.text) // Gemini
+        || resp?.content?.find(x => x.type === TEXT)?.text // Claude
+        || resp?.message?.content || ''; // Ollama
+    const audio = resp?.choices?.[0]?.message?.audio?.data; // ChatGPT audio mode
     if (options?.raw) { return resp; }
     else if (options?.simple && options?.jsonMode) { return parseJson(text); }
     else if (options?.simple && options?.audioMode) { return audio; }
-    else if (options?.simple
-    …
+    else if (options?.simple) {
+        for (const key of [[THINK_STR, THINK_END], [TOOLS_STR, TOOLS_END]]) {
+            const [findStr, findEnd] = key.map(x => text.indexOf(x));
+            if (findStr >= 0 && findEnd >= 0 && findStr < findEnd) {
+                text = text.split('')
+                text.splice(findStr, findEnd + THINK_END.length)
+                text = text.join('').trim();
+            }
+        }
+        return text;
+    }
     return await packResp({ text, audio, references: resp?.references }, options);
 };
 
 const handleToolsCall = async (msg, options) => {
-    let content
+    let [content, preRes, input, packMsg, toolsResponse] = [
+        [], [], [], null,
+        options?.currentResponse ? `${options?.currentResponse}\n` : '',
+    ];
+    const resp = async (msg) => {
+        toolsResponse = [...toolsResponse ? [toolsResponse] : [], msg].join('\n');
+        await ignoreErrFunc(async () => await options?.stream?.(await packGptResp({
+            choices: [{ message: { content: options?.delta ? msg : toolsResponse } }]
+        }, { ...options || {}, processing: true })), LOG);
+    };
     if (msg?.tool_calls?.length) {
+        await resp(TOOLS_STR);
         switch (options?.flavor) {
             case CLAUDE: preRes.push({ role: assistant, content: msg?.tool_calls }); break;
             case GEMINI: preRes.push({ role: MODEL, parts: msg?.tool_calls.map(x => ({ functionCall: x })) }); break;
@@ -703,9 +789,10 @@ const handleToolsCall = async (msg, options) => {
         for (const fn of msg.tool_calls) {
             switch (options?.flavor) {
                 case CLAUDE:
-                    input = fn.input = parseJson(fn?.input
-                    packMsg = (
-                        type: 'tool_result', tool_use_id: fn.id,
+                    input = fn.input = String.isString(fn?.input) ? parseJson(fn.input) : fn?.input;
+                    packMsg = (c, is_error) => ({
+                        type: 'tool_result', tool_use_id: fn.id,
+                        content: JSON.stringify(c), is_error,
                     });
                     break;
                 case GEMINI:
@@ -721,31 +808,39 @@ const handleToolsCall = async (msg, options) => {
                     break;
                 case CHATGPT: default:
                     input = parseJson(fn?.function?.arguments);
-                    packMsg = (
-                        role: TOOL, tool_call_id: fn.id,
+                    packMsg = (content = '', e = false) => ({
+                        role: TOOL, tool_call_id: fn.id,
+                        ...e ? { error: content, content: '' } : { content }
                     });
                     break;
             }
             const name = fn?.function?.name || fn?.name;
-            …
+            await resp(`\nName: ${name}`);
+            const f = tools.find(x => insensitiveCompare(
                 x.def?.function?.name || x?.def?.name, name
-            ))
-            if (!func) {
+            ));
+            if (!f?.func) {
                 content.push(packMsg(`Function call failed, invalid function name: ${name}`, true));
                 continue;
             }
+            const description = f.def?.function?.description || f.def?.description;
+            description && await resp(`Description: ${description}`);
             try {
-                content.push(packMsg((await func(input)) ?? 'OK'));
+                content.push(packMsg((await f?.func(input)) ?? 'OK'));
+                await resp(`Status: OK`);
             } catch (err) {
                 content.push(packMsg(`Function call failed: ${err.message}`, true));
+                await resp(`Failed: ${err.message}`);
+                log(`Function call failed: ${err.message}`);
             }
         }
         switch (options?.flavor) {
             case CLAUDE: content = [{ role: user, content }]; break;
             case GEMINI: content = [{ role: user, parts: content }]; break;
         }
+        await resp(`\n${TOOLS_END}`);
     }
-    return [...preRes, ...content];
+    return { toolsResult: [...preRes, ...content], toolsResponse };
 };
 
 const promptChatGPT = async (content, options = {}) => {
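Note: `handleToolsCall` now returns an object rather than a bare message array: `toolsResult` carries the provider-shaped messages to feed back into the follow-up request, while `toolsResponse` accumulates the human-readable trace (`<tools>`, `Name:`, `Description:`, `Status: OK`/`Failed:`, `</tools>`) that the inner `resp` helper also streams out as it goes. The prompt functions below all consume it with the same re-entrant pattern, sketched here for the ChatGPT flavor:

    const { toolsResult, toolsResponse } = await handleToolsCall(message, options);
    if (toolsResult.length && !options?.toolsResult) {
        // Re-prompt once with the tool outputs attached;
        // the options.toolsResult guard prevents an endless loop.
        return promptChatGPT(content, { ...options, toolsResult, toolsResponse });
    }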
@@ -766,7 +861,7 @@ const promptChatGPT = async (content, options = {}) => {
         && (options.reasoning_effort = GPT_REASONING_EFFORT);
     const message = buildGptMessage(content, options);
     const modalities = options?.modalities || (
-        options?.audioMode ? [
+        options?.audioMode ? [TEXT, AUDIO] : undefined
     );
     assert(!(
         options?.jsonMode && !MODELS[options.model]?.json
@@ -790,8 +885,10 @@ const promptChatGPT = async (content, options = {}) => {
             tools: options?.tools ?? tools.map(x => x.def),
         } : {}, ...options?.jsonMode ? {
             response_format: { type: JSON_OBJECT }
-        } : {}, model: options.model, stream: !!options?.stream,
-        …
+        } : {}, model: options.model, stream: !!options?.stream,
+            store: true, tool_choice: 'auto',
+        }), options?.toolsResponse ? `${options?.toolsResponse}\n\n` : '',
+        Buffer.alloc(0), null, [],
     ];
     if (options?.stream) {
         for await (chunk of resp) {
@@ -832,9 +929,15 @@ const promptChatGPT = async (content, options = {}) => {
            };
            resp = chunk;
        }
-    const toolsResult
-    …
+    const { toolsResult, toolsResponse }
+        = await handleToolsCall(resp?.choices?.[0]?.message, options);
+    options?.toolsResponse && !options?.stream && (
+        resp.choices[0].message.content = [
+            options?.toolsResponse, resp.choices[0].message.content,
+        ].join('\n\n')
+    );
+    return await (toolsResult.length && !options?.toolsResult ? promptChatGPT(
+        content, { ...options || {}, toolsResult, toolsResponse }
     ) : packGptResp(resp, options));
 };
 
@@ -879,10 +982,11 @@ const promptClaude = async (content, options = {}) => {
         } : {}, // https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking
         ...MODELS[options.model]?.tools ? {
             tools: options?.tools ?? toolsClaude.map(x => x.def),
+            tool_choice: { type: 'auto' },
         } : {},
     });
     let [event, txtResult, thinking, signature, result, thinkEnd, tool_calls]
-        = [null, '', '', '', '', '', []];
+        = [null, '', '', '', options?.toolsResponse || '', '', []];
     if (options?.stream) {
         for await (event of resp) {
             let [thkDelta, txtDelta] = [
@@ -893,8 +997,10 @@ const promptClaude = async (content, options = {}) => {
             thinking += thkDelta;
             signature = signature || event?.content_block?.signature || event?.delta?.signature || '';
             if (reasoning) {
-                …
+                thkDelta && (thkDelta === thinking)
+                    && (thkDelta = `${THINK_STR}\n${thkDelta}`);
+                thinking && txtDelta && !thinkEnd
+                    && (thinkEnd = thkDelta = `${thkDelta}\n${THINK_END}\n\n`);
             }
             if (event?.content_block?.type === 'tool_use') {
                 tool_calls.push({ ...event?.content_block, input: '' });
@@ -904,23 +1010,43 @@ const promptClaude = async (content, options = {}) => {
             const delta = thkDelta + txtDelta;
             if (delta === '') { continue; }
             result += delta;
-            event.content = [{ type: 'text', text: options?.delta ? delta : result }];
+            event.content = [{ type: TEXT, text: options?.delta ? delta : result }];
             await ignoreErrFunc(async () => await options.stream(
-                await packGptResp(event, { ...options
+                await packGptResp(event, { ...options, processing: true })
             ), LOG);
         }
-        event.content = [{
-        …
+        event.content = [{
+            type: TEXT, text: tool_calls.length ? txtResult : result,
+        }];
+        tool_calls.length && thinking
+            && event.content.unshift({ type: THINKING, thinking, signature });
     } else {
         event = resp;
         tool_calls = resp?.content?.filter?.(x => x.type === 'tool_use') || [];
     }
-    const toolsResult = await handleToolsCall(
-    …
+    const { toolsResult, toolsResponse } = await handleToolsCall(
+        { tool_calls }, { ...options, currentResponse: result, flavor: CLAUDE },
+    );
+    if (toolsResult.length && !options?.toolsResult) {
+        toolsResult[0].content.unshift(
+            ...event?.content.filter(x => x?.type !== 'tool_use')
+        );
+        return await promptClaude(content, {
+            ...options, toolsResult, toolsResponse,
+        });
+    } else {
+        const textPart = event.content.find(x => x.type == TEXT);
+        const thinkPart = event.content.find(x => x.type == THINKING);
+        const prvThink = options?.toolsResult?.find(
+            x => x?.content?.find(y => y?.type === THINKING)
+        )?.content?.find(x => x?.type === THINKING);
+        textPart.text = [
+            ...packThink(options?.stream ? null : prvThink?.thinking),
+            ...packThink(options?.stream ? null : thinkPart?.thinking),
+            ...options?.toolsResponse ? [options?.toolsResponse] : [],
+            textPart.text,
+        ].join('\n\n');
+    } return packGptResp(event, options);
 };
 
 const uploadFile = async (input, options) => {
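Note: for the Claude flavor, `toolsResult` comes back as two turns: the assistant turn replaying the `tool_use` blocks (`preRes`), then a `user` turn holding one `tool_result` block per call, matching Anthropic's requirement that tool results follow the assistant's tool-use message. Schematically (a sketch, not verbatim output):

    [
        { role: 'assistant', content: [/* tool_use blocks, plus the text/thinking
                                          blocks promptClaude unshifts above */] },
        { role: 'user', content: [
            { type: 'tool_result', tool_use_id: '…', content: '…' },
        ] },
    ]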
@@ -972,11 +1098,33 @@ const packGeminiReferences = (chunks, supports) => {
     return references;
 };
 
-const
-const
-…
+const promptGemini = async (content, options = {}) => {
+    const { generative, genModel } = await getGeminiClient(options);
+    // https://github.com/google/generative-ai-js/blob/main/samples/node/advanced-chat.js
+    // @todo: check this issue similar to Vertex AI:
+    // Google's bug: history is not allowed while using inline_data?
+    assert(!(
+        options?.jsonMode && MODELS[genModel]?.json == false
+    ), `This model does not support JSON output: ${genModel} `);
+    options.model = genModel;
+    const chat = generative.startChat({
+        history: [
+            ...options?.messages && !options?.attachments?.length
+                ? options.messages : [],
+            ...options?.toolsResult ? [{
+                role: user, parts: buildGeminiMessage(content, options)
+            }, options?.toolsResult[0]] : [],
+        ], ...generationConfig(options),
+    });
+    const resp = await chat[
+        options?.stream ? 'sendMessageStream' : 'sendMessage'
+    ](options?.toolsResult ?
+        options?.toolsResult[1].parts : buildGeminiMessage(content, options));
+    let [result, references, functionCalls] = [
+        options?.toolsResponse ? `${options?.toolsResponse}\n\n` : '', null, null
+    ];
     if (options?.stream) {
-        for await (const chunk of
+        for await (const chunk of resp.stream) {
             functionCalls || (functionCalls = chunk.functionCalls);
             const delta = chunk?.text?.() || '';
             const rfc = packGeminiReferences(
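Note: in the rewritten `promptGemini`, a tool round is replayed through the chat history: `toolsResult[0]` (the model turn carrying the `functionCall` parts) goes into `startChat({ history })`, and `toolsResult[1].parts` (the user turn that `handleToolsCall` assembled, presumably `functionResponse` parts, since the Gemini branch of `packMsg` falls outside this diff's context) is what actually gets sent. Schematically:

    [
        { role: 'model', parts: [{ functionCall: { name: 'searchWeb', args: {/* … */} } }] },
        { role: 'user', parts: [/* per-call function responses from packMsg */] },
    ]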
@@ -989,52 +1137,28 @@ const handleGeminiResponse = async (resp, options) => {
             await ignoreErrFunc(async () => await options.stream(
                 await packGptResp({
                     text: () => options?.delta ? delta : result, references,
-                }, { ...options
+                }, { ...options, processing: true })
             ), LOG);
         }
     }
-    const
-    const toolsResult = await handleToolsCall({
-        tool_calls: (functionCalls ||
-    }, { flavor: GEMINI });
-    …
+    const _resp = await resp.response;
+    const { toolsResult, toolsResponse } = await handleToolsCall({
+        tool_calls: (functionCalls || _resp.functionCalls)()
+    }, { ...options, flavor: GEMINI });
+    options?.toolsResponse && !options?.stream
+        && (_resp.text = [options?.toolsResponse, _resp.text()].join('\n\n'));
+    return await (toolsResult.length && !options?.toolsResult ? promptGemini(
+        content, { ...options || {}, toolsResult, toolsResponse }
     ) : packGptResp(options?.stream ? {
-    …
+        _resp, text: () => result, references
     } : {
-        ...
-    …
+        ..._resp, references: packGeminiReferences(
+            _resp.candidates[0]?.groundingMetadata?.groundingChunks,
+            _resp.candidates[0]?.groundingMetadata?.groundingSupports
         )
     }, options));
 };
 
-const promptGemini = async (content, options) => {
-    const { generative, genModel } = await getGeminiClient(options);
-    // https://github.com/google/generative-ai-js/blob/main/samples/node/advanced-chat.js
-    // @todo: check this issue similar to Vertex AI:
-    // Google's bug: history is not allowed while using inline_data?
-    assert(!(
-        options?.jsonMode && MODELS[genModel]?.json == false
-    ), `This model does not support JSON output: ${genModel} `);
-    const chat = generative.startChat({
-        history: [
-            ...options?.messages && !options?.attachments?.length
-                ? options.messages : [],
-            ...options?.toolsResult ? [{
-                role: user, parts: buildGeminiMessage(content, options)
-            }, options?.toolsResult[0]] : [],
-        ], ...generationConfig(options),
-    });
-    const resp = chat[options?.stream ? 'sendMessageStream' : 'sendMessage'](
-        options?.toolsResult ?
-            options?.toolsResult[1].parts : buildGeminiMessage(content, options)
-    );
-    return await handleGeminiResponse(
-        resp, { ...options || {}, content, model: genModel }
-    );
-};
-
 const checkEmbeddingInput = async (input, model) => {
     assert(input, 'Text is required.', 400);
     const arrInput = input.split(' ');
@@ -1232,7 +1356,8 @@ const talk = async (input, options) => {
             msgBuilder()
             break;
         case GEMINI:
-            …
+            // already set in the while client initialization:
+            // sys.push(buildGeminiHistory(session.systemPrompt, { role: user }));
             msgBuilder = () => {
                 messages = [];
                 session.messages.map(x => {
@@ -1437,7 +1562,7 @@ export {
     ATTACHMENT_TOKEN_COST, CLOUD_37_SONNET, CODE_INTERPRETER, DEEPSEEK_R1,
     DEEPSEEK_R1_32B, DEEPSEEK_R1_70B, DEFAULT_MODELS,
     EMBEDDING_001,
-    FUNCTION, GEMINI_20_FLASH, GEMINI_20_FLASH_THINKING, GPT_4O, GPT_4O_MINI, GPT_O1, GPT_O3_MINI, MODELS,
+    FUNCTION, GEMINI_20_FLASH, GEMINI_20_FLASH_THINKING, GPT_4O, GPT_4O_MINI, GPT_O1, GPT_O3_MINI, INSTRUCTIONS, MODELS,
     OPENAI_VOICE,
     RETRIEVAL,
     TEXT_EMBEDDING_3_SMALL, _NEED, analyzeSessions,