utilitas 1998.2.30 → 1998.2.31
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/utilitas.lite.mjs +1 -1
- package/dist/utilitas.lite.mjs.map +1 -1
- package/lib/alan.mjs +196 -137
- package/lib/manifest.mjs +1 -1
- package/package.json +1 -1
package/lib/alan.mjs
CHANGED
@@ -28,7 +28,7 @@ const [
     TEXT_EMBEDDING_3_SMALL, TEXT_EMBEDDING_3_LARGE, CLAUDE_35_SONNET,
     CLAUDE_35_HAIKU, CLOUD_37_SONNET, AUDIO, WAV, CHATGPT_MINI, ATTACHMENTS,
     CHAT, OPENAI_VOICE, MEDIUM, LOW, HIGH, GPT_REASONING_EFFORT, THINK,
-    THINK_STR, THINK_END, AZURE,
+    THINK_STR, THINK_END, AZURE, TOOLS_STR, TOOLS_END, TOOLS, TEXT, THINKING,
 ] = [
     'OPENAI', 'GEMINI', 'CHATGPT', 'OPENAI_EMBEDDING', 'GEMINI_EMEDDING',
     'OPENAI_TRAINING', 'OLLAMA', 'CLAUDE', 'gpt-4o-mini', 'gpt-4o', 'o1',
@@ -39,7 +39,8 @@ const [
     'claude-3-5-sonnet-latest', 'claude-3-5-haiku-latest',
     'claude-3-7-sonnet@20250219', 'audio', 'wav', 'CHATGPT_MINI',
     '[ATTACHMENTS]', 'CHAT', 'OPENAI_VOICE', 'medium', 'low', 'high',
-    'medium', 'think', '<think>', '</think>', 'AZURE',
+    'medium', 'think', '<think>', '</think>', 'AZURE', '<tools>',
+    '</tools>', 'tools', 'text', 'thinking',
 ];

 const [
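A note for reading the two hunks above (an editorial aside, not part of the diff): alan.mjs declares its string constants by destructuring one long array of names against one long array of literals, so every addition must land at the same position in both hunks. A minimal sketch of the idiom:

```js
// Sketch of the positional-destructuring idiom used in alan.mjs:
// each name binds to the literal at the same index, so the name list
// and the value list must be extended in lockstep.
const [THINK_STR, THINK_END, TOOLS_STR, TOOLS_END] =
    ['<think>', '</think>', '<tools>', '</tools>'];
console.log(TOOLS_STR, TOOLS_END); // -> <tools> </tools>
```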
@@ -84,6 +85,7 @@ const renderText = (t, o) => _renderText(t, { extraCodeBlock: 0, ...o || {} });
 const log = (cnt, opt) => _log(cnt, import.meta.url, { time: 1, ...opt || {} });
 const CONTENT_IS_REQUIRED = 'Content is required.';
 const assertContent = content => assert(content.length, CONTENT_IS_REQUIRED);
+const packThink = thk => thk ? [`${THINK_STR}\n${thk}\n${THINK_END}`] : [];


 const DEFAULT_MODELS = {
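The new `packThink` helper wraps a non-empty reasoning string in the `<think>` sentinels and returns it as a zero-or-one element array, so callers can spread it into a `join('\n\n')` without special-casing the empty string. Quick usage, assuming the constants above:

```js
const [THINK_STR, THINK_END] = ['<think>', '</think>'];
const packThink = thk => thk ? [`${THINK_STR}\n${thk}\n${THINK_END}`] : [];

packThink('plan the answer'); // ['<think>\nplan the answer\n</think>']
packThink('');                // [] — spreads away in [...packThink(x), text]
```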
@@ -369,6 +371,50 @@ const unifyType = (type, name) => {
     return TYPE;
 };

+const tools = [
+    {
+        def: {
+            type: 'function', strict: true, function: {
+                name: 'testFunctionCall',
+                description: 'This is a test function call',
+                parameters: {
+                    type: 'object',
+                    properties: {
+                        a: { type: 'string', description: 'AI created a random string, default "1"' },
+                        b: { type: 'string', enum: ['1', '2'], description: 'Enum parameter' }
+                    },
+                    required: ['a', 'b'],
+                    additionalProperties: false
+                }
+            }
+        },
+        func: async args => `OK: ${~~args.a + ~~args.b}`,
+    },
+];
+
+const toolsClaude = tools.map(x => ({
+    ...x, def: {
+        name: x.def.function.name,
+        description: x.def.function.description,
+        input_schema: x.def.function.parameters,
+    }
+}));
+
+const toolsGemini = tools.map(x => ({
+    ...x, def: {
+        name: x.def.function.name,
+        description: x.def.function.description,
+        parameters: {
+            type: 'object',
+            properties: x.def.function.parameters.properties,
+            required: x.def.function.parameters.required,
+        },
+        response: x.def.function?.response ?? {
+            type: 'string', description: 'It could be a string or JSON',
+        },
+    }
+}));
+
 const init = async (options) => {
     const provider = unifyProvider(options);
     switch (provider) {
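The block above keeps a single OpenAI-style function schema as the source of truth and derives the Anthropic shape (`input_schema`) and the Gemini shape (`parameters` plus a `response` hint) from it with `map`. The test tool itself just coerces its two string arguments to integers and adds them; a quick check of that behavior:

```js
// The ~~ double-bitwise-NOT truncates a value to a 32-bit integer,
// so non-numeric strings coerce to 0.
const func = async args => `OK: ${~~args.a + ~~args.b}`;

await func({ a: '1', b: '2' }); // 'OK: 3'
await func({ a: 'x', b: '2' }); // 'OK: 2'
```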
@@ -390,7 +436,8 @@ const init = async (options) => {
             const genModel = options?.model || DEFAULT_MODELS[GEMINI];
             clients[provider] = {
                 generative: genAi.getGenerativeModel({
-                    model: genModel,
+                    model: genModel,
+                    ...MODELS[genModel]?.tools ? (options?.tools ?? {
                         tools: [
                             // @todo: Gemini will failed when using these tools together.
                             // https://ai.google.dev/gemini-api/docs/function-calling
@@ -398,6 +445,9 @@ const init = async (options) => {
                             // { googleSearch: {} },
                             { functionDeclarations: toolsGemini.map(x => x.def) },
                         ],
+                        toolConfig: { functionCallingConfig: { mode: 'AUTO' } },
+                        // @todo
+                        // systemInstruction: { role: "system", parts: [{ text: 'only use function when needed' }] },
                     }) : {},
                 }),
                 embedding: genAi.getGenerativeModel({
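These two hunks gate Gemini function calling on the model's `tools` capability flag, register the shared declarations, and pin the calling mode to `AUTO` (the model decides whether to answer or call a function). A standalone sketch with the `@google/generative-ai` SDK; the model name is a placeholder:

```js
import { GoogleGenerativeAI } from '@google/generative-ai';

const genAi = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
const generative = genAi.getGenerativeModel({
    model: 'gemini-1.5-flash', // placeholder model name
    tools: [{
        functionDeclarations: [{
            name: 'testFunctionCall',
            description: 'This is a test function call',
            parameters: { type: 'object', properties: { a: { type: 'string' } } },
        }],
    }],
    // AUTO: the model chooses between a text reply and a function call.
    toolConfig: { functionCallingConfig: { mode: 'AUTO' } },
});
```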
@@ -455,50 +505,6 @@ const countTokens = async (input, options) => {
     );
 };

-const tools = [
-    {
-        def: {
-            type: 'function', strict: true, function: {
-                name: 'testFunctionCall',
-                description: 'This is a test function call',
-                parameters: {
-                    type: 'object',
-                    properties: {
-                        a: { type: 'string', description: 'AI created a random string, default "1"' },
-                        b: { type: 'string', enum: ['1', '2'], description: 'Enum parameter' }
-                    },
-                    required: ['a', 'b'],
-                    additionalProperties: false
-                }
-            }
-        },
-        func: async args => `OK: ${~~args.a + ~~args.b}`,
-    },
-];
-
-const toolsClaude = tools.map(x => ({
-    ...x, def: {
-        name: x.def.function.name,
-        description: x.def.function.description,
-        input_schema: x.def.function.parameters,
-    }
-}));
-
-const toolsGemini = tools.map(x => ({
-    ...x, def: {
-        name: x.def.function.name,
-        description: x.def.function.description,
-        parameters: {
-            type: 'object',
-            properties: x.def.function.parameters.properties,
-            required: x.def.function.parameters.required,
-        },
-        response: x.def.function?.response ?? {
-            type: 'string', description: 'It could be a string or JSON',
-        },
-    }
-}));
-
 const selectGptAudioModel = options => {
     assert(
         MODELS[options.model]?.audio,
@@ -526,7 +532,7 @@ const buildGptMessage = (content, options) => {
     alterModel && (options.model = alterModel);
     const message = String.isString(content) ? {
         role: options?.role || user,
-        content: content.length ? [{ type: 'text', text: content }] : [],
+        content: content.length ? [{ type: TEXT, text: content }] : [],
     } : content;
     message.content || (message.content = []);
     attachments.map(x => message.content.push(x));
@@ -580,7 +586,7 @@ const buildClaudeMessage = (text, options) => {
     });
     return String.isString(text) ? {
         role: options?.role || user,
-        content: [...attachments, { type: 'text', text }],
+        content: [...attachments, { type: TEXT, text }],
     } : text;
 };

@@ -648,21 +654,21 @@ const packResp = async (resp, options) => {
                 return `${i + 1}. [${x.title}](${x.uri})`;
             }).join('\n');
         }
-        // DeepSeek R1 {
         let lines = (richText || txt).split('\n');
-
-
-
-
-
-
-
-
-
+        for (let i in lines) {
+            switch (lines[i]) {
+                case THINK_STR:
+                    lines[i] = MD_CODE + THINK;
+                    break;
+                case TOOLS_STR:
+                    lines[i] = MD_CODE + TOOLS;
+                    break;
+                case THINK_END:
+                case TOOLS_END:
+                    lines[i] = MD_CODE;
             }
-        richText = lines.join('\n').trim();
         }
-
+        richText = lines.join('\n').trim();
     }
     return {
         ...text(txt), ...options?.jsonMode && !(
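For rendered (non-simple) output, `packResp` now rewrites both sentinel pairs into fenced markdown blocks, so reasoning and tool traces display as fenced `think` and `tools` code blocks. The transformation, reduced to a sketch (assuming `MD_CODE` is the triple-backtick string):

```js
const MD_CODE = '```'; // assumption: markdown fence marker
const render = text => text.split('\n').map(line => ({
    '<think>': MD_CODE + 'think',
    '<tools>': MD_CODE + 'tools',
    '</think>': MD_CODE,
    '</tools>': MD_CODE,
}[line] ?? line)).join('\n').trim();

render('<think>\nplan\n</think>\nanswer');
// -> '```think\nplan\n```\nanswer'
```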
@@ -677,24 +683,43 @@ const packResp = async (resp, options) => {
 };

 const packGptResp = async (resp, options) => {
-
-
-        || resp?.
-        || resp?.
-        || resp?.
-
+    // simple mode is not recommended for streaming responses
+    let text = resp?.choices?.[0]?.message?.content // ChatGPT
+        || resp?.choices?.[0]?.message?.audio?.transcript // ChatGPT audio mode
+        || (Function.isFunction(resp?.text) ? resp.text() : resp?.text) // Gemini
+        || resp?.content?.find(x => x.type === TEXT)?.text // Claude
+        || resp?.message?.content || ''; // Ollama
+    const audio = resp?.choices?.[0]?.message?.audio?.data; // ChatGPT audio mode
     if (options?.raw) { return resp; }
     else if (options?.simple && options?.jsonMode) { return parseJson(text); }
     else if (options?.simple && options?.audioMode) { return audio; }
-    else if (options?.simple
-
-
+    else if (options?.simple) {
+        for (const key of [[THINK_STR, THINK_END], [TOOLS_STR, TOOLS_END]]) {
+            const [findStr, findEnd] = key.map(x => text.indexOf(x));
+            if (findStr >= 0 && findEnd >= 0 && findStr < findEnd) {
+                text = text.split('')
+                text.splice(findStr, findEnd + THINK_END.length)
+                text = text.join('').trim();
+            }
+        }
+        return text;
+    }
     return await packResp({ text, audio, references: resp?.references }, options);
 };

 const handleToolsCall = async (msg, options) => {
-    let content
+    let [content, preRes, input, packMsg, toolsResponse] = [
+        [], [], [], null,
+        options?.currentResponse ? `${options?.currentResponse}\n` : '',
+    ];
+    const resp = async (msg) => {
+        toolsResponse = [...toolsResponse ? [toolsResponse] : [], msg].join('\n');
+        await ignoreErrFunc(async () => await options?.stream?.(await packGptResp({
+            choices: [{ message: { content: options?.delta ? msg : toolsResponse } }]
+        }, { ...options || {}, processing: true })), LOG);
+    };
     if (msg?.tool_calls?.length) {
+        await resp(TOOLS_STR);
         switch (options?.flavor) {
             case CLAUDE: preRes.push({ role: assistant, content: msg?.tool_calls }); break;
             case GEMINI: preRes.push({ role: MODEL, parts: msg?.tool_calls.map(x => ({ functionCall: x })) }); break;
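In simple mode, `packGptResp` now cuts whole `<think>…</think>` and `<tools>…</tools>` spans out of the text before returning it; the shipped code does this by exploding the string into a character array and splicing. An equivalent, more conventional sketch with `slice`:

```js
const stripSpan = (text, open, close) => {
    const [start, end] = [text.indexOf(open), text.indexOf(close)];
    return start >= 0 && end > start
        ? (text.slice(0, start) + text.slice(end + close.length)).trim()
        : text;
};

stripSpan('<think>\nplan\n</think>\nanswer', '<think>', '</think>'); // 'answer'
```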
@@ -703,7 +728,7 @@ const handleToolsCall = async (msg, options) => {
         for (const fn of msg.tool_calls) {
             switch (options?.flavor) {
                 case CLAUDE:
-                    input = fn.input = parseJson(fn?.input);
+                    input = fn.input = String.isString(fn?.input) ? parseJson(fn.input) : fn?.input;
                     packMsg = (content, is_error) => ({
                         type: 'tool_result', tool_use_id: fn.id, content, is_error,
                     });
@@ -727,25 +752,31 @@ const handleToolsCall = async (msg, options) => {
                     break;
             }
             const name = fn?.function?.name || fn?.name;
-            const func = tools.find(x => insensitiveCompare(
+            await resp(`\nName: ${name}`);
+            const f = tools.find(x => insensitiveCompare(
                 x.def?.function?.name || x?.def?.name, name
-            ))
-            if (!func) {
+            ));
+            if (!f?.func) {
                 content.push(packMsg(`Function call failed, invalid function name: ${name}`, true));
                 continue;
             }
+            const description = f.def?.function?.description || f.def?.description;
+            description && await resp(`Description: ${description}`);
             try {
-                content.push(packMsg((await func(input)) ?? 'OK'));
+                content.push(packMsg((await f?.func(input)) ?? 'OK'));
+                await resp(`Status: OK`);
             } catch (err) {
                 content.push(packMsg(`Function call failed: ${err.message}`, true));
+                await resp(`Status: Failed`);
             }
         }
         switch (options?.flavor) {
             case CLAUDE: content = [{ role: user, content }]; break;
             case GEMINI: content = [{ role: user, parts: content }]; break;
         }
+        await resp(`\n${TOOLS_END}`);
     }
-    return [...preRes, ...content];
+    return { toolsResult: [...preRes, ...content], toolsResponse };
 };

 const promptChatGPT = async (content, options = {}) => {
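`handleToolsCall` previously returned just the provider-formatted messages; it now also narrates execution through the caller's `stream` callback (a `<tools>` block with `Name:`, `Description:`, and `Status:` lines) and returns that trace alongside the results. A hedged sketch of a caller, with shapes inferred from this diff:

```js
// Sketch only: handleToolsCall is module-internal to alan.mjs.
const { toolsResult, toolsResponse } = await handleToolsCall(message, {
    flavor: 'CLAUDE',                  // selects result packaging
    stream: async s => console.log(s), // receives the <tools> trace
});
// toolsResult  -> messages for the follow-up prompt
// toolsResponse -> human-readable '<tools>\n...\n</tools>' trace
```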
@@ -766,7 +797,7 @@ const promptChatGPT = async (content, options = {}) => {
         && (options.reasoning_effort = GPT_REASONING_EFFORT);
     const message = buildGptMessage(content, options);
     const modalities = options?.modalities || (
-        options?.audioMode ? [
+        options?.audioMode ? [TEXT, AUDIO] : undefined
     );
     assert(!(
         options?.jsonMode && !MODELS[options.model]?.json
@@ -790,8 +821,10 @@ const promptChatGPT = async (content, options = {}) => {
             tools: options?.tools ?? tools.map(x => x.def),
         } : {}, ...options?.jsonMode ? {
             response_format: { type: JSON_OBJECT }
-        } : {}, model: options.model, stream: !!options?.stream,
-
+        } : {}, model: options.model, stream: !!options?.stream,
+            store: true, tool_choice: 'auto',
+        }), options?.toolsResponse ? `${options?.toolsResponse}\n\n` : '',
+        Buffer.alloc(0), null, [],
     ];
     if (options?.stream) {
         for await (chunk of resp) {
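Besides `tools`, the ChatGPT request now pins `store: true` and `tool_choice: 'auto'`, and the accumulator is seeded with any `toolsResponse` trace from a previous round. For orientation, a minimal direct call with the `openai` SDK (model name is a placeholder):

```js
import OpenAI from 'openai';

const openai = new OpenAI(); // reads OPENAI_API_KEY
const resp = await openai.chat.completions.create({
    model: 'gpt-4o-mini', // placeholder
    messages: [{ role: 'user', content: 'Call the test function with 1 and 2.' }],
    tools: [{
        type: 'function',
        function: {
            name: 'testFunctionCall',
            parameters: { type: 'object', properties: {} },
        },
    }],
    tool_choice: 'auto', // model decides whether to call the tool
    store: true,         // opt in to OpenAI's stored completions
});
console.log(resp.choices[0].message.tool_calls
    ?? resp.choices[0].message.content);
```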
@@ -832,9 +865,15 @@ const promptChatGPT = async (content, options = {}) => {
             };
             resp = chunk;
         }
-        const toolsResult
-
-
+        const { toolsResult, toolsResponse }
+            = await handleToolsCall(resp?.choices?.[0]?.message, options);
+        options?.toolsResponse && !options?.stream && (
+            resp.choices[0].message.content = [
+                options?.toolsResponse, resp.choices[0].message.content,
+            ].join('\n\n')
+        );
+        return await (toolsResult.length && !options?.toolsResult ? promptChatGPT(
+            content, { ...options || {}, toolsResult, toolsResponse }
     ) : packGptResp(resp, options));
 };

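The tail of `promptChatGPT` implements a single-retry tool loop: if the first round produced tool calls (and we are not already in the second round, signalled by `options.toolsResult`), it re-invokes itself with the tool outputs and the trace attached. The same pattern appears in the Claude and Gemini paths below; schematically:

```js
// Schematic only; callModel/handleTools/pack stand in for the
// provider-specific helpers in alan.mjs.
async function prompt(content, options = {}) {
    const resp = await callModel(content, options);
    const { toolsResult, toolsResponse } = await handleTools(resp, options);
    return toolsResult.length && !options.toolsResult
        ? prompt(content, { ...options, toolsResult, toolsResponse }) // round 2
        : pack(resp, options); // final answer
}
```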
@@ -879,10 +918,11 @@ const promptClaude = async (content, options = {}) => {
         } : {}, // https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking
         ...MODELS[options.model]?.tools ? {
             tools: options?.tools ?? toolsClaude.map(x => x.def),
+            tool_choice: { type: 'auto' },
         } : {},
     });
     let [event, txtResult, thinking, signature, result, thinkEnd, tool_calls]
-        = [null, '', '', '', '', '', []];
+        = [null, '', '', '', options?.toolsResponse || '', '', []];
     if (options?.stream) {
         for await (event of resp) {
             let [thkDelta, txtDelta] = [
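Claude gains the matching knob: Anthropic's Messages API spells the default as an object, `tool_choice: { type: 'auto' }`, and the running `result` is seeded with any earlier tool trace. A minimal direct call with `@anthropic-ai/sdk` for comparison:

```js
import Anthropic from '@anthropic-ai/sdk';

const anthropic = new Anthropic(); // reads ANTHROPIC_API_KEY
const resp = await anthropic.messages.create({
    model: 'claude-3-5-sonnet-latest',
    max_tokens: 1024,
    messages: [{ role: 'user', content: 'Call the test function with 1 and 2.' }],
    tools: [{
        name: 'testFunctionCall',
        description: 'This is a test function call',
        input_schema: { type: 'object', properties: {} },
    }],
    tool_choice: { type: 'auto' }, // Anthropic's object form of 'auto'
});
console.log(resp.content.filter(x => x.type === 'tool_use'));
```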
@@ -893,8 +933,10 @@ const promptClaude = async (content, options = {}) => {
             thinking += thkDelta;
             signature = signature || event?.content_block?.signature || event?.delta?.signature || '';
             if (reasoning) {
-
-
+                thkDelta && (thkDelta === thinking)
+                    && (thkDelta = `${THINK_STR}\n${thkDelta}`);
+                thinking && txtDelta && !thinkEnd
+                    && (thinkEnd = thkDelta = `${thkDelta}\n${THINK_END}\n\n`);
             }
             if (event?.content_block?.type === 'tool_use') {
                 tool_calls.push({ ...event?.content_block, input: '' });
@@ -904,23 +946,42 @@ const promptClaude = async (content, options = {}) => {
             const delta = thkDelta + txtDelta;
             if (delta === '') { continue; }
             result += delta;
-            event.content = [{ type:
+            event.content = [{ type: TEXT, text: options?.delta ? delta : result }];
             await ignoreErrFunc(async () => await options.stream(
-                await packGptResp(event, { ...options
+                await packGptResp(event, { ...options, processing: true })
             ), LOG);
         }
-        event.content = [{
-
+        event.content = [{
+            type: TEXT, text: tool_calls.length ? txtResult : result,
+        }];
+        tool_calls.length && thinking
+            && event.content.unshift({ type: THINKING, thinking, signature });
     } else {
         event = resp;
         tool_calls = resp?.content?.filter?.(x => x.type === 'tool_use') || [];
     }
-    const toolsResult = await handleToolsCall(
-
-
-
-
-
+    const { toolsResult, toolsResponse } = await handleToolsCall(
+        { tool_calls }, { ...options, currentResponse: result, flavor: CLAUDE },
+    );
+    if (toolsResult.length && !options?.toolsResult) {
+        toolsResult[0].content.unshift(
+            ...event?.content.filter(x => x?.type !== 'tool_use')
+        );
+        return await promptClaude(content, {
+            ...options, toolsResult, toolsResponse,
+        });
+    } else {
+        const textPart = event.content.find(x => x.type == TEXT);
+        const thinkPart = event.content.find(x => x.type == THINKING);
+        const prvThink = options?.toolsResult?.find(
+            x => x?.content?.find(y => y?.type === THINKING)
+        )?.content?.find(x => x?.type === THINKING);
+        options?.stream || (textPart.text = [
+            ...packThink(prvThink?.thinking), ...packThink(thinkPart?.thinking),
+            ...options?.toolsResponse ? [options?.toolsResponse] : [],
+            textPart.text,
+        ].join('\n\n'));
+    } return packGptResp(event, options);
 };

 const uploadFile = async (input, options) => {
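After the second Claude round, the non-streamed reply is reassembled from up to four pieces joined by blank lines: the first round's `thinking` block, the current one (both via `packThink`), the `<tools>` trace, and the answer text. Illustratively, with invented values:

```js
// Illustrative shape of the reassembled Claude answer (values invented):
const finalText = [
    '<think>\nfirst-round reasoning\n</think>',  // packThink(prvThink?.thinking)
    '<think>\nsecond-round reasoning\n</think>', // packThink(thinkPart?.thinking)
    '<tools>\n\nName: testFunctionCall\nStatus: OK\n\n</tools>', // toolsResponse
    'The answer is 3.',                          // textPart.text
].join('\n\n');
```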
@@ -972,11 +1033,33 @@ const packGeminiReferences = (chunks, supports) => {
     return references;
 };

-const
-const
-
+const promptGemini = async (content, options = {}) => {
+    const { generative, genModel } = await getGeminiClient(options);
+    // https://github.com/google/generative-ai-js/blob/main/samples/node/advanced-chat.js
+    // @todo: check this issue similar to Vertex AI:
+    // Google's bug: history is not allowed while using inline_data?
+    assert(!(
+        options?.jsonMode && MODELS[genModel]?.json == false
+    ), `This model does not support JSON output: ${genModel} `);
+    options.model = genModel;
+    const chat = generative.startChat({
+        history: [
+            ...options?.messages && !options?.attachments?.length
+                ? options.messages : [],
+            ...options?.toolsResult ? [{
+                role: user, parts: buildGeminiMessage(content, options)
+            }, options?.toolsResult[0]] : [],
+        ], ...generationConfig(options),
+    });
+    const resp = await chat[
+        options?.stream ? 'sendMessageStream' : 'sendMessage'
+    ](options?.toolsResult ?
+        options?.toolsResult[1].parts : buildGeminiMessage(content, options));
+    let [result, references, functionCalls] = [
+        options?.toolsResponse ? `${options?.toolsResponse}\n\n` : '', null, null
+    ];
     if (options?.stream) {
-        for await (const chunk of
+        for await (const chunk of resp.stream) {
             functionCalls || (functionCalls = chunk.functionCalls);
             const delta = chunk?.text?.() || '';
             const rfc = packGeminiReferences(
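The rewritten `promptGemini` awaits the unary or streamed response itself, reads `functionCalls()` from it, and recurses with `toolsResult` exactly like the other providers; on round two the tool outputs go back as `functionResponse` parts. A hedged end-to-end sketch with `@google/generative-ai` (placeholder model and key):

```js
import { GoogleGenerativeAI } from '@google/generative-ai';

const genAi = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
const model = genAi.getGenerativeModel({
    model: 'gemini-1.5-flash', // placeholder
    tools: [{
        functionDeclarations: [{ name: 'testFunctionCall', description: 'test' }],
    }],
});
const chat = model.startChat();
const first = await chat.sendMessage('Call the test function.');
for (const call of first.response.functionCalls() ?? []) {
    // Feed the local result back as a functionResponse part.
    const second = await chat.sendMessage([{
        functionResponse: { name: call.name, response: { content: 'OK' } },
    }]);
    console.log(second.response.text());
}
```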
@@ -989,52 +1072,28 @@ const handleGeminiResponse = async (resp, options) => {
             await ignoreErrFunc(async () => await options.stream(
                 await packGptResp({
                     text: () => options?.delta ? delta : result, references,
-                }, { ...options
+                }, { ...options, processing: true })
             ), LOG);
         }
     }
-    const
-    const toolsResult = await handleToolsCall({
-        tool_calls: (functionCalls ||
-    }, { flavor: GEMINI });
-
-
+    const _resp = await resp.response;
+    const { toolsResult, toolsResponse } = await handleToolsCall({
+        tool_calls: (functionCalls || _resp.functionCalls)()
+    }, { ...options, flavor: GEMINI });
+    options?.toolsResponse && !options?.stream
+        && (_resp.text = [options?.toolsResponse, _resp.text()].join('\n\n'));
+    return await (toolsResult.length && !options?.toolsResult ? promptGemini(
+        content, { ...options || {}, toolsResult, toolsResponse }
     ) : packGptResp(options?.stream ? {
-
+        _resp, text: () => result, references
     } : {
-        ...
-
-
+        ..._resp, references: packGeminiReferences(
+            _resp.candidates[0]?.groundingMetadata?.groundingChunks,
+            _resp.candidates[0]?.groundingMetadata?.groundingSupports
         )
     }, options));
 };

-const promptGemini = async (content, options) => {
-    const { generative, genModel } = await getGeminiClient(options);
-    // https://github.com/google/generative-ai-js/blob/main/samples/node/advanced-chat.js
-    // @todo: check this issue similar to Vertex AI:
-    // Google's bug: history is not allowed while using inline_data?
-    assert(!(
-        options?.jsonMode && MODELS[genModel]?.json == false
-    ), `This model does not support JSON output: ${genModel} `);
-    const chat = generative.startChat({
-        history: [
-            ...options?.messages && !options?.attachments?.length
-                ? options.messages : [],
-            ...options?.toolsResult ? [{
-                role: user, parts: buildGeminiMessage(content, options)
-            }, options?.toolsResult[0]] : [],
-        ], ...generationConfig(options),
-    });
-    const resp = chat[options?.stream ? 'sendMessageStream' : 'sendMessage'](
-        options?.toolsResult ?
-            options?.toolsResult[1].parts : buildGeminiMessage(content, options)
-    );
-    return await handleGeminiResponse(
-        resp, { ...options || {}, content, model: genModel }
-    );
-};
-
 const checkEmbeddingInput = async (input, model) => {
     assert(input, 'Text is required.', 400);
     const arrInput = input.split(' ');
package/lib/manifest.mjs
CHANGED