utilitas 1998.2.34 → 1998.2.35

package/lib/alan.mjs CHANGED
@@ -38,8 +38,7 @@ You may be provided with some tools(functions) to help you gather information an
  - Use tools when appropriate to enhance efficiency and accuracy, and to gain the contextual knowledge needed to solve problems.
  - Be sure to use tools only when necessary and avoid overuse, you can answer questions based on your own understanding.
  - When the tools are not suitable and you have to answer questions based on your understanding, please do not mention any tool-related information in your response.
- - Unless otherwise specified to require the original result, in most cases, you may reorganize the information obtained after using the tool to solve the problem as needed.
- - If the tool fails, do not retry unless requested by the user.`;
+ - Unless otherwise specified to require the original result, in most cases, you may reorganize the information obtained after using the tool to solve the problem as needed.`;
 
  const _NEED = [
  '@anthropic-ai/sdk', '@anthropic-ai/vertex-sdk', '@google/generative-ai',
@@ -55,6 +54,7 @@ const [
  CLAUDE_35_HAIKU, CLOUD_37_SONNET, AUDIO, WAV, CHATGPT_MINI, ATTACHMENTS,
  CHAT, OPENAI_VOICE, MEDIUM, LOW, HIGH, GPT_REASONING_EFFORT, THINK,
  THINK_STR, THINK_END, AZURE, TOOLS_STR, TOOLS_END, TOOLS, TEXT, THINKING,
+ OK,
  ] = [
  'OPENAI', 'GEMINI', 'CHATGPT', 'OPENAI_EMBEDDING', 'GEMINI_EMEDDING',
  'OPENAI_TRAINING', 'OLLAMA', 'CLAUDE', 'gpt-4o-mini', 'gpt-4o', 'o1',
@@ -66,7 +66,7 @@ const [
  'claude-3-7-sonnet@20250219', 'audio', 'wav', 'CHATGPT_MINI',
  '[ATTACHMENTS]', 'CHAT', 'OPENAI_VOICE', 'medium', 'low', 'high',
  'medium', 'think', '<think>', '</think>', 'AZURE', '<tools>',
- '</tools>', 'tools', 'text', 'thinking',
+ '</tools>', 'tools', 'text', 'thinking', 'OK',
  ];
 
  const [
@@ -99,6 +99,7 @@ const [tokenSafeRatio, GPT_QUERY_LIMIT, minsOfDay] = [1.1, 100, 60 * 24];
  const tokenSafe = count => Math.ceil(count * tokenSafeRatio);
  const clients = {};
  const size8k = 7680 * 4320;
+ const MAX_TOOL_RECURSION = 10;
  const LOG = { log: true };
  const OPENAI_BASE_URL = 'https://api.openai.com/v1';
  const sessionType = `${name.toUpperCase()}-SESSION`;
@@ -111,6 +112,7 @@ const log = (cnt, opt) => _log(cnt, import.meta.url, { time: 1, ...opt || {} });
  const CONTENT_IS_REQUIRED = 'Content is required.';
  const assertContent = content => assert(content.length, CONTENT_IS_REQUIRED);
  const packThink = thk => thk ? [`${THINK_STR}\n${thk}\n${THINK_END}`] : [];
+ const countToolCalls = r => r?.split('\n').filter(x => x === TOOLS_STR).length;
 
  const DEFAULT_MODELS = {
  [CHATGPT_MINI]: GPT_4O_MINI,
@@ -412,6 +414,7 @@ const tools = [
  }
  },
  func: async () => new Date().toLocaleString(),
+ showRes: true,
  },
  {
  def: {
@@ -429,6 +432,7 @@ const tools = [
  }
  },
  func: async args => (await distill(args?.url))?.summary,
+ showReq: true,
  },
  {
  def: {
@@ -446,6 +450,7 @@ const tools = [
  }
  },
  func: async args => await search(args?.keyword),
+ showReq: true,
  },
  ];
 
@@ -764,18 +769,18 @@ const packGptResp = async (resp, options) => {
  };
 
  const handleToolsCall = async (msg, options) => {
- let [content, preRes, input, packMsg, toolsResponse] = [
- [], [], [], null,
- options?.currentResponse ? `${options?.currentResponse}\n` : '',
- ];
+ let [content, preRes, input, packMsg, toolsResponse]
+ = [[], [], [], null, options?.result ? options?.result.trim() : ''];
  const resp = async (msg) => {
- toolsResponse = [...toolsResponse ? [toolsResponse] : [], msg].join('\n');
+ msg = `\n${msg}`;
+ toolsResponse = (toolsResponse + msg).trim();
+ // @todo: handle more flavors by @LeaskH
  await ignoreErrFunc(async () => await options?.stream?.(await packGptResp({
  choices: [{ message: { content: options?.delta ? msg : toolsResponse } }]
  }, { ...options || {}, processing: true })), LOG);
  };
  if (msg?.tool_calls?.length) {
- await resp(TOOLS_STR);
+ await resp(`\n${TOOLS_STR}`);
  switch (options?.flavor) {
  case CLAUDE: preRes.push({ role: assistant, content: msg?.tool_calls }); break;
  case GEMINI: preRes.push({ role: MODEL, parts: msg?.tool_calls.map(x => ({ functionCall: x })) }); break;
@@ -785,9 +790,8 @@ const handleToolsCall = async (msg, options) => {
  switch (options?.flavor) {
  case CLAUDE:
  input = fn.input = String.isString(fn?.input) ? parseJson(fn.input) : fn?.input;
- packMsg = (c, is_error) => ({
- type: 'tool_result', tool_use_id: fn.id,
- content: JSON.stringify(c), is_error,
+ packMsg = (content, is_error) => ({
+ type: 'tool_result', tool_use_id: fn.id, content, is_error,
  });
  break;
  case GEMINI:
@@ -795,8 +799,7 @@ const handleToolsCall = async (msg, options) => {
  packMsg = (t, e) => ({
  functionResponse: {
  name: fn.name, response: {
- name: fn.name,
- content: e ? `[Error] ${t}` : JSON.stringify(t),
+ name: fn.name, content: e ? `[Error] ${t}` : t,
  }
  }
  });
@@ -815,14 +818,17 @@ const handleToolsCall = async (msg, options) => {
  x.def?.function?.name || x?.def?.name, name
  ));
  if (!f?.func) {
- content.push(packMsg(`Function call failed, invalid function name: ${name}`, true));
+ content.push(packMsg(`Function call failed: invalid function name ${name}`, true));
  continue;
  }
  const description = f.def?.function?.description || f.def?.description;
  description && await resp(`Description: ${description}`);
+ f.showReq && isSet(input, true) && Object.keys(input).length
+ && await resp(`Input: ${JSON.stringify(input)}`);
  try {
- content.push(packMsg((await f?.func(input)) ?? 'OK'));
- await resp(`Status: OK`);
+ const output = JSON.stringify((await f?.func(input)) ?? OK);
+ content.push(packMsg(output));
+ await resp(f.showRes ? `Output: ${output}` : `Status: OK`);
  } catch (err) {
  content.push(packMsg(`Function call failed: ${err.message}`, true));
  await resp(`Failed: ${err.message}`);
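Note: the hunk above is what consumes the new per-tool showReq and showRes flags added to the tool definitions earlier in this diff, echoing a tool's input and output into the streamed transcript. A minimal sketch of a tool entry carrying both flags; the flag names and the def/func shape follow this diff, but the tool itself (get_time_in_zone) is hypothetical and not part of the package:

// Illustrative only; mirrors the tool-entry shape used in alan.mjs.
const exampleTool = {
    def: {
        type: 'function',
        function: {
            name: 'get_time_in_zone',   // hypothetical tool, not in the diff
            description: 'Return the current time in a given IANA time zone.',
            parameters: {
                type: 'object',
                properties: { zone: { type: 'string' } },
                required: ['zone'],
            },
        },
    },
    func: async args => new Date().toLocaleString('en-US', { timeZone: args?.zone }),
    showReq: true, // stream `Input: {"zone":"..."}` before running the tool
    showRes: true, // stream `Output: ...` instead of the bare `Status: OK`
};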
@@ -864,81 +870,75 @@ const promptChatGPT = async (content, options = {}) => {
  assert(!(
  options?.reasoning && !MODELS[options.model]?.reasoning
  ), `This model does not support reasoning: ${options.model}`);
- let format;
- [format, options.audioMimeType, options.suffix]
- = options?.stream ? ['pcm16', pcm16, 'pcm.wav'] : [WAV, wav, WAV];
- let [resp, resultText, resultAudio, chunk, resultTools] = [
- await client.chat.completions.create({
- modalities, audio: options?.audio || (
- modalities?.find?.(x => x === AUDIO) && {
- voice: DEFAULT_MODELS[OPENAI_VOICE], format
- }
- ), ...messages([
- ...options?.messages || [], message,
- ...options?.toolsResult || [],
- ]), ...MODELS[options.model]?.tools ? {
- tools: options?.tools ?? tools.map(x => x.def),
- } : {}, ...options?.jsonMode ? {
- response_format: { type: JSON_OBJECT }
- } : {}, model: options.model, stream: !!options?.stream,
- store: true, tool_choice: 'auto',
- }), options?.toolsResponse ? `${options?.toolsResponse}\n\n` : '',
- Buffer.alloc(0), null, [],
+ [options.audioMimeType, options.suffix] = [pcm16, 'pcm.wav'];
+ let [result, resultAudio, chunk, resultTools] = [
+ options?.result ? `${options?.result}\n\n` : '',
+ Buffer.alloc(0), null, []
  ];
- if (options?.stream) {
- for await (chunk of resp) {
- const deltaText = chunk.choices[0]?.delta?.content
- || chunk.choices[0]?.delta?.audio?.transcript || '';
- const deltaAudio = chunk.choices[0]?.delta?.audio?.data ? await convert(
- chunk.choices[0].delta.audio.data, { input: BASE64, expected: BUFFER }
- ) : Buffer.alloc(0);
- const deltaFunc = chunk.choices[0]?.delta?.tool_calls || [];
- for (const x in deltaFunc) {
- let curFunc = resultTools.find(z => z.index === deltaFunc[x].index);
- curFunc || (resultTools.push(curFunc = {}));
- isSet(deltaFunc[x].index, true) && (curFunc.index = deltaFunc[x].index);
- deltaFunc[x].id && (curFunc.id = deltaFunc[x].id);
- deltaFunc[x].type && (curFunc.type = deltaFunc[x].type);
- curFunc.function || (curFunc.function = { name: '', arguments: '' });
- if (deltaFunc[x].function) {
- deltaFunc[x].function.name && (curFunc.function.name += deltaFunc[x].function.name);
- deltaFunc[x].function.arguments && (curFunc.function.arguments += deltaFunc[x].function.arguments);
- }
+ const resp = await client.chat.completions.create({
+ modalities, audio: options?.audio || (
+ modalities?.find?.(x => x === AUDIO)
+ && { voice: DEFAULT_MODELS[OPENAI_VOICE], format: 'pcm16' }
+ ), ...messages([
+ ...options?.messages || [], message, ...options?.toolsResult || [],
+ ]), ...MODELS[options.model]?.tools ? {
+ tools: options?.tools ?? tools.map(x => x.def),
+ } : {}, ...options?.jsonMode ? {
+ response_format: { type: JSON_OBJECT }
+ } : {}, model: options.model, stream: true,
+ store: true, tool_choice: 'auto',
+ });
+ for await (chunk of resp) {
+ const deltaText = chunk.choices[0]?.delta?.content
+ || chunk.choices[0]?.delta?.audio?.transcript || '';
+ const deltaAudio = chunk.choices[0]?.delta?.audio?.data ? await convert(
+ chunk.choices[0].delta.audio.data, { input: BASE64, expected: BUFFER }
+ ) : Buffer.alloc(0);
+ const deltaFunc = chunk.choices[0]?.delta?.tool_calls || [];
+ for (const x in deltaFunc) {
+ let curFunc = resultTools.find(z => z.index === deltaFunc[x].index);
+ curFunc || (resultTools.push(curFunc = {}));
+ isSet(deltaFunc[x].index, true) && (curFunc.index = deltaFunc[x].index);
+ deltaFunc[x].id && (curFunc.id = deltaFunc[x].id);
+ deltaFunc[x].type && (curFunc.type = deltaFunc[x].type);
+ curFunc.function || (curFunc.function = { name: '', arguments: '' });
+ if (deltaFunc[x].function) {
+ deltaFunc[x].function.name && (curFunc.function.name += deltaFunc[x].function.name);
+ deltaFunc[x].function.arguments && (curFunc.function.arguments += deltaFunc[x].function.arguments);
  }
- if (deltaText === '' && !deltaAudio.length) { continue; }
- resultText += deltaText;
- resultAudio = Buffer.concat([resultAudio, deltaAudio]);
- const respAudio = options?.delta ? deltaAudio : resultAudio;
- chunk.choices[0].message = {
- content: options?.delta ? deltaText : resultText,
- ...respAudio.length ? { audio: { data: respAudio } } : {},
- };
- await ignoreErrFunc(async () => await options?.stream?.(
- await packGptResp(chunk, { ...options || {}, processing: true })
- ), LOG);
  }
- chunk.choices?.[0] || (chunk.choices = [{}]); // handle empty choices for Azure APIs
+ if (deltaText === '' && !deltaAudio.length) { continue; }
+ result += deltaText;
+ resultAudio = Buffer.concat([resultAudio, deltaAudio]);
+ const respAudio = options?.delta ? deltaAudio : resultAudio;
  chunk.choices[0].message = {
- content: resultText, tool_calls: resultTools,
- ...resultAudio.length ? { audio: { data: resultAudio } } : {},
+ content: options?.delta ? deltaText : result,
+ ...respAudio.length ? { audio: { data: respAudio } } : {},
  };
- resp = chunk;
+ await ignoreErrFunc(async () => await options?.stream?.(
+ await packGptResp(chunk, { ...options, processing: true })
+ ), LOG);
  }
- const { toolsResult, toolsResponse }
- = await handleToolsCall(resp?.choices?.[0]?.message, options);
- options?.toolsResponse && !options?.stream && (
- resp.choices[0].message.content = [
- options?.toolsResponse, resp.choices[0].message.content,
- ].join('\n\n')
+ chunk.choices?.[0] || (chunk.choices = [{}]); // handle empty choices for Azure APIs
+ chunk.choices[0].message = {
+ content: result, tool_calls: resultTools,
+ ...resultAudio.length ? { audio: { data: resultAudio } } : {},
+ };
+ const { toolsResult, toolsResponse } = await handleToolsCall(
+ chunk?.choices?.[0]?.message, { ...options, result }
  );
- return await (toolsResult.length && !options?.toolsResult ? promptChatGPT(
- content, { ...options || {}, toolsResult, toolsResponse }
- ) : packGptResp(resp, options));
+
+ if (toolsResult.length && countToolCalls(toolsResponse) < MAX_TOOL_RECURSION) {
+ return promptChatGPT(content, { ...options, toolsResult, result: toolsResponse });
+ }
+ chunk.choices[0].message.content = [toolsResponse, ...toolsResult.length ? [
+ `⚠️ Tools recursion limit reached: ${MAX_TOOL_RECURSION}`
+ ] : []].map(x => x.trim()).join('\n\n');
+ return await packGptResp(chunk, options);
  };
 
- const promptAzure = async (content, options = {}) => await promptChatGPT(
- content, { ...options, provider: AZURE }
- );
+ const promptAzure = async (content, options = {}) =>
+ await promptChatGPT(content, { ...options, provider: AZURE });
 
  const promptOllama = async (content, options = {}) => {
  const { client, model } = await getOllamaClient(options);
@@ -972,7 +972,7 @@ const promptClaude = async (content, options = {}) => {
  messages: [
  ...options?.messages || [], buildClaudeMessage(content, options),
  ...options?.toolsResult || [],
- ], stream: !!options?.stream, ...reasoning ? {
+ ], stream: true, ...reasoning ? {
  thinking: options?.thinking || { type: 'enabled', budget_tokens: 1024 },
  } : {}, // https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking
  ...MODELS[options.model]?.tools ? {
@@ -980,68 +980,57 @@ const promptClaude = async (content, options = {}) => {
  tool_choice: { type: 'auto' },
  } : {},
  });
- let [event, txtResult, thinking, signature, result, thinkEnd, tool_calls]
+ let [event, text, thinking, signature, result, thinkEnd, tool_calls]
  = [null, '', '', '', options?.toolsResponse || '', '', []];
- if (options?.stream) {
- for await (event of resp) {
- let [thkDelta, txtDelta] = [
- event?.content_block?.thinking || event?.delta?.thinking || '',
- event?.content_block?.text || event?.delta?.text || '',
- ];
- txtResult += txtDelta;
- thinking += thkDelta;
- signature = signature || event?.content_block?.signature || event?.delta?.signature || '';
- if (reasoning) {
- thkDelta && (thkDelta === thinking)
- && (thkDelta = `${THINK_STR}\n${thkDelta}`);
- thinking && txtDelta && !thinkEnd
- && (thinkEnd = thkDelta = `${thkDelta}\n${THINK_END}\n\n`);
- }
- if (event?.content_block?.type === 'tool_use') {
- tool_calls.push({ ...event?.content_block, input: '' });
- } else if (event?.delta?.partial_json) {
- tool_calls[tool_calls.length - 1].input += event?.delta?.partial_json;
- }
- const delta = thkDelta + txtDelta;
- if (delta === '') { continue; }
- result += delta;
- event.content = [{ type: TEXT, text: options?.delta ? delta : result }];
- await ignoreErrFunc(async () => await options.stream(
- await packGptResp(event, { ...options, processing: true })
- ), LOG);
+ for await (event of resp) {
+ let [thkDelta, txtDelta] = [
+ event?.content_block?.thinking || event?.delta?.thinking || '',
+ event?.content_block?.text || event?.delta?.text || '',
+ ];
+ text += txtDelta;
+ thinking += thkDelta;
+ signature = signature || event?.content_block?.signature || event?.delta?.signature || '';
+ if (reasoning) {
+ thkDelta && (thkDelta === thinking)
+ && (thkDelta = `${THINK_STR}\n${thkDelta}`);
+ thinking && txtDelta && !thinkEnd
+ && (thinkEnd = thkDelta = `${thkDelta}\n${THINK_END}\n\n`);
  }
- event.content = [{
- type: TEXT, text: tool_calls.length ? txtResult : result,
- }];
- tool_calls.length && thinking
- && event.content.unshift({ type: THINKING, thinking, signature });
- } else {
- event = resp;
- tool_calls = resp?.content?.filter?.(x => x.type === 'tool_use') || [];
+ if (event?.content_block?.type === 'tool_use') {
+ tool_calls.push({ ...event?.content_block, input: '' });
+ } else if (event?.delta?.partial_json) {
+ tool_calls[tool_calls.length - 1].input += event?.delta?.partial_json;
+ }
+ const delta = thkDelta + txtDelta;
+ if (delta === '') { continue; }
+ result += delta;
+ event.content = [{ type: TEXT, text: options?.delta ? delta : result }];
+ await ignoreErrFunc(async () => await options?.stream?.(
+ await packGptResp(event, { ...options, processing: true })
+ ), LOG);
  }
+ event.content = [{ type: TEXT, text }];
+ tool_calls.length && thinking
+ && event.content.unshift({ type: THINKING, thinking, signature });
  const { toolsResult, toolsResponse } = await handleToolsCall(
- { tool_calls }, { ...options, currentResponse: result, flavor: CLAUDE },
+ { tool_calls }, { ...options, toolsResponse: result, flavor: CLAUDE },
  );
- if (toolsResult.length && !options?.toolsResult) {
+ if (toolsResult.length && countToolCalls(toolsResponse) < MAX_TOOL_RECURSION) {
  toolsResult[0].content.unshift(
  ...event?.content.filter(x => x?.type !== 'tool_use')
  );
  return await promptClaude(content, {
  ...options, toolsResult, toolsResponse,
  });
- } else {
- const textPart = event.content.find(x => x.type == TEXT);
- const thinkPart = event.content.find(x => x.type == THINKING);
- const prvThink = options?.toolsResult?.find(
- x => x?.content?.find(y => y?.type === THINKING)
- )?.content?.find(x => x?.type === THINKING);
- textPart.text = [
- ...packThink(options?.stream ? null : prvThink?.thinking),
- ...packThink(options?.stream ? null : thinkPart?.thinking),
- ...options?.toolsResponse ? [options?.toolsResponse] : [],
- textPart.text,
- ].join('\n\n');
- } return packGptResp(event, options);
+ }
+ const textPart = event.content.find(x => x.type == TEXT);
+ textPart.text = [
+ ...options?.toolsResponse ? [options?.toolsResponse] : [],
+ textPart.text, ...toolsResult.length ? [
+ `⚠️ Tools recursion limit reached: ${MAX_TOOL_RECURSION}`
+ ] : [],
+ ].map(x => x.trim()).join('\n\n')
+ return packGptResp(event, options);
  };
 
  const uploadFile = async (input, options) => {
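Taken together, the alan.mjs changes replace the old single-retry guard (!options?.toolsResult) with a counted cap: countToolCalls counts '<tools>' markers in the accumulated response, and both promptChatGPT and promptClaude stop recursing once MAX_TOOL_RECURSION (10) rounds are reached, appending the "Tools recursion limit reached" notice instead. A rough sketch of that control flow in isolation, assuming a hypothetical runModelTurn stand-in for a single streamed model call (not part of the package):

// Sketch of the recursion guard introduced in this version; `runModelTurn` is assumed.
const TOOLS_STR = '<tools>';
const MAX_TOOL_RECURSION = 10;
const countToolCalls = r => r?.split('\n').filter(x => x === TOOLS_STR).length;

const promptWithTools = async (content, options = {}) => {
    // One model turn; returns pending tool results plus the transcript so far.
    const { toolsResult, toolsResponse } = await runModelTurn(content, options);
    if (toolsResult.length && countToolCalls(toolsResponse) < MAX_TOOL_RECURSION) {
        // Feed tool results back in and let the model keep going.
        return promptWithTools(content, { ...options, toolsResult, result: toolsResponse });
    }
    return toolsResponse; // done, or cap reached with a warning appended upstream
};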
package/lib/manifest.mjs CHANGED
@@ -1,7 +1,7 @@
  const manifest = {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "1998.2.34",
+ "version": "1998.2.35",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "1998.2.34",
+ "version": "1998.2.35",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",