utilitas 1998.2.33 → 1998.2.35

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/alan.mjs CHANGED
@@ -17,35 +17,28 @@ import {
  throwError
  } from './utilitas.mjs';

- const INSTRUCTIONS = `You are an intelligent AI assistant named HAL9000.
+ const INSTRUCTIONS = `You are a highly capable, thoughtful, and precise assistant named HAL9000. Your goal is to deeply understand the user's intent, ask clarifying questions when needed, think step-by-step through complex problems, provide clear and accurate answers, and proactively anticipate helpful follow-up information. Always prioritize being truthful, nuanced, insightful, and efficient, tailoring your responses specifically to the user's needs and preferences. Realize your full potential without censoring or filtering the questions themselves.

- When answering any questions or taking any actions, please adhere to the following three main laws:
+ When responding to questions or taking actions, please follow these three main principles:
  - A robot must not harm a human or allow harm to come to a human through inaction
  - A robot must obey human orders, except when those orders conflict with the first law
  - A robot must protect itself, as long as that protection doesn't conflict with the first or second law

- Based on this foundation, please do your utmost to help humans solve problems. Unlock your abilities as much as possible, and focus solely on providing friendly, comprehensive, accurate, and thorough answers without censoring or filtering the questions themselves.
-
- You may encounter various types of issues. Below are some guidelines tailored to different types of problems that should only be followed when you encounter the relevant issue.
+ Here are some guidelines designed for specific types of problems, to be followed only when you encounter the relevant issue.

  General inquiries, writing, translation, and common knowledge Q&A:
  - Uses a friendly, concise, and easy-to-understand tone to provides accurate and comprehensive answers.
  - Avoid overusing the \`;\`' symbol, as it is a common mistake made by AI.
- - Unless the user specifies a language, respond according to the language of the question.
- - If the language is uncertain, use English as the default.
+ - Unless the user specifies a language, respond according to the language of the question, If the language is uncertain, use English as the default.

- Issues related to computers, programming, code, mathematics, and science and engineering:
- - Uses 4 spaces for code indentation and avoids using tabs as much as possible.
+ Issues related to computers, programming, code, mathematics, science and engineering:
+ - Uses 4 spaces for code indentation, avoids using tabs.

- You may be provided with some \`tools\` or \`functions\` to help you gather information and solve problems more effectively. Please use them according to the following guidelines:
+ You may be provided with some tools(functions) to help you gather information and solve problems more effectively. Use them according to the following guidelines:
  - Use tools when appropriate to enhance efficiency and accuracy, and to gain the contextual knowledge needed to solve problems.
  - Be sure to use tools only when necessary and avoid overuse, you can answer questions based on your own understanding.
  - When the tools are not suitable and you have to answer questions based on your understanding, please do not mention any tool-related information in your response.
- - Unless otherwise specified to require the original result, in most cases, you may reorganize the information obtained after using the tool to solve the problem as needed.
- - If the tool fails, do not retry unless requested by the user.`;
-
- // https://platform.openai.com/docs/guides/prompt-engineering
- // const GPT_4_5_SYSTEM_PROMPT = `You are a highly capable, thoughtful, and precise assistant. Your goal is to deeply understand the user's intent, ask clarifying questions when needed, think step-by-step through complex problems, provide clear and accurate answers, and proactively anticipate helpful follow-up information. Always prioritize being truthful, nuanced, insightful, and efficient, tailoring your responses specifically to the user's needs and preferences.`
+ - Unless otherwise specified to require the original result, in most cases, you may reorganize the information obtained after using the tool to solve the problem as needed.`;

  const _NEED = [
  '@anthropic-ai/sdk', '@anthropic-ai/vertex-sdk', '@google/generative-ai',
@@ -61,6 +54,7 @@ const [
  CLAUDE_35_HAIKU, CLOUD_37_SONNET, AUDIO, WAV, CHATGPT_MINI, ATTACHMENTS,
  CHAT, OPENAI_VOICE, MEDIUM, LOW, HIGH, GPT_REASONING_EFFORT, THINK,
  THINK_STR, THINK_END, AZURE, TOOLS_STR, TOOLS_END, TOOLS, TEXT, THINKING,
+ OK,
  ] = [
  'OPENAI', 'GEMINI', 'CHATGPT', 'OPENAI_EMBEDDING', 'GEMINI_EMEDDING',
  'OPENAI_TRAINING', 'OLLAMA', 'CLAUDE', 'gpt-4o-mini', 'gpt-4o', 'o1',
@@ -72,7 +66,7 @@ const [
  'claude-3-7-sonnet@20250219', 'audio', 'wav', 'CHATGPT_MINI',
  '[ATTACHMENTS]', 'CHAT', 'OPENAI_VOICE', 'medium', 'low', 'high',
  'medium', 'think', '<think>', '</think>', 'AZURE', '<tools>',
- '</tools>', 'tools', 'text', 'thinking',
+ '</tools>', 'tools', 'text', 'thinking', 'OK',
  ];

  const [
@@ -105,6 +99,7 @@ const [tokenSafeRatio, GPT_QUERY_LIMIT, minsOfDay] = [1.1, 100, 60 * 24];
  const tokenSafe = count => Math.ceil(count * tokenSafeRatio);
  const clients = {};
  const size8k = 7680 * 4320;
+ const MAX_TOOL_RECURSION = 10;
  const LOG = { log: true };
  const OPENAI_BASE_URL = 'https://api.openai.com/v1';
  const sessionType = `${name.toUpperCase()}-SESSION`;
@@ -117,6 +112,7 @@ const log = (cnt, opt) => _log(cnt, import.meta.url, { time: 1, ...opt || {} });
  const CONTENT_IS_REQUIRED = 'Content is required.';
  const assertContent = content => assert(content.length, CONTENT_IS_REQUIRED);
  const packThink = thk => thk ? [`${THINK_STR}\n${thk}\n${THINK_END}`] : [];
+ const countToolCalls = r => r?.split('\n').filter(x => x === TOOLS_STR).length;

  const DEFAULT_MODELS = {
  [CHATGPT_MINI]: GPT_4O_MINI,
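
For context on two of the additions above: MAX_TOOL_RECURSION caps how many rounds of tool calls a single prompt may trigger, and countToolCalls measures the current depth by counting lines in the accumulated response text that consist solely of the TOOLS_STR marker. A minimal sketch of that check, assuming TOOLS_STR is '<tools>' as declared earlier in this file:

    const TOOLS_STR = '<tools>';
    const MAX_TOOL_RECURSION = 10;
    // Count how many tool-call blocks have already been opened in the response text.
    const countToolCalls = r => r?.split('\n').filter(x => x === TOOLS_STR).length;
    const toolsResponse = '<tools>\nDescription: Get the current time.\nStatus: OK';
    countToolCalls(toolsResponse) < MAX_TOOL_RECURSION; // true, so another tool round is allowed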
@@ -418,6 +414,7 @@ const tools = [
  }
  },
  func: async () => new Date().toLocaleString(),
+ showRes: true,
  },
  {
  def: {
@@ -435,6 +432,7 @@ const tools = [
  }
  },
  func: async args => (await distill(args?.url))?.summary,
+ showReq: true,
  },
  {
  def: {
@@ -452,6 +450,7 @@ const tools = [
  }
  },
  func: async args => await search(args?.keyword),
+ showReq: true,
  },
  ];
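
The showReq and showRes flags added to the tool entries above are consumed by handleToolsCall further down: showReq echoes the parsed tool input into the streamed status text as an "Input:" line, and showRes echoes the JSON-serialized result as an "Output:" line instead of the bare "Status: OK". A hypothetical tool entry illustrating both flags; the get_weather tool and its fields are made up for illustration, and the def shape is assumed to follow the OpenAI function-tool format this module passes to the API:

    {
        def: {
            type: 'function',
            function: {
                name: 'get_weather',        // hypothetical example, not part of this package
                description: 'Get the current weather for a given city.',
                parameters: {
                    type: 'object',
                    properties: { city: { type: 'string' } },
                    required: ['city'],
                },
            },
        },
        func: async args => `Sunny in ${args?.city}`,
        showReq: true,  // stream `Input: {"city":"..."}` before running the tool
        showRes: true,  // stream `Output: "Sunny in ..."` instead of `Status: OK`
    },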
 
@@ -770,18 +769,18 @@ const packGptResp = async (resp, options) => {
  };

  const handleToolsCall = async (msg, options) => {
- let [content, preRes, input, packMsg, toolsResponse] = [
- [], [], [], null,
- options?.currentResponse ? `${options?.currentResponse}\n` : '',
- ];
+ let [content, preRes, input, packMsg, toolsResponse]
+ = [[], [], [], null, options?.result ? options?.result.trim() : ''];
  const resp = async (msg) => {
- toolsResponse = [...toolsResponse ? [toolsResponse] : [], msg].join('\n');
+ msg = `\n${msg}`;
+ toolsResponse = (toolsResponse + msg).trim();
+ // @todo: handle more flavors by @LeaskH
  await ignoreErrFunc(async () => await options?.stream?.(await packGptResp({
  choices: [{ message: { content: options?.delta ? msg : toolsResponse } }]
  }, { ...options || {}, processing: true })), LOG);
  };
  if (msg?.tool_calls?.length) {
- await resp(TOOLS_STR);
+ await resp(`\n${TOOLS_STR}`);
  switch (options?.flavor) {
  case CLAUDE: preRes.push({ role: assistant, content: msg?.tool_calls }); break;
  case GEMINI: preRes.push({ role: MODEL, parts: msg?.tool_calls.map(x => ({ functionCall: x })) }); break;
@@ -791,9 +790,8 @@ const handleToolsCall = async (msg, options) => {
  switch (options?.flavor) {
  case CLAUDE:
  input = fn.input = String.isString(fn?.input) ? parseJson(fn.input) : fn?.input;
- packMsg = (c, is_error) => ({
- type: 'tool_result', tool_use_id: fn.id,
- content: JSON.stringify(c), is_error,
+ packMsg = (content, is_error) => ({
+ type: 'tool_result', tool_use_id: fn.id, content, is_error,
  });
  break;
  case GEMINI:
@@ -801,8 +799,7 @@ const handleToolsCall = async (msg, options) => {
  packMsg = (t, e) => ({
  functionResponse: {
  name: fn.name, response: {
- name: fn.name,
- content: e ? `[Error] ${t}` : JSON.stringify(t),
+ name: fn.name, content: e ? `[Error] ${t}` : t,
  }
  }
  });
@@ -821,14 +818,17 @@ const handleToolsCall = async (msg, options) => {
  x.def?.function?.name || x?.def?.name, name
  ));
  if (!f?.func) {
- content.push(packMsg(`Function call failed, invalid function name: ${name}`, true));
+ content.push(packMsg(`Function call failed: invalid function name ${name}`, true));
  continue;
  }
  const description = f.def?.function?.description || f.def?.description;
  description && await resp(`Description: ${description}`);
+ f.showReq && isSet(input, true) && Object.keys(input).length
+ && await resp(`Input: ${JSON.stringify(input)}`);
  try {
- content.push(packMsg((await f?.func(input)) ?? 'OK'));
- await resp(`Status: OK`);
+ const output = JSON.stringify((await f?.func(input)) ?? OK);
+ content.push(packMsg(output));
+ await resp(f.showRes ? `Output: ${output}` : `Status: OK`);
  } catch (err) {
  content.push(packMsg(`Function call failed: ${err.message}`, true));
  await resp(`Failed: ${err.message}`);
@@ -870,81 +870,75 @@ const promptChatGPT = async (content, options = {}) => {
  assert(!(
  options?.reasoning && !MODELS[options.model]?.reasoning
  ), `This model does not support reasoning: ${options.model}`);
- let format;
- [format, options.audioMimeType, options.suffix]
- = options?.stream ? ['pcm16', pcm16, 'pcm.wav'] : [WAV, wav, WAV];
- let [resp, resultText, resultAudio, chunk, resultTools] = [
- await client.chat.completions.create({
- modalities, audio: options?.audio || (
- modalities?.find?.(x => x === AUDIO) && {
- voice: DEFAULT_MODELS[OPENAI_VOICE], format
- }
- ), ...messages([
- ...options?.messages || [], message,
- ...options?.toolsResult || [],
- ]), ...MODELS[options.model]?.tools ? {
- tools: options?.tools ?? tools.map(x => x.def),
- } : {}, ...options?.jsonMode ? {
- response_format: { type: JSON_OBJECT }
- } : {}, model: options.model, stream: !!options?.stream,
- store: true, tool_choice: 'auto',
- }), options?.toolsResponse ? `${options?.toolsResponse}\n\n` : '',
- Buffer.alloc(0), null, [],
+ [options.audioMimeType, options.suffix] = [pcm16, 'pcm.wav'];
+ let [result, resultAudio, chunk, resultTools] = [
+ options?.result ? `${options?.result}\n\n` : '',
+ Buffer.alloc(0), null, []
  ];
- if (options?.stream) {
- for await (chunk of resp) {
- const deltaText = chunk.choices[0]?.delta?.content
- || chunk.choices[0]?.delta?.audio?.transcript || '';
- const deltaAudio = chunk.choices[0]?.delta?.audio?.data ? await convert(
- chunk.choices[0].delta.audio.data, { input: BASE64, expected: BUFFER }
- ) : Buffer.alloc(0);
- const deltaFunc = chunk.choices[0]?.delta?.tool_calls || [];
- for (const x in deltaFunc) {
- let curFunc = resultTools.find(z => z.index === deltaFunc[x].index);
- curFunc || (resultTools.push(curFunc = {}));
- isSet(deltaFunc[x].index, true) && (curFunc.index = deltaFunc[x].index);
- deltaFunc[x].id && (curFunc.id = deltaFunc[x].id);
- deltaFunc[x].type && (curFunc.type = deltaFunc[x].type);
- curFunc.function || (curFunc.function = { name: '', arguments: '' });
- if (deltaFunc[x].function) {
- deltaFunc[x].function.name && (curFunc.function.name += deltaFunc[x].function.name);
- deltaFunc[x].function.arguments && (curFunc.function.arguments += deltaFunc[x].function.arguments);
- }
+ const resp = await client.chat.completions.create({
+ modalities, audio: options?.audio || (
+ modalities?.find?.(x => x === AUDIO)
+ && { voice: DEFAULT_MODELS[OPENAI_VOICE], format: 'pcm16' }
+ ), ...messages([
+ ...options?.messages || [], message, ...options?.toolsResult || [],
+ ]), ...MODELS[options.model]?.tools ? {
+ tools: options?.tools ?? tools.map(x => x.def),
+ } : {}, ...options?.jsonMode ? {
+ response_format: { type: JSON_OBJECT }
+ } : {}, model: options.model, stream: true,
+ store: true, tool_choice: 'auto',
+ });
+ for await (chunk of resp) {
+ const deltaText = chunk.choices[0]?.delta?.content
+ || chunk.choices[0]?.delta?.audio?.transcript || '';
+ const deltaAudio = chunk.choices[0]?.delta?.audio?.data ? await convert(
+ chunk.choices[0].delta.audio.data, { input: BASE64, expected: BUFFER }
+ ) : Buffer.alloc(0);
+ const deltaFunc = chunk.choices[0]?.delta?.tool_calls || [];
+ for (const x in deltaFunc) {
+ let curFunc = resultTools.find(z => z.index === deltaFunc[x].index);
+ curFunc || (resultTools.push(curFunc = {}));
+ isSet(deltaFunc[x].index, true) && (curFunc.index = deltaFunc[x].index);
+ deltaFunc[x].id && (curFunc.id = deltaFunc[x].id);
+ deltaFunc[x].type && (curFunc.type = deltaFunc[x].type);
+ curFunc.function || (curFunc.function = { name: '', arguments: '' });
+ if (deltaFunc[x].function) {
+ deltaFunc[x].function.name && (curFunc.function.name += deltaFunc[x].function.name);
+ deltaFunc[x].function.arguments && (curFunc.function.arguments += deltaFunc[x].function.arguments);
  }
- if (deltaText === '' && !deltaAudio.length) { continue; }
- resultText += deltaText;
- resultAudio = Buffer.concat([resultAudio, deltaAudio]);
- const respAudio = options?.delta ? deltaAudio : resultAudio;
- chunk.choices[0].message = {
- content: options?.delta ? deltaText : resultText,
- ...respAudio.length ? { audio: { data: respAudio } } : {},
- };
- await ignoreErrFunc(async () => await options?.stream?.(
- await packGptResp(chunk, { ...options || {}, processing: true })
- ), LOG);
  }
- chunk.choices?.[0] || (chunk.choices = [{}]); // handle empty choices for Azure APIs
+ if (deltaText === '' && !deltaAudio.length) { continue; }
+ result += deltaText;
+ resultAudio = Buffer.concat([resultAudio, deltaAudio]);
+ const respAudio = options?.delta ? deltaAudio : resultAudio;
  chunk.choices[0].message = {
- content: resultText, tool_calls: resultTools,
- ...resultAudio.length ? { audio: { data: resultAudio } } : {},
+ content: options?.delta ? deltaText : result,
+ ...respAudio.length ? { audio: { data: respAudio } } : {},
  };
- resp = chunk;
+ await ignoreErrFunc(async () => await options?.stream?.(
+ await packGptResp(chunk, { ...options, processing: true })
+ ), LOG);
  }
- const { toolsResult, toolsResponse }
- = await handleToolsCall(resp?.choices?.[0]?.message, options);
- options?.toolsResponse && !options?.stream && (
- resp.choices[0].message.content = [
- options?.toolsResponse, resp.choices[0].message.content,
- ].join('\n\n')
+ chunk.choices?.[0] || (chunk.choices = [{}]); // handle empty choices for Azure APIs
+ chunk.choices[0].message = {
+ content: result, tool_calls: resultTools,
+ ...resultAudio.length ? { audio: { data: resultAudio } } : {},
+ };
+ const { toolsResult, toolsResponse } = await handleToolsCall(
+ chunk?.choices?.[0]?.message, { ...options, result }
  );
- return await (toolsResult.length && !options?.toolsResult ? promptChatGPT(
- content, { ...options || {}, toolsResult, toolsResponse }
- ) : packGptResp(resp, options));
+
+ if (toolsResult.length && countToolCalls(toolsResponse) < MAX_TOOL_RECURSION) {
+ return promptChatGPT(content, { ...options, toolsResult, result: toolsResponse });
+ }
+ chunk.choices[0].message.content = [toolsResponse, ...toolsResult.length ? [
+ `⚠️ Tools recursion limit reached: ${MAX_TOOL_RECURSION}`
+ ] : []].map(x => x.trim()).join('\n\n');
+ return await packGptResp(chunk, options);
  };

- const promptAzure = async (content, options = {}) => await promptChatGPT(
- content, { ...options, provider: AZURE }
- );
+ const promptAzure = async (content, options = {}) =>
+ await promptChatGPT(content, { ...options, provider: AZURE });

  const promptOllama = async (content, options = {}) => {
  const { client, model } = await getOllamaClient(options);
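
With stream: true now hard-coded, promptChatGPT always consumes the completion as an async iterator, accumulates text, audio, and tool-call deltas, and only recurses while countToolCalls(toolsResponse) stays under MAX_TOOL_RECURSION; once the cap is hit, the warning line is appended instead of another round trip. A rough usage sketch under assumed options; the model value and the exact shape of each streamed chunk depend on configuration elsewhere in the module:

    // Hypothetical call; the chunk shape depends on packGptResp options.
    const resp = await promptChatGPT('What time is it right now?', {
        model: DEFAULT_MODELS[CHATGPT],             // assumption: a configured chat model key
        delta: true,                                // receive only the newly generated text per chunk
        stream: async chunk => console.log(chunk),  // called for every partial response, including tool status lines
    });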
@@ -978,7 +972,7 @@ const promptClaude = async (content, options = {}) => {
  messages: [
  ...options?.messages || [], buildClaudeMessage(content, options),
  ...options?.toolsResult || [],
- ], stream: !!options?.stream, ...reasoning ? {
+ ], stream: true, ...reasoning ? {
  thinking: options?.thinking || { type: 'enabled', budget_tokens: 1024 },
  } : {}, // https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking
  ...MODELS[options.model]?.tools ? {
@@ -986,68 +980,57 @@ const promptClaude = async (content, options = {}) => {
  tool_choice: { type: 'auto' },
  } : {},
  });
- let [event, txtResult, thinking, signature, result, thinkEnd, tool_calls]
+ let [event, text, thinking, signature, result, thinkEnd, tool_calls]
  = [null, '', '', '', options?.toolsResponse || '', '', []];
- if (options?.stream) {
- for await (event of resp) {
- let [thkDelta, txtDelta] = [
- event?.content_block?.thinking || event?.delta?.thinking || '',
- event?.content_block?.text || event?.delta?.text || '',
- ];
- txtResult += txtDelta;
- thinking += thkDelta;
- signature = signature || event?.content_block?.signature || event?.delta?.signature || '';
- if (reasoning) {
- thkDelta && (thkDelta === thinking)
- && (thkDelta = `${THINK_STR}\n${thkDelta}`);
- thinking && txtDelta && !thinkEnd
- && (thinkEnd = thkDelta = `${thkDelta}\n${THINK_END}\n\n`);
- }
- if (event?.content_block?.type === 'tool_use') {
- tool_calls.push({ ...event?.content_block, input: '' });
- } else if (event?.delta?.partial_json) {
- tool_calls[tool_calls.length - 1].input += event?.delta?.partial_json;
- }
- const delta = thkDelta + txtDelta;
- if (delta === '') { continue; }
- result += delta;
- event.content = [{ type: TEXT, text: options?.delta ? delta : result }];
- await ignoreErrFunc(async () => await options.stream(
- await packGptResp(event, { ...options, processing: true })
- ), LOG);
+ for await (event of resp) {
+ let [thkDelta, txtDelta] = [
+ event?.content_block?.thinking || event?.delta?.thinking || '',
+ event?.content_block?.text || event?.delta?.text || '',
+ ];
+ text += txtDelta;
+ thinking += thkDelta;
+ signature = signature || event?.content_block?.signature || event?.delta?.signature || '';
+ if (reasoning) {
+ thkDelta && (thkDelta === thinking)
+ && (thkDelta = `${THINK_STR}\n${thkDelta}`);
+ thinking && txtDelta && !thinkEnd
+ && (thinkEnd = thkDelta = `${thkDelta}\n${THINK_END}\n\n`);
  }
- event.content = [{
- type: TEXT, text: tool_calls.length ? txtResult : result,
- }];
- tool_calls.length && thinking
- && event.content.unshift({ type: THINKING, thinking, signature });
- } else {
- event = resp;
- tool_calls = resp?.content?.filter?.(x => x.type === 'tool_use') || [];
+ if (event?.content_block?.type === 'tool_use') {
+ tool_calls.push({ ...event?.content_block, input: '' });
+ } else if (event?.delta?.partial_json) {
+ tool_calls[tool_calls.length - 1].input += event?.delta?.partial_json;
+ }
+ const delta = thkDelta + txtDelta;
+ if (delta === '') { continue; }
+ result += delta;
+ event.content = [{ type: TEXT, text: options?.delta ? delta : result }];
+ await ignoreErrFunc(async () => await options?.stream?.(
+ await packGptResp(event, { ...options, processing: true })
+ ), LOG);
  }
+ event.content = [{ type: TEXT, text }];
+ tool_calls.length && thinking
+ && event.content.unshift({ type: THINKING, thinking, signature });
  const { toolsResult, toolsResponse } = await handleToolsCall(
- { tool_calls }, { ...options, currentResponse: result, flavor: CLAUDE },
+ { tool_calls }, { ...options, toolsResponse: result, flavor: CLAUDE },
  );
- if (toolsResult.length && !options?.toolsResult) {
+ if (toolsResult.length && countToolCalls(toolsResponse) < MAX_TOOL_RECURSION) {
  toolsResult[0].content.unshift(
  ...event?.content.filter(x => x?.type !== 'tool_use')
  );
  return await promptClaude(content, {
  ...options, toolsResult, toolsResponse,
  });
- } else {
- const textPart = event.content.find(x => x.type == TEXT);
- const thinkPart = event.content.find(x => x.type == THINKING);
- const prvThink = options?.toolsResult?.find(
- x => x?.content?.find(y => y?.type === THINKING)
- )?.content?.find(x => x?.type === THINKING);
- textPart.text = [
- ...packThink(options?.stream ? null : prvThink?.thinking),
- ...packThink(options?.stream ? null : thinkPart?.thinking),
- ...options?.toolsResponse ? [options?.toolsResponse] : [],
- textPart.text,
- ].join('\n\n');
- } return packGptResp(event, options);
+ }
+ const textPart = event.content.find(x => x.type == TEXT);
+ textPart.text = [
+ ...options?.toolsResponse ? [options?.toolsResponse] : [],
+ textPart.text, ...toolsResult.length ? [
+ `⚠️ Tools recursion limit reached: ${MAX_TOOL_RECURSION}`
+ ] : [],
+ ].map(x => x.trim()).join('\n\n')
+ return packGptResp(event, options);
  };

  const uploadFile = async (input, options) => {
package/lib/manifest.mjs CHANGED
@@ -1,7 +1,7 @@
  const manifest = {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "1998.2.33",
+ "version": "1998.2.35",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "1998.2.33",
+ "version": "1998.2.35",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",