utilitas 1998.2.24 → 1998.2.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/alan.mjs CHANGED
@@ -219,6 +219,7 @@ const MODELS = {
  trainingData: 'August 2024',
  vision: true,
  json: true,
+ tools: true,
  supportedMimeTypes: [
  png, jpeg, mov, mpeg, mp4, mpg, avi, wmv, mpegps, flv, pdf, aac,
  flac, mp3, m4a, mpga, opus, pcm, wav, webm, tgpp,
@@ -237,7 +238,6 @@ const MODELS = {
  requestLimitsRPD: 1500,
  tokenLimitsTPM: 4 * 1000000,
  trainingData: 'August 2024',
- json: false,
  vision: true,
  reasoning: true,
  supportedMimeTypes: [
@@ -268,8 +268,6 @@ const MODELS = {
  maxOutputTokens: 32768,
  requestLimitsRPM: Infinity,
  tokenLimitsTPM: Infinity,
- json: false,
- vision: false,
  reasoning: true,
  },
  [TEXT_EMBEDDING_3_SMALL]: {
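
Removing the explicit `json: false` / `vision: false` entries is behavioral, not just cosmetic: guards such as the one visible in the promptGemini hunk further down test for a literal false, so once the key is absent the loose comparison no longer matches and jsonMode is no longer rejected for that model. A quick illustration of the comparison involved (plain JavaScript semantics, not code from the package):

    // Guard reproduced from the promptGemini hunk below:
    //   options?.jsonMode && MODELS[genModel]?.json == false
    console.log(false == false);      // true  -> jsonMode was rejected while `json: false` was set
    console.log(undefined == false);  // false -> with the key removed, the guard no longer trips
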
@@ -390,19 +388,17 @@ const init = async (options) => {
  const { GoogleGenerativeAI } = await need('@google/generative-ai');
  const genAi = new GoogleGenerativeAI(options.apiKey);
  const genModel = options?.model || DEFAULT_MODELS[GEMINI];
- const tools = options?.tools || { google: true, code: false };
  clients[provider] = {
  generative: genAi.getGenerativeModel({
- model: genModel,
- tools: [
- // @todo: https://cloud.google.com/vertex-ai/generative-ai/docs/gemini-v2?hl=en#search-tool
- ...tools.code ? [{
- codeExecution: tools.code === true ? {} : tools.code
- }] : [],
- ...tools.google ? [{
- googleSearch: tools.google === true ? {} : tools.code,
- }] : [],
- ],
+ model: genModel, ...MODELS[genModel]?.tools ? (options?.tools ?? {
+ tools: [
+ // @todo: Gemini will failed when using these tools together.
+ // https://ai.google.dev/gemini-api/docs/function-calling
+ // { codeExecution: {} },
+ // { googleSearch: {} },
+ { functionDeclarations: toolsGemini.map(x => x.def) },
+ ],
+ }) : {},
  }),
  embedding: genAi.getGenerativeModel({
  model: DEFAULT_MODELS[GEMINI_EMEDDING],
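
With this change the Gemini client only attaches tools when `MODELS[genModel]?.tools` is set, and the default is a single `functionDeclarations` entry built from `toolsGemini`. Because the fallback uses the nullish operator, a caller-supplied `options.tools` replaces the whole block and must carry its own `tools` array. A minimal, self-contained sketch of the config shapes getGenerativeModel() ends up receiving (the model id and the override value are illustrative, not taken from the package):

    const genModel = 'gemini-2.0-flash';                                      // assumed model id
    const toolsGemini = [{ def: { name: 'demo', description: 'demo tool' } }]; // stand-in

    const withDefaults = {                  // tools-capable model, no options.tools supplied
        model: genModel,
        tools: [{ functionDeclarations: toolsGemini.map(x => x.def) }],
    };
    const withOverride = {                  // options.tools = { tools: [{ googleSearch: {} }] }
        model: genModel,
        tools: [{ googleSearch: {} }],
    };
    const withoutTools = { model: genModel };   // model lacks the `tools` capability flag
    console.log(withDefaults, withOverride, withoutTools);
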
@@ -468,15 +464,15 @@ const tools = [
  parameters: {
  type: 'object',
  properties: {
- a: { type: 'string', description: 'Please create a random string' },
- b: { type: 'string', enum: ['A', 'B'], description: 'Enum parameter' }
+ a: { type: 'string', description: 'AI created a random string, default "1"' },
+ b: { type: 'string', enum: ['1', '2'], description: 'Enum parameter' }
  },
- required: ['a'],
+ required: ['a', 'b'],
  additionalProperties: false
  }
  }
  },
- func: async args => 'OK',
+ func: async args => `OK: ${~~args.a + ~~args.b}`,
  },
  ];

@@ -488,6 +484,21 @@ const toolsClaude = tools.map(x => ({
  }
  }));

+ const toolsGemini = tools.map(x => ({
+ ...x, def: {
+ name: x.def.function.name,
+ description: x.def.function.description,
+ parameters: {
+ type: 'object',
+ properties: x.def.function.parameters.properties,
+ required: x.def.function.parameters.required,
+ },
+ response: x.def.function?.response ?? {
+ type: 'string', description: 'It could be a string or JSON',
+ },
+ }
+ }));
+
  const selectGptAudioModel = options => {
  assert(
  MODELS[options.model]?.audio,
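
`toolsGemini` reshapes the module's OpenAI-style tool definitions into Gemini functionDeclarations: name, description, a trimmed parameters schema, plus a default string `response` schema. A small worked example using the test tool from this diff (the OpenAI-style wrapper shape is inferred from the `x.def.function.*` accesses above, and the tool name here is hypothetical):

    // Input: OpenAI-style definition (wrapper shape inferred; name is hypothetical).
    const openAiStyle = {
        type: 'function', function: {
            name: 'demo_tool',
            description: 'Test tool from this diff',
            parameters: {
                type: 'object',
                properties: {
                    a: { type: 'string', description: 'AI created a random string, default "1"' },
                    b: { type: 'string', enum: ['1', '2'], description: 'Enum parameter' },
                },
                required: ['a', 'b'],
                additionalProperties: false,
            },
        },
    };
    // Output: what the mapping above would hand to functionDeclarations
    // (note that additionalProperties is dropped and a default response schema is added).
    const geminiDeclaration = {
        name: 'demo_tool',
        description: 'Test tool from this diff',
        parameters: {
            type: 'object',
            properties: openAiStyle.function.parameters.properties,
            required: ['a', 'b'],
        },
        response: { type: 'string', description: 'It could be a string or JSON' },
    };
    console.log(geminiDeclaration);
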
@@ -687,17 +698,30 @@ const handleToolsCall = async (msg, options) => {
  if (msg?.tool_calls?.length) {
  switch (options?.flavor) {
  case CLAUDE: preRes.push({ role: 'assistant', content: msg?.tool_calls }); break;
+ case GEMINI: preRes.push({ role: 'model', parts: msg?.tool_calls.map(x => ({ functionCall: x })) }); break;
  case CHATGPT: default: preRes.push({ role: 'assistant', ...msg });
  }
  for (const fn of msg.tool_calls) {
- input = parseJson(fn?.function?.arguments || fn?.input);
  switch (options?.flavor) {
  case CLAUDE:
- fn.input = input;
+ input = fn.input = parseJson(fn?.input);
  packMsg = (content, is_error) => ({
  type: 'tool_result', tool_use_id: fn.id, content, is_error,
- }); break;
+ });
+ break;
+ case GEMINI:
+ input = fn.args;
+ packMsg = (t, e) => ({
+ functionResponse: {
+ name: fn.name, response: {
+ name: fn.name,
+ content: e ? `[Error] ${t}` : JSON.stringify(t),
+ }
+ }
+ });
+ break;
  case CHATGPT: default:
+ input = parseJson(fn?.function?.arguments);
  packMsg = (t, e) => ({
  role: 'tool', tool_call_id: fn.id, [e ? 'error' : 'content']: t
  });
@@ -711,13 +735,14 @@ const handleToolsCall = async (msg, options) => {
  continue;
  }
  try {
- content.push(packMsg(await func(...Object.values(input))));
+ content.push(packMsg(await func(input)));
  } catch (err) {
  content.push(packMsg(`Function call failed: ${err.message}`, true));
  }
  }
  switch (options?.flavor) {
- case CLAUDE: content = [{ role: 'user', content }];
+ case CLAUDE: content = [{ role: 'user', content }]; break;
+ case GEMINI: content = [{ role: 'user', parts: content }]; break;
  }
  }
  return [...preRes, ...content];
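
For the GEMINI flavor the handler now echoes the model's functionCall parts back as a `role: 'model'` turn, passes each tool its parsed args object directly (instead of spreading `Object.values(input)` positionally), and collects every functionResponse part under a single `role: 'user'` turn. A self-contained sketch of the two history entries this yields for one call against the test tool above (values illustrative):

    // One assumed function call from Gemini for the test tool above.
    const fn = { name: 'demo_tool', args: { a: '1', b: '2' } };     // illustrative
    const result = `OK: ${~~fn.args.a + ~~fn.args.b}`;              // 'OK: 3', per the tool's func

    // History entries appended by handleToolsCall for flavor GEMINI:
    const modelTurn = { role: 'model', parts: [{ functionCall: fn }] };
    const userTurn = {
        role: 'user', parts: [{
            functionResponse: {
                name: fn.name,
                response: { name: fn.name, content: JSON.stringify(result) },
            },
        }],
    };
    console.log(modelTurn, userTurn);
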
@@ -847,7 +872,8 @@ const promptClaude = async (content, options = {}) => {
  const resp = await client.messages.create({
  model: options.model, max_tokens: MODELS[options.model].maxOutputTokens,
  messages: [
- ...options?.messages || [], buildClaudeMessage(content, options), ...options?.toolsResult || [],
+ ...options?.messages || [], buildClaudeMessage(content, options),
+ ...options?.toolsResult || [],
  ], stream: !!options?.stream, ...reasoning ? {
  thinking: options?.thinking || { type: 'enabled', budget_tokens: 1024 },
  } : {}, // https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking
@@ -859,7 +885,6 @@ const promptClaude = async (content, options = {}) => {
  = [null, '', '', '', '', '', []];
  if (options?.stream) {
  for await (event of resp) {
- print(event);
  let [thkDelta, txtDelta] = [
  event?.content_block?.thinking || event?.delta?.thinking || '',
  event?.content_block?.text || event?.delta?.text || '',
@@ -949,9 +974,10 @@ const packGeminiReferences = (chunks, supports) => {

  const handleGeminiResponse = async (resp, options) => {
  const _resp = await resp;
- let [result, references] = ['', null];
+ let [result, references, functionCalls] = ['', null, null];
  if (options?.stream) {
  for await (const chunk of _resp.stream) {
+ functionCalls || (functionCalls = chunk.functionCalls);
  const delta = chunk?.text?.() || '';
  const rfc = packGeminiReferences(
  chunk.candidates[0]?.groundingMetadata?.groundingChunks,
@@ -968,14 +994,19 @@ const handleGeminiResponse = async (resp, options) => {
  }
  }
  const __resp = await _resp.response;
- return await packGptResp(options?.stream ? {
+ const toolsResult = await handleToolsCall({
+ tool_calls: (functionCalls || __resp.functionCalls)()
+ }, { flavor: GEMINI });
+ return await (toolsResult.length ? promptGemini(
+ options?.content, { ...options || {}, toolsResult }
+ ) : packGptResp(options?.stream ? {
  __resp, text: () => result, references
  } : {
  ...__resp, references: packGeminiReferences(
  __resp.candidates[0]?.groundingMetadata?.groundingChunks,
  __resp.candidates[0]?.groundingMetadata?.groundingSupports
  )
- }, options);
+ }, options));
  };

  const promptGemini = async (content, options) => {
@@ -987,15 +1018,20 @@ const promptGemini = async (content, options) => {
  options?.jsonMode && MODELS[genModel]?.json == false
  ), `This model does not support JSON output: ${genModel} `);
  const chat = generative.startChat({
- history: options?.messages && !options?.attachments?.length
- ? options.messages : [],
+ history: [
+ ...options?.messages && !options?.attachments?.length ? options.messages : [],
+ ...options?.toolsResult ? [
+ { role: 'user', parts: buildGeminiMessage(content, options) },
+ options?.toolsResult[0]
+ ] : [],
+ ],
  ...generationConfig(options),
  });
  const resp = chat[options?.stream ? 'sendMessageStream' : 'sendMessage'](
- options?.toolsResult ? options?.toolsResult[1].parts : buildGeminiMessage(content, options)
+ buildGeminiMessage(content, options)
  );
  return await handleGeminiResponse(
- resp, { ...options || {}, model: genModel }
+ resp, { ...options || {}, content, model: genModel }
  );
  };
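
Taken together, handleGeminiResponse now extracts functionCalls from the response (or captures them while streaming), converts them to a toolsResult pair via handleToolsCall, and re-enters promptGemini with the original content, so the follow-up request replays the user turn and the model's functionCall turn in history while sending the functionResponse parts as the new message. A rough sketch of the shapes involved in that second call (illustrative values, building on the handleToolsCall sketch above; how promptGemini is exposed to callers is not shown in this diff):

    // toolsResult is what handleToolsCall returns for flavor GEMINI:
    const toolsResult = [
        { role: 'model', parts: [{ functionCall: { name: 'demo_tool', args: { a: '1', b: '2' } } }] },
        { role: 'user', parts: [{ functionResponse: { name: 'demo_tool', response: { name: 'demo_tool', content: '"OK: 3"' } } }] },
    ];
    // promptGemini(content, { ...options, toolsResult }) then builds:
    //   history: [...options.messages, { role: 'user', parts: buildGeminiMessage(content, options) }, toolsResult[0]]
    //   message sent: toolsResult[1].parts
    // and handleGeminiResponse() returns the final text via packGptResp() once no
    // further functionCalls come back.
    console.log(toolsResult);
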
package/lib/manifest.mjs CHANGED
@@ -1,7 +1,7 @@
  const manifest = {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "1998.2.24",
+ "version": "1998.2.25",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "1998.2.24",
+ "version": "1998.2.25",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",