utilitas 1998.2.23 → 1998.2.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/alan.mjs CHANGED
@@ -7,9 +7,12 @@ import { create as createUoid } from './uoid.mjs';
 import {
     log as _log,
     renderText as _renderText,
-    base64Encode, ensureArray, ensureString, extract, ignoreErrFunc,
+    base64Encode, ensureArray, ensureString, extract,
+    ignoreErrFunc,
+    insensitiveCompare,
+    isSet,
     need, parseJson,
-    throwError,
+    throwError
 } from './utilitas.mjs';

 const _NEED = [
@@ -82,6 +85,7 @@ const log = (cnt, opt) => _log(cnt, import.meta.url, { time: 1, ...opt || {} });
 const CONTENT_IS_REQUIRED = 'Content is required.';
 const assertContent = content => assert(content.length, CONTENT_IS_REQUIRED);

+
 const DEFAULT_MODELS = {
     [CHATGPT_MINI]: GPT_4O_MINI,
     [CHATGPT_REASONING]: GPT_O3_MINI,
@@ -123,7 +127,7 @@ const MODELS = {
         trainingData: 'Oct 2023',
         json: true,
         vision: true,
-        reasoning: false,
+        tools: true,
         audio: 'gpt-4o-mini-audio-preview',
         supportedMimeTypes: [
             png, jpeg, gif, webp,
@@ -142,7 +146,7 @@ const MODELS = {
         trainingData: 'Oct 2023',
         json: true,
         vision: true,
-        reasoning: false,
+        tools: true,
         audio: 'gpt-4o-audio-preview',
         supportedMimeTypes: [
             png, jpeg, gif, webp,
@@ -162,6 +166,7 @@ const MODELS = {
         json: true,
         reasoning: true,
         vision: true,
+        tools: true,
         // audio: 'gpt-4o-audio-preview', // fallback to GPT-4O to support audio
         supportedMimeTypes: [
             png, jpeg, gif, webp,
@@ -181,6 +186,7 @@ const MODELS = {
         json: true,
         reasoning: true,
         vision: true,
+        tools: true,
         // audio: 'gpt-4o-mini-audio-preview', // fallback to GPT-4O-MINI to support audio
         supportedMimeTypes: [
             png, jpeg, gif, webp,
@@ -213,6 +219,7 @@ const MODELS = {
         trainingData: 'August 2024',
         vision: true,
         json: true,
+        tools: true,
         supportedMimeTypes: [
             png, jpeg, mov, mpeg, mp4, mpg, avi, wmv, mpegps, flv, pdf, aac,
             flac, mp3, m4a, mpga, opus, pcm, wav, webm, tgpp,
@@ -231,7 +238,6 @@ const MODELS = {
         requestLimitsRPD: 1500,
         tokenLimitsTPM: 4 * 1000000,
         trainingData: 'August 2024',
-        json: false,
         vision: true,
         reasoning: true,
         supportedMimeTypes: [
@@ -262,8 +268,6 @@ const MODELS = {
         maxOutputTokens: 32768,
         requestLimitsRPM: Infinity,
         tokenLimitsTPM: Infinity,
-        json: false,
-        vision: false,
         reasoning: true,
     },
     [TEXT_EMBEDDING_3_SMALL]: {
@@ -300,6 +304,7 @@ const MODELS = {
         tokenLimitsITPM: 40000,
         tokenLimitsOTPM: 8000,
         trainingData: 'Apr 2024',
+        tools: true,
         supportedMimeTypes: [
             png, jpeg, gif, webp, pdf,
         ],
@@ -319,6 +324,7 @@ const MODELS = {
         tokenLimitsOTPM: 8000,
         trainingData: 'Apr 2024', // ?
         reasoning: true,
+        tools: true,
         supportedMimeTypes: [
             png, jpeg, gif, webp, pdf,
         ],
@@ -382,19 +388,17 @@ const init = async (options) => {
     const { GoogleGenerativeAI } = await need('@google/generative-ai');
     const genAi = new GoogleGenerativeAI(options.apiKey);
     const genModel = options?.model || DEFAULT_MODELS[GEMINI];
-    const tools = options?.tools || { google: true, code: false };
     clients[provider] = {
         generative: genAi.getGenerativeModel({
-            model: genModel,
-            tools: [
-                // @todo: https://cloud.google.com/vertex-ai/generative-ai/docs/gemini-v2?hl=en#search-tool
-                ...tools.code ? [{
-                    codeExecution: tools.code === true ? {} : tools.code
-                }] : [],
-                ...tools.google ? [{
-                    googleSearch: tools.google === true ? {} : tools.code,
-                }] : [],
-            ],
+            model: genModel, ...MODELS[genModel]?.tools ? (options?.tools ?? {
+                tools: [
+                    // @todo: Gemini will fail when using these tools together.
+                    // https://ai.google.dev/gemini-api/docs/function-calling
+                    // { codeExecution: {} },
+                    // { googleSearch: {} },
+                    { functionDeclarations: toolsGemini.map(x => x.def) },
+                ],
+            }) : {},
         }),
         embedding: genAi.getGenerativeModel({
             model: DEFAULT_MODELS[GEMINI_EMEDDING],
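
The rewritten init gates Gemini tool wiring on the model's `tools` capability flag rather than the old `{ google: true, code: false }` options. A minimal sketch, assuming `toolsGemini` as defined later in this diff, of what a tools-capable model's config resolves to when no `options.tools` override is passed:

// Sketch only: the effective getGenerativeModel call for a tools-capable model.
// codeExecution and googleSearch stay commented out in the source because Gemini
// rejects them when combined with function declarations.
const generative = genAi.getGenerativeModel({
    model: genModel,
    tools: [{ functionDeclarations: toolsGemini.map(x => x.def) }],
});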
@@ -451,6 +455,50 @@ const countTokens = async (input, options) => {
     );
 };

+const tools = [
+    {
+        def: {
+            type: 'function', strict: true, function: {
+                name: 'testFunctionCall',
+                description: 'This is a test function call',
+                parameters: {
+                    type: 'object',
+                    properties: {
+                        a: { type: 'string', description: 'AI created a random string, default "1"' },
+                        b: { type: 'string', enum: ['1', '2'], description: 'Enum parameter' }
+                    },
+                    required: ['a', 'b'],
+                    additionalProperties: false
+                }
+            }
+        },
+        func: async args => `OK: ${~~args.a + ~~args.b}`,
+    },
+];
+
+const toolsClaude = tools.map(x => ({
+    ...x, def: {
+        name: x.def.function.name,
+        description: x.def.function.description,
+        input_schema: x.def.function.parameters,
+    }
+}));
+
+const toolsGemini = tools.map(x => ({
+    ...x, def: {
+        name: x.def.function.name,
+        description: x.def.function.description,
+        parameters: {
+            type: 'object',
+            properties: x.def.function.parameters.properties,
+            required: x.def.function.parameters.required,
+        },
+        response: x.def.function?.response ?? {
+            type: 'string', description: 'It could be a string or JSON',
+        },
+    }
+}));
+
 const selectGptAudioModel = options => {
     assert(
         MODELS[options.model]?.audio,
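
These three tables hold one tool in three dialects: the OpenAI-style `def` is the source of truth, and the Claude and Gemini variants are derived from it. As a rough illustration (values abridged from `testFunctionCall`, not additional API surface), the mapped defs come out as:

// Claude form: flat name/description plus input_schema (the OpenAI "parameters" object, verbatim).
const claudeDef = {
    name: 'testFunctionCall',
    description: 'This is a test function call',
    input_schema: { type: 'object', properties: { /* a, b */ }, required: ['a', 'b'], additionalProperties: false },
};
// Gemini form: additionalProperties is dropped and a response shape is declared.
const geminiDef = {
    name: 'testFunctionCall',
    description: 'This is a test function call',
    parameters: { type: 'object', properties: { /* a, b */ }, required: ['a', 'b'] },
    response: { type: 'string', description: 'It could be a string or JSON' },
};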
@@ -630,12 +678,12 @@ const packResp = async (resp, options) => {
 };

 const packGptResp = async (resp, options) => {
-    const text = resp?.choices?.[0]?.message?.content // ChatGPT
-        || resp?.choices?.[0]?.message?.audio?.transcript // ChatGPT audio mode
-        || resp?.text?.() // Gemini
-        || resp?.content?.text // Claude
-        || resp?.message?.content || ''; // Ollama
-    const audio = resp?.choices?.[0]?.message?.audio?.data; // ChatGPT audio mode
+    const text = resp?.choices?.[0]?.message?.content // ChatGPT
+        || resp?.choices?.[0]?.message?.audio?.transcript // ChatGPT audio mode
+        || resp?.text?.() // Gemini
+        || resp?.content?.find(x => x.type === 'text')?.text // Claude
+        || resp?.message?.content || ''; // Ollama
+    const audio = resp?.choices?.[0]?.message?.audio?.data; // ChatGPT audio mode
     if (options?.raw) { return resp; }
     else if (options?.simple && options?.jsonMode) { return parseJson(text); }
     else if (options?.simple && options?.audioMode) { return audio; }
@@ -645,6 +693,61 @@ const packGptResp = async (resp, options) => {
     return await packResp({ text, audio, references: resp?.references }, options);
 };

+const handleToolsCall = async (msg, options) => {
+    let content = [], preRes = [], input, packMsg;
+    if (msg?.tool_calls?.length) {
+        switch (options?.flavor) {
+            case CLAUDE: preRes.push({ role: 'assistant', content: msg?.tool_calls }); break;
+            case GEMINI: preRes.push({ role: 'model', parts: msg?.tool_calls.map(x => ({ functionCall: x })) }); break;
+            case CHATGPT: default: preRes.push({ role: 'assistant', ...msg });
+        }
+        for (const fn of msg.tool_calls) {
+            switch (options?.flavor) {
+                case CLAUDE:
+                    input = fn.input = parseJson(fn?.input);
+                    packMsg = (content, is_error) => ({
+                        type: 'tool_result', tool_use_id: fn.id, content, is_error,
+                    });
+                    break;
+                case GEMINI:
+                    input = fn.args;
+                    packMsg = (t, e) => ({
+                        functionResponse: {
+                            name: fn.name, response: {
+                                name: fn.name,
+                                content: e ? `[Error] ${t}` : JSON.stringify(t),
+                            }
+                        }
+                    });
+                    break;
+                case CHATGPT: default:
+                    input = parseJson(fn?.function?.arguments);
+                    packMsg = (t, e) => ({
+                        role: 'tool', tool_call_id: fn.id, [e ? 'error' : 'content']: t
+                    });
+            }
+            const name = fn?.function?.name || fn?.name;
+            const func = tools.find(x => insensitiveCompare(
+                x.def?.function?.name || x?.def?.name, name
+            ))?.func;
+            if (!func) {
+                content.push(packMsg(`Function call failed, invalid function name: ${name}`, true));
+                continue;
+            }
+            try {
+                content.push(packMsg(await func(input)));
+            } catch (err) {
+                content.push(packMsg(`Function call failed: ${err.message}`, true));
+            }
+        }
+        switch (options?.flavor) {
+            case CLAUDE: content = [{ role: 'user', content }]; break;
+            case GEMINI: content = [{ role: 'user', parts: content }]; break;
+        }
+    }
+    return [...preRes, ...content];
+};
+
 const promptChatGPT = async (content, options = {}) => {
     const { client } = await getOpenAIClient(options);
     // https://github.com/openai/openai-node?tab=readme-ov-file#streaming-responses
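
`handleToolsCall` normalizes one round of tool execution: it first replays the assistant turn that requested the calls (`preRes`), then runs each function from the shared `tools` table (matched case-insensitively by name) and packs each result in the caller's flavor. A hedged sketch of the ChatGPT-flavor round trip for one successful call, with a hypothetical call id:

const followUp = await handleToolsCall({
    tool_calls: [{
        id: 'call_1', type: 'function',
        function: { name: 'testFunctionCall', arguments: '{"a":"1","b":"2"}' },
    }],
}, {}); // no flavor given => CHATGPT branch
// followUp => [
//     { role: 'assistant', tool_calls: [/* echoed request */] },
//     { role: 'tool', tool_call_id: 'call_1', content: 'OK: 3' },
// ]
// These messages are then passed back as options.toolsResult on the retry.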
@@ -674,45 +777,65 @@ const promptChatGPT = async (content, options = {}) => {
     let format;
     [format, options.audioMimeType, options.suffix]
         = options?.stream ? ['pcm16', pcm16, 'pcm.wav'] : [WAV, wav, WAV];
-    let [resp, resultText, resultAudio, chunk] = [
+    let [resp, resultText, resultAudio, chunk, resultTools] = [
         await client.chat.completions.create({
             modalities, audio: options?.audio || (
                 modalities?.find?.(x => x === AUDIO) && {
                     voice: DEFAULT_MODELS[OPENAI_VOICE], format
                 }
-            ), ...messages([...options?.messages || [], message]),
-            ...options?.jsonMode ? {
+            ), ...messages([
+                ...options?.messages || [], message,
+                ...options?.toolsResult || [],
+            ]), ...MODELS[options.model]?.tools ? {
+                tools: options?.tools ?? tools.map(x => x.def),
+            } : {}, ...options?.jsonMode ? {
                 response_format: { type: JSON_OBJECT }
-            } : {}, model: options.model, stream: !!options?.stream,
-        }), '', Buffer.alloc(0), null
+            } : {}, model: options.model, stream: !!options?.stream, store: true,
+        }), '', Buffer.alloc(0), null, [],
     ];
-    if (!options?.stream) {
-        return await packGptResp(resp, options);
-    }
-    for await (chunk of resp) {
-        const deltaText = chunk.choices[0]?.delta?.content
-            || chunk.choices[0]?.delta?.audio?.transcript || '';
-        const deltaAudio = chunk.choices[0]?.delta?.audio?.data ? await convert(
-            chunk.choices[0].delta.audio.data, { input: BASE64, expected: BUFFER }
-        ) : Buffer.alloc(0);
-        if (deltaText === '' && !deltaAudio.length) { continue; }
-        resultText += deltaText;
-        resultAudio = Buffer.concat([resultAudio, deltaAudio]);
-        const respAudio = options?.delta ? deltaAudio : resultAudio;
+    if (options?.stream) {
+        for await (chunk of resp) {
+            const deltaText = chunk.choices[0]?.delta?.content
+                || chunk.choices[0]?.delta?.audio?.transcript || '';
+            const deltaAudio = chunk.choices[0]?.delta?.audio?.data ? await convert(
+                chunk.choices[0].delta.audio.data, { input: BASE64, expected: BUFFER }
+            ) : Buffer.alloc(0);
+            const deltaFunc = chunk.choices[0]?.delta?.tool_calls || [];
+            for (const x in deltaFunc) {
+                let curFunc = resultTools.find(z => z.index === deltaFunc[x].index);
+                curFunc || (resultTools.push(curFunc = {}));
+                isSet(deltaFunc[x].index, true) && (curFunc.index = deltaFunc[x].index);
+                deltaFunc[x].id && (curFunc.id = deltaFunc[x].id);
+                deltaFunc[x].type && (curFunc.type = deltaFunc[x].type);
+                curFunc.function || (curFunc.function = { name: '', arguments: '' });
+                if (deltaFunc[x].function) {
+                    deltaFunc[x].function.name && (curFunc.function.name += deltaFunc[x].function.name);
+                    deltaFunc[x].function.arguments && (curFunc.function.arguments += deltaFunc[x].function.arguments);
+                }
+            }
+            if (deltaText === '' && !deltaAudio.length) { continue; }
+            resultText += deltaText;
+            resultAudio = Buffer.concat([resultAudio, deltaAudio]);
+            const respAudio = options?.delta ? deltaAudio : resultAudio;
+            chunk.choices[0].message = {
+                content: options?.delta ? deltaText : resultText,
+                ...respAudio.length ? { audio: { data: respAudio } } : {},
+            };
+            await ignoreErrFunc(async () => await options?.stream?.(
+                await packGptResp(chunk, { ...options || {}, processing: true })
+            ), LOG);
+        }
+        chunk.choices?.[0] || (chunk.choices = [{}]); // handle empty choices for Azure APIs
         chunk.choices[0].message = {
-            content: options?.delta ? deltaText : resultText,
-            ...respAudio.length ? { audio: { data: respAudio } } : {},
+            content: resultText, tool_calls: resultTools,
+            ...resultAudio.length ? { audio: { data: resultAudio } } : {},
         };
-        await ignoreErrFunc(async () => await options?.stream?.(
-            await packGptResp(chunk, { ...options || {}, processing: true })
-        ), LOG);
+        resp = chunk;
     }
-    chunk.choices?.[0] || (chunk.choices = [{}]); // handle empty choices for Azure APIs
-    chunk.choices[0].message = {
-        content: resultText,
-        ...resultAudio.length ? { audio: { data: resultAudio } } : {},
-    };
-    return await packGptResp(chunk, options);
+    const toolsResult = await handleToolsCall(resp?.choices?.[0]?.message);
+    return await (toolsResult.length ? promptChatGPT(
+        content, { ...options || {}, toolsResult }
+    ) : packGptResp(resp, options));
 };

 const promptAzure = async (content, options = {}) => await promptChatGPT(
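
In the streaming branch, OpenAI-style tool calls arrive as fragments keyed by `index`, so `resultTools` has to stitch `id`, `type`, and the growing `function.name` / `function.arguments` strings back together before `handleToolsCall` can run. A condensed, self-contained sketch of that accumulation (two hypothetical deltas for one call; the real loop additionally uses `isSet` to guard the index):

const deltas = [
    { index: 0, id: 'call_1', type: 'function', function: { name: 'testFunc', arguments: '{"a"' } },
    { index: 0, function: { arguments: ':"1"}' } },
];
const resultTools = [];
for (const d of deltas) {
    // Find the partial call for this index, or open a new accumulator.
    let cur = resultTools.find(z => z.index === d.index);
    cur || resultTools.push(cur = { index: d.index, function: { name: '', arguments: '' } });
    d.id && (cur.id = d.id);
    d.type && (cur.type = d.type);
    d.function?.name && (cur.function.name += d.function.name);
    d.function?.arguments && (cur.function.arguments += d.function.arguments);
}
// resultTools[0].function.arguments === '{"a":"1"}'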
@@ -749,33 +872,55 @@ const promptClaude = async (content, options = {}) => {
     const resp = await client.messages.create({
         model: options.model, max_tokens: MODELS[options.model].maxOutputTokens,
         messages: [
-            ...options?.messages || [], buildClaudeMessage(content, options)
+            ...options?.messages || [], buildClaudeMessage(content, options),
+            ...options?.toolsResult || [],
         ], stream: !!options?.stream, ...reasoning ? {
             thinking: options?.thinking || { type: 'enabled', budget_tokens: 1024 },
-        } : {} // https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking
+        } : {}, // https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking
+        ...MODELS[options.model]?.tools ? {
+            tools: options?.tools ?? toolsClaude.map(x => x.def),
+        } : {},
     });
-    let [event, result, thinkEnd] = [null, '', ''];
+    let [event, txtResult, thinking, signature, result, thinkEnd, tool_calls]
+        = [null, '', '', '', '', '', []];
     if (options?.stream) {
         for await (event of resp) {
             let [thkDelta, txtDelta] = [
                 event?.content_block?.thinking || event?.delta?.thinking || '',
                 event?.content_block?.text || event?.delta?.text || '',
             ];
+            txtResult += txtDelta;
+            thinking += thkDelta;
+            signature = signature || event?.content_block?.signature || event?.delta?.signature || '';
             if (reasoning) {
                 !result && thkDelta && (thkDelta = `${THINK_STR}\n${thkDelta}`);
                 result && txtDelta && !thinkEnd && (thinkEnd = thkDelta = `${thkDelta}\n${THINK_END}\n\n`);
             }
+            if (event?.content_block?.type === 'tool_use') {
+                tool_calls.push({ ...event?.content_block, input: '' });
+            } else if (event?.delta?.partial_json) {
+                tool_calls[tool_calls.length - 1].input += event?.delta?.partial_json;
+            }
             const delta = thkDelta + txtDelta;
             if (delta === '') { continue; }
             result += delta;
-            event.content = { text: options?.delta ? delta : result };
+            event.content = [{ type: 'text', text: options?.delta ? delta : result }];
             await ignoreErrFunc(async () => await options.stream(
                 await packGptResp(event, { ...options || {}, processing: true })
             ), LOG);
         }
-        event.content = { text: result };
+        event.content = [{ type: 'text', text: tool_calls.length ? txtResult : result }];
+        tool_calls.length && thinking && event.content.unshift({ type: 'thinking', thinking, signature });
+    } else {
+        event = resp;
+        tool_calls = resp?.content?.filter?.(x => x.type === 'tool_use') || [];
     }
-    return await packGptResp(options?.stream ? event : resp, options);
+    const toolsResult = await handleToolsCall({ tool_calls }, { flavor: CLAUDE });
+    if (toolsResult.length) {
+        toolsResult[0].content.unshift(...event?.content.filter(x => x?.type !== 'tool_use'));
+        return await promptClaude(content, { ...options || {}, toolsResult });
+    }
+    return packGptResp(event, options);
 };

 const uploadFile = async (input, options) => {
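
Claude streams a tool request as a `tool_use` content block followed by `partial_json` input deltas, which is why the loop opens a new `tool_calls` entry on block start and keeps appending to the last entry's `input`. A self-contained sketch with assumed event shapes (not a full Anthropic event trace):

const tool_calls = [];
const events = [
    { content_block: { type: 'tool_use', id: 'toolu_1', name: 'testFunctionCall' } },
    { delta: { partial_json: '{"a":"1",' } },
    { delta: { partial_json: '"b":"2"}' } },
];
for (const event of events) {
    if (event?.content_block?.type === 'tool_use') {
        tool_calls.push({ ...event.content_block, input: '' });
    } else if (event?.delta?.partial_json) {
        tool_calls[tool_calls.length - 1].input += event.delta.partial_json;
    }
}
// handleToolsCall later parseJson()s each input and wraps results as tool_result blocks.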
@@ -829,9 +974,10 @@ const packGeminiReferences = (chunks, supports) => {

 const handleGeminiResponse = async (resp, options) => {
     const _resp = await resp;
-    let [result, references] = ['', null];
+    let [result, references, functionCalls] = ['', null, null];
     if (options?.stream) {
         for await (const chunk of _resp.stream) {
+            functionCalls || (functionCalls = chunk.functionCalls);
             const delta = chunk?.text?.() || '';
             const rfc = packGeminiReferences(
                 chunk.candidates[0]?.groundingMetadata?.groundingChunks,
@@ -848,14 +994,19 @@ const handleGeminiResponse = async (resp, options) => {
         }
     }
     const __resp = await _resp.response;
-    return await packGptResp(options?.stream ? {
+    const toolsResult = await handleToolsCall({
+        tool_calls: (functionCalls || __resp.functionCalls)()
+    }, { flavor: GEMINI });
+    return await (toolsResult.length ? promptGemini(
+        options?.content, { ...options || {}, toolsResult }
+    ) : packGptResp(options?.stream ? {
         __resp, text: () => result, references
     } : {
         ...__resp, references: packGeminiReferences(
             __resp.candidates[0]?.groundingMetadata?.groundingChunks,
             __resp.candidates[0]?.groundingMetadata?.groundingSupports
         )
-    }, options);
+    }, options));
 };

 const promptGemini = async (content, options) => {
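
With Gemini, `functionCalls` is a method on the chunk or final response, so it is captured during streaming (with a fallback to `__resp.functionCalls`) and invoked once at the end. When calls come back, the GEMINI flavor of `handleToolsCall` wraps each result as a `functionResponse` part; a sketch of one packed user turn, with values borrowed from the built-in test tool:

// Shape produced by handleToolsCall({ ... }, { flavor: GEMINI }) for one result:
const geminiToolTurn = {
    role: 'user',
    parts: [{
        functionResponse: {
            name: 'testFunctionCall',
            response: { name: 'testFunctionCall', content: JSON.stringify('OK: 3') },
        },
    }],
};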
@@ -867,15 +1018,20 @@ const promptGemini = async (content, options) => {
         options?.jsonMode && MODELS[genModel]?.json == false
     ), `This model does not support JSON output: ${genModel} `);
     const chat = generative.startChat({
-        history: options?.messages && !options?.attachments?.length
-            ? options.messages : [],
+        history: [
+            ...options?.messages && !options?.attachments?.length ? options.messages : [],
+            ...options?.toolsResult ? [
+                { role: 'user', parts: buildGeminiMessage(content, options) },
+                options?.toolsResult[0]
+            ] : [],
+        ],
         ...generationConfig(options),
     });
     const resp = chat[options?.stream ? 'sendMessageStream' : 'sendMessage'](
-        buildGeminiMessage(content, options)
+        options?.toolsResult ? options?.toolsResult[1].parts : buildGeminiMessage(content, options)
     );
     return await handleGeminiResponse(
-        resp, { ...options || {}, model: genModel }
+        resp, { ...options || {}, content, model: genModel }
     );
 };

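On a tools retry, `promptGemini` rebuilds the chat so the model sees its own `functionCall` turn in history (`toolsResult[0]`) and receives the `functionResponse` parts (`toolsResult[1].parts`) as the new message. A minimal usage sketch, assuming the built-in test tool and the module's default Gemini model:

// Sketch only: promptGemini recurses internally once toolsResult is produced.
const answer = await promptGemini(
    'Call testFunctionCall with a="1" and b="2", then report what it returned.',
    { model: DEFAULT_MODELS[GEMINI] }
);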
package/lib/manifest.mjs CHANGED
@@ -1,7 +1,7 @@
 const manifest = {
     "name": "utilitas",
     "description": "Just another common utility for JavaScript.",
-    "version": "1998.2.23",
+    "version": "1998.2.25",
     "private": false,
     "homepage": "https://github.com/Leask/utilitas",
     "main": "index.mjs",
package/lib/utilitas.mjs CHANGED
@@ -1,10 +1,10 @@
-import { assertPath, decodeBase64DataURL, readJson } from './storage.mjs';
-import { basename as _basename, dirname, join, sep } from 'path';
 import { fileURLToPath } from 'node:url';
+import { basename as _basename, dirname, join, sep } from 'path';
 import { promisify } from 'util';
 import { validate as verifyUuid } from 'uuid';
 import * as boxes from './boxes.mjs';
 import color from './color.mjs';
+import { assertPath, decodeBase64DataURL, readJson } from './storage.mjs';

 const call = (f, ...a) => promisify(Array.isArray(f) ? f[0].bind(f[1]) : f)(...a);
 const invalidTime = 'Invalid time.';
@@ -923,5 +923,5 @@ export {
     verifyUrl,
     verifyUuid,
     voidFunc,
-    which,
+    which
 };
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
     "name": "utilitas",
     "description": "Just another common utility for JavaScript.",
-    "version": "1998.2.23",
+    "version": "1998.2.25",
     "private": false,
     "homepage": "https://github.com/Leask/utilitas",
     "main": "index.mjs",