utilitas 1998.2.62 → 1998.2.64

package/lib/alan.mjs CHANGED
@@ -49,14 +49,15 @@ const [
  OPENAI, GEMINI, CHATGPT, OPENAI_EMBEDDING, GEMINI_EMEDDING, OPENAI_TRAINING,
  OLLAMA, CLAUDE, GPT_4O_MINI, GPT_4O, GPT_O1, GPT_O3_MINI, GEMINI_20_FLASH,
  GEMINI_20_FLASH_THINKING, GEMINI_20_PRO, NOVA, EMBEDDING_001, DEEPSEEK_R1,
- DEEPSEEK_R1_70B, DEEPSEEK_R1_32B, MD_CODE, CHATGPT_REASONING, TEXT_EMBEDDING_3_SMALL,
- TEXT_EMBEDDING_3_LARGE, CLOUD_37_SONNET, AUDIO, WAV, CHATGPT_MINI,
- ATTACHMENTS, CHAT, OPENAI_VOICE, MEDIUM, LOW, HIGH, GPT_REASONING_EFFORT,
- THINK, THINK_STR, THINK_END, AZURE, TOOLS_STR, TOOLS_END, TOOLS, TEXT,
- THINKING, OK, FUNC, GPT_45, REDACTED_THINKING, GEMMA_3_27B, AZURE_OPENAI,
- ANTHROPIC, VERTEX_ANTHROPIC, GEMMA327B, size8k, ais, MAX_TOOL_RECURSION,
- LOG, name, user, system, assistant, MODEL, JSON_OBJECT, TOOL, silent,
- NOT_INIT, INVALID_FILE, tokenSafeRatio, GPT_QUERY_LIMIT, minsOfDay,
+ DEEPSEEK_R1_70B, DEEPSEEK_R1_32B, MD_CODE, CHATGPT_REASONING,
+ TEXT_EMBEDDING_3_SMALL, TEXT_EMBEDDING_3_LARGE, CLOUD_37_SONNET, AUDIO, WAV,
+ CHATGPT_MINI, ATTACHMENTS, CHAT, OPENAI_VOICE, MEDIUM, LOW, HIGH,
+ GPT_REASONING_EFFORT, THINK, THINK_STR, THINK_END, AZURE, TOOLS_STR,
+ TOOLS_END, TOOLS, TEXT, THINKING, OK, FUNC, GPT_45, REDACTED_THINKING,
+ GEMMA_3_27B, AZURE_OPENAI, ANTHROPIC, VERTEX_ANTHROPIC, GEMMA327B, size8k,
+ ais, MAX_TOOL_RECURSION, LOG, name, user, system, assistant, MODEL,
+ JSON_OBJECT, TOOL, silent, NOT_INIT, INVALID_FILE, tokenSafeRatio,
+ GPT_QUERY_LIMIT, minsOfDay, CONTENT_IS_REQUIRED,
  ] = [
  'OPENAI', 'GEMINI', 'CHATGPT', 'OPENAI_EMBEDDING', 'GEMINI_EMEDDING',
  'OPENAI_TRAINING', 'OLLAMA', 'CLAUDE', 'gpt-4o-mini', 'gpt-4o', 'o1',
@@ -73,7 +74,7 @@ const [
  7680 * 4320, {}, 10, { log: true }, 'Alan', 'user', 'system',
  'assistant', 'model', 'json_object', 'tool', true,
  'AI engine has not been initialized.', 'Invalid file data.', 1.1, 100,
- 60 * 24,
+ 60 * 24, 'Content is required.',
  ];

  const [
@@ -109,7 +110,6 @@ const unifyEngine = options => unifyType(options?.engine, 'AI engine');
  const trimTailing = text => text.replace(/[\.\s]*$/, '');
  const renderText = (t, o) => _renderText(t, { extraCodeBlock: 0, ...o || {} });
  const log = (cnt, opt) => _log(cnt, import.meta.url, { time: 1, ...opt || {} });
- const CONTENT_IS_REQUIRED = 'Content is required.';
  const assertContent = content => assert(content.length, CONTENT_IS_REQUIRED);
  const countToolCalls = r => r?.split('\n').filter(x => x === TOOLS_STR).length;
  const assertApiKey = (p, o) => assert(o?.apiKey, `${p} api key is required.`);
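
The hunks above relocate CONTENT_IS_REQUIRED from a standalone const into the module's parallel name/value constant tables: one array of identifiers on the left is destructured against an array of values in the same order, so CONTENT_IS_REQUIRED now picks up 'Content is required.' exactly as NOT_INIT and INVALID_FILE do. A minimal, self-contained sketch of that pattern follows; the three-constant excerpt and the inline assert helper are illustrative stand-ins, not the package's full table:

// Hypothetical excerpt of the parallel name/value destructuring pattern.
const assert = (value, message) => { if (!value) throw new Error(message); };
const [NOT_INIT, INVALID_FILE, CONTENT_IS_REQUIRED] = [
    'AI engine has not been initialized.', 'Invalid file data.',
    'Content is required.',
];
// assertContent (unchanged in the hunk above) keeps consuming the shared constant:
const assertContent = content => assert(content.length, CONTENT_IS_REQUIRED);
assertContent('hello'); // passes; assertContent('') would throw 'Content is required.'
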
@@ -515,6 +515,7 @@ const init = async (options = {}) => {
  ais[id] = {
  id, provider, model, client: await OpenAI(options),
  prompt: async (cnt, opts) => await promptOpenAI(id, cnt, opts),
+ embedding: async (i, o) => await createOpenAIEmbedding(id, i, o),
  };
  break;
  case AZURE_OPENAI:
@@ -544,6 +545,7 @@ const init = async (options = {}) => {
  id, provider, model,
  client: new GoogleGenerativeAI(options.apiKey),
  prompt: async (cnt, opts) => await promptGemini(id, cnt, opts),
+ embedding: async (i, o) => await createGeminiEmbedding(id, i, o),
  };
  break;
  case ANTHROPIC:
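
With the two hunks above, each OpenAI and Gemini entry registered in ais gains an embedding method alongside prompt, pre-bound to that instance's id. A hypothetical usage sketch, assuming init() resolves to the instance object built above; the option names and input strings are illustrative only:

// Hypothetical usage of a per-instance embedding call (names are assumptions).
const ai = await init({ provider: 'OPENAI', apiKey: process.env.OPENAI_API_KEY });
const answer = await ai.prompt('Say hi.');             // existing entry point
const vector = await ai.embedding('Text to embed.');   // entry point added here
console.log(answer, Array.isArray(vector) && vector.length);
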
@@ -700,10 +702,9 @@ const buildGeminiHistory = (text, options) => buildGeminiMessage(
  text, { ...options || {}, history: true }
  );

- const [getOpenAIClient, getGeminiClient, getClaudeClient]
- = [OPENAI, GEMINI, CLAUDE].map(
- x => async options => await init({ ...provider(x), ...options })
- );
+ const [getOpenAIClient] = [OPENAI].map(
+ x => async options => await init({ ...provider(x), ...options })
+ );

  const listOpenAIModels = async (options) => {
  const { client } = await getOpenAIClient(options);
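
The surviving getOpenAIClient is built by mapping a list of provider ids onto async factories, each of which merges its provider into the caller's options before delegating to init(). A self-contained sketch of that shape, with provider() and init() replaced by trivial stand-ins for illustration:

// Stand-ins for illustration only; the real provider()/init() live in alan.mjs.
const provider = name => ({ provider: name });
const init = async options => ({ client: { label: options.provider } });

// One async factory per provider id, produced by a single map call.
const [getOpenAIClient] = ['OPENAI'].map(
    x => async options => await init({ ...provider(x), ...options })
);

const { client } = await getOpenAIClient({ apiKey: 'sk-test' });
console.log(client.label); // 'OPENAI'
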
@@ -1256,7 +1257,7 @@ const checkEmbeddingInput = async (input, model) => {
  return getInput();
  };

- const createOpenAIEmbedding = async (input, options) => {
+ const createOpenAIEmbedding = async (aiId, input, options) => {
  // args from vertex embedding may be useful uere
  // https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings
  // task_type Description
@@ -1265,7 +1266,7 @@ const createOpenAIEmbedding = async (input, options) => {
  // SEMANTIC_SIMILARITY Specifies the given text will be used for Semantic Textual Similarity(STS).
  // CLASSIFICATION Specifies that the embeddings will be used for classification.
  // CLUSTERING Specifies that the embeddings will be used for clustering.
- const { client } = await getOpenAIClient(options);
+ const { client } = await getAi(aiId);
  const model = options?.model || DEFAULT_MODELS[OPENAI_EMBEDDING];
  const resp = await client.embeddings.create({
  model, input: await checkEmbeddingInput(input, model),
@@ -1273,8 +1274,8 @@ const createOpenAIEmbedding = async (input, options) => {
  return options?.raw ? resp : resp?.data[0].embedding;
  };

- const createGeminiEmbedding = async (input, options) => {
- const { client } = await getGeminiClient(options);
+ const createGeminiEmbedding = async (aiId, input, options) => {
+ const { client } = await getAi(aiId);
  const model = options?.model || DEFAULT_MODELS[GEMINI_EMEDDING];
  const resp = await client.getGenerativeModel({ model }).embedContent(
  await checkEmbeddingInput(input, model)
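
Both embedding helpers now take the instance id as their first argument and resolve the already-initialized client via getAi(aiId), rather than spinning up a new client from the caller's options through getOpenAIClient / getGeminiClient. A hypothetical sketch of that lookup shape; ais, getAi, and the fake client below are stand-ins, not the module's real registry:

// Stand-in registry of initialized instances, keyed by id.
const ais = {};
const getAi = async aiId => {
    const ai = ais[aiId];
    if (!ai) throw new Error('AI engine has not been initialized.');
    return ai;
};

// The embedding helper resolves its client from the registry by id.
const createEmbedding = async (aiId, input, options) => {
    const { client } = await getAi(aiId);
    const resp = await client.embeddings.create({ model: options?.model, input });
    return options?.raw ? resp : resp?.data[0].embedding;
};

// Register a fake client so the sketch runs end to end.
ais['openai-0'] = {
    client: { embeddings: { create: async ({ input }) => ({ data: [{ embedding: [input.length] }] }) } },
};
console.log(await createEmbedding('openai-0', 'hello', {})); // [5]
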
@@ -1283,8 +1284,9 @@ const createGeminiEmbedding = async (input, options) => {
  };

  const buildGptTrainingCase = (prompt, response, options) => messages([
- ...options?.systemPrompt ? [buildGptMessage(options.systemPrompt, { role: system })] : [],
- buildGptMessage(prompt),
+ ...options?.systemPrompt ? [
+ buildGptMessage(options.systemPrompt, { role: system })
+ ] : [], buildGptMessage(prompt),
  buildGptMessage(response, { role: assistant }),
  ]);
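
buildGptTrainingCase (reformatted above without a behavior change) assembles one training example as a message list: an optional system message spread in only when options.systemPrompt is set, then the user prompt, then the assistant response. A simplified, self-contained sketch of the same shape; buildGptMessage, messages, and the literal role strings below are stand-ins for the module's real helpers:

// Simplified stand-ins; the real buildGptMessage/messages live in alan.mjs.
const buildGptMessage = (content, options) => ({ role: options?.role || 'user', content });
const messages = list => ({ messages: list });

const buildTrainingCase = (prompt, response, options) => messages([
    ...options?.systemPrompt
        ? [buildGptMessage(options.systemPrompt, { role: 'system' })] : [],
    buildGptMessage(prompt),
    buildGptMessage(response, { role: 'assistant' }),
]);

console.log(JSON.stringify(buildTrainingCase('Hi?', 'Hello!', { systemPrompt: 'Be brief.' })));
// {"messages":[{"role":"system","content":"Be brief."},{"role":"user","content":"Hi?"},{"role":"assistant","content":"Hello!"}]}
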
package/lib/manifest.mjs CHANGED
@@ -1,7 +1,7 @@
  const manifest = {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "1998.2.62",
+ "version": "1998.2.64",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "1998.2.62",
+ "version": "1998.2.64",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",