utilitas 1999.1.17 → 1999.1.19

This diff compares the contents of two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
package/lib/alan.mjs CHANGED
@@ -54,49 +54,46 @@ const [
  ];
 
  const [
- OPENAI, GEMINI, OPENAI_EMBEDDING, GEMINI_EMEDDING, OPENAI_TRAINING, OLLAMA,
- GPT_4O_MINI, GPT_4O, GPT_O1, GPT_O3_MINI, GEMINI_20_FLASH,
- GEMINI_20_FLASH_THINKING, GEMINI_20_PRO, NOVA, DEEPSEEK_R1, MD_CODE,
- TEXT_EMBEDDING_3_SMALL, TEXT_EMBEDDING_3_LARGE, CLOUD_37_SONNET, AUDIO, WAV,
- ATTACHMENTS, CHAT, OPENAI_VOICE, MEDIUM, LOW, HIGH, GPT_REASONING_EFFORT,
- THINK, THINK_STR, THINK_END, AZURE, TOOLS_STR, TOOLS_END, TOOLS, TEXT,
- THINKING, OK, FUNC, GPT_45, REDACTED_THINKING, GEMMA_3_27B, AZURE_OPENAI,
- ANTHROPIC, VERTEX_ANTHROPIC, GEMMA327B, v8k, ais, MAX_TOOL_RECURSION, LOG,
- name, user, system, assistant, MODEL, JSON_OBJECT, TOOL, silent,
- GEMINI_EMBEDDING_M, INVALID_FILE, tokenSafeRatio, GPT_QUERY_LIMIT,
- CONTENT_IS_REQUIRED, OPENAI_HI_RES_SIZE, k, kT, m, minute, hour,
- gb, trimTailing, EBD, GEMINI_20_FLASH_EXP, IMAGE, JINA_DEEPSEARCH,
- JINA_DEEPSEARCH_M, JINA_EMBEDDING, JINA_CLIP,
+ OPENAI, GEMINI, OPENAI_TRAINING, OLLAMA, GPT_4O_MINI, GPT_4O, GPT_O1,
+ GPT_O3_MINI, GEMINI_20_FLASH, GEMINI_20_FLASH_THINKING, GEMINI_20_PRO, NOVA,
+ DEEPSEEK_R1, MD_CODE, TEXT_EMBEDDING_3_SMALL, TEXT_EMBEDDING_3_LARGE,
+ CLOUD_37_SONNET, AUDIO, WAV, ATTACHMENTS, CHAT, OPENAI_VOICE, MEDIUM, LOW,
+ HIGH, GPT_REASONING_EFFORT, THINK, THINK_STR, THINK_END, AZURE, TOOLS_STR,
+ TOOLS_END, TOOLS, TEXT, THINKING, OK, FUNC, GPT_45, REDACTED_THINKING,
+ GEMMA_3_27B, AZURE_OPENAI, ANTHROPIC, VERTEX_ANTHROPIC, GEMMA327B, v8k, ais,
+ MAX_TOOL_RECURSION, LOG, name, user, system, assistant, MODEL, JSON_OBJECT,
+ TOOL, silent, GEMINI_EMBEDDING_M, INVALID_FILE, tokenSafeRatio,
+ GPT_QUERY_LIMIT, CONTENT_IS_REQUIRED, OPENAI_HI_RES_SIZE, k, kT, m, minute,
+ hour, gb, trimTailing, EBD, GEMINI_20_FLASH_EXP, IMAGE, JINA,
+ JINA_DEEPSEARCH, JINA_CLIP,
  ] = [
- 'OpenAI', 'Gemini', 'OPENAI_EMBEDDING', 'GEMINI_EMEDDING',
- 'OPENAI_TRAINING', 'Ollama', 'gpt-4o-mini', 'gpt-4o', 'o1', 'o3-mini',
- 'gemini-2.0-flash', 'gemini-2.0-flash-thinking-exp',
- 'gemini-2.0-pro-exp', 'nova', 'deepseek-r1', '```',
- 'text-embedding-3-small', 'text-embedding-3-large',
- 'claude-3-7-sonnet@20250219', 'audio', 'wav', '[ATTACHMENTS]', 'CHAT',
- 'OPENAI_VOICE', 'medium', 'low', 'high', 'medium', 'think', '<think>',
- '</think>', 'AZURE', '<tools>', '</tools>', 'tools', 'text', 'thinking',
- 'OK', 'function', 'gpt-4.5-preview', 'redacted_thinking',
- 'gemma-3-27b-it', 'Azure Openai', 'Anthropic', 'Vertex Anthropic',
- 'gemma3:27b', 7680 * 4320, [], 10, { log: true }, 'Alan', 'user',
- 'system', 'assistant', 'model', 'json_object', 'tool', true,
+ 'OpenAI', 'Gemini', 'OPENAI_TRAINING', 'Ollama', 'gpt-4o-mini',
+ 'gpt-4o', 'o1', 'o3-mini', 'gemini-2.0-flash',
+ 'gemini-2.0-flash-thinking-exp', 'gemini-2.0-pro-exp', 'nova',
+ 'deepseek-r1', '```', 'text-embedding-3-small',
+ 'text-embedding-3-large', 'claude-3-7-sonnet@20250219', 'audio', 'wav',
+ '[ATTACHMENTS]', 'CHAT', 'OPENAI_VOICE', 'medium', 'low', 'high',
+ 'medium', 'think', '<think>', '</think>', 'AZURE', '<tools>',
+ '</tools>', 'tools', 'text', 'thinking', 'OK', 'function',
+ 'gpt-4.5-preview', 'redacted_thinking', 'gemma-3-27b-it',
+ 'Azure Openai', 'Anthropic', 'Vertex Anthropic', 'gemma3:27b',
+ 7680 * 4320, [], 10, { log: true }, 'Alan', 'user', 'system',
+ 'assistant', 'model', 'json_object', 'tool', true,
  'gemini-embedding-exp-03-07', 'Invalid file data.', 1.1, 100,
  'Content is required.', 2000 * 768, x => 1024 * x, x => 1000 * x,
  x => 1024 * 1024 * x, x => 60 * x, x => 60 * 60 * x,
  x => 1024 * 1024 * 1024 * x, x => x.replace(/[\.\s]*$/, ''),
- { embedding: true }, 'gemini-2.0-flash-exp', 'image', 'Jina Deepsearch',
- 'jina-deepsearch-v1', 'JINA_EMBEDDING', 'jina-clip-v2',
+ { embedding: true }, 'gemini-2.0-flash-exp', 'image', 'Jina',
+ 'jina-deepsearch-v1', 'jina-clip-v2',
  ];
 
  const [tool, messages, text]
  = [type => ({ type }), messages => ({ messages }), text => ({ text })];
  const [CODE_INTERPRETER, RETRIEVAL, FUNCTION]
  = ['code_interpreter', 'retrieval', FUNC].map(tool);
- const [sessionType, aiType]
- = [`${name.toUpperCase()}-SESSION`, `${name.toUpperCase()}-AI`];
- const [newSessionId, newAiId]
- = [sessionType, aiType].map(type => () => createUoid({ type }));
  const _NO_RENDER = ['INSTRUCTIONS', 'MODELS', 'DEFAULT_MODELS'];
+ const sessionType = `${name.toUpperCase()}-SESSION`;
+ const newSessionId = () => createUoid({ type: sessionType });
  const chatConfig = { sessions: new Map(), systemPrompt: INSTRUCTIONS };
  const tokenSafe = count => Math.ceil(count * tokenSafeRatio);
  const renderText = (t, o) => _renderText(t, { extraCodeBlock: 0, ...o || {} });
@@ -117,7 +114,7 @@ const OPENAI_RULES = {
  imageCostTokens: ~~(OPENAI_HI_RES_SIZE / (512 * 512) * 170 + 85),
  maxFileSize: m(20), maxImageSize: OPENAI_HI_RES_SIZE,
  supportedMimeTypes: [png, jpeg, gif, webp],
- json: true, tools: true, vision: true,
+ json: true, tools: true, vision: true, defaultProvider: OPENAI,
  };
 
  const GEMINI_RULES = {
@@ -128,7 +125,7 @@ const GEMINI_RULES = {
  maxVideoPerPrompt: 10, vision: true, supportedMimeTypes: [
  png, jpeg, mov, mpeg, mp4, mpg, avi, wmv, mpegps, flv, pdf, aac,
  flac, mp3, m4a, mpga, opus, pcm, wav, webm, tgpp,
- ],
+ ], defaultProvider: GEMINI,
  };
 
  // https://platform.openai.com/docs/models
@@ -164,13 +161,13 @@ const MODELS = {
  contextWindow: kT(128), maxOutputTokens: k(8),
  imageCostTokens: 256, maxImageSize: 896 * 896,
  supportedMimeTypes: [png, jpeg, gif],
- fast: true, json: true, vision: true,
+ fast: true, json: true, vision: true, defaultProvider: GEMINI,
  },
- [JINA_DEEPSEARCH_M]: {
+ [JINA_DEEPSEARCH]: {
  contextWindow: Infinity, maxInputTokens: Infinity,
  maxOutputTokens: Infinity, imageCostTokens: 0, maxImageSize: Infinity,
  supportedMimeTypes: [png, jpeg, MIME_TEXT, webp, pdf],
- reasoning: true, json: true, vision: true,
+ reasoning: true, json: true, vision: true, defaultProvider: JINA,
  },
  [DEEPSEEK_R1]: {
  contextWindow: kT(128), maxOutputTokens: k(32),
@@ -189,6 +186,7 @@ const MODELS = {
  maxImagePerPrompt: 100, maxImageSize: 2000 * 2000,
  supportedMimeTypes: [png, jpeg, gif, webp, pdf],
  json: true, reasoning: true, tools: true, vision: true,
+ defaultProvider: ANTHROPIC,
  }, // https://docs.anthropic.com/en/docs/build-with-claude/vision
  };
 
@@ -207,12 +205,15 @@ for (const n in MODELS) {
  ) : MODELS[n].imageCostTokens;
  }
  }
- MODELS[GEMMA327B] = MODELS[GEMMA_3_27B]; // Ollama Alias
+
  MODELS[GEMINI_20_FLASH].image = GEMINI_20_FLASH_EXP;
  MODELS[GEMINI_20_FLASH_EXP] = {
  ...MODELS[GEMINI_20_FLASH],
  name: GEMINI_20_FLASH_EXP, image: true, tools: false,
  };
+ MODELS[GEMMA327B] = { // Ollama Alias
+ ...MODELS[GEMMA_3_27B], name: GEMMA327B, defaultProvider: OLLAMA
+ };
 
  // Default models for each provider
  const DEFAULT_MODELS = {
@@ -221,15 +222,17 @@ const DEFAULT_MODELS = {
  [GEMINI]: GEMINI_20_FLASH,
  [ANTHROPIC]: CLOUD_37_SONNET,
  [VERTEX_ANTHROPIC]: CLOUD_37_SONNET,
- [JINA_DEEPSEARCH]: JINA_DEEPSEARCH_M,
+ [JINA]: JINA_DEEPSEARCH,
  [OLLAMA]: GEMMA327B,
  [OPENAI_VOICE]: NOVA,
- [OPENAI_EMBEDDING]: TEXT_EMBEDDING_3_SMALL,
- [GEMINI_EMEDDING]: GEMINI_EMBEDDING_M,
- [JINA_EMBEDDING]: JINA_CLIP,
  [OPENAI_TRAINING]: GPT_4O_MINI, // https://platform.openai.com/docs/guides/fine-tuning
  };
- DEFAULT_MODELS[CHAT] = DEFAULT_MODELS[GEMINI];
+
+ const DEFAULT_EMBEDDING = {
+ [OPENAI]: TEXT_EMBEDDING_3_SMALL,
+ [GEMINI]: GEMINI_EMBEDDING_M,
+ [JINA]: JINA_CLIP,
+ };
 
  const tokenRatioByWords = Math.min(
  100 / 75, // ChatGPT: https://platform.openai.com/tokenizer
@@ -247,7 +250,7 @@ let tokeniser;
  const unifyProvider = provider => {
  assert(provider = (provider || '').trim(), 'AI provider is required.');
  for (let type of [OPENAI, AZURE_OPENAI, AZURE, GEMINI, ANTHROPIC,
- VERTEX_ANTHROPIC, JINA_DEEPSEARCH, OLLAMA]) {
+ VERTEX_ANTHROPIC, JINA, OLLAMA]) {
  if (insensitiveCompare(provider, type)) { return type; }
  }
  throwError(`Invalid AI provider: ${provider}.`);
@@ -347,112 +350,139 @@ const toolsGemini = async () => (await toolsOpenAI()).map(x => ({
  }
  }));
 
+ const buildAiId = (provider, model) => [provider, model].map(
+ x => ensureString(x, { case: 'SNAKE' })
+ ).join('_');
+
+ const setupAi = ai => {
+ const id = buildAiId(ai.provider, ai.model.name);
+ ais.push({
+ id, initOrder: ais.length,
+ priority: DEFAULT_MODELS[ai.provider] === ai.model.name ? -1 : 0,
+ modelEmbedding: MODELS[DEFAULT_EMBEDDING[ai.provider]], ...ai,
+ prompt: ai.prompt && (async (c, o) => await ai.prompt(id, c, o)),
+ embedding: ai.embedding && (async (c, o) => await ai.embedding(id, c, o)),
+ });
+ };
+
  const init = async (options = {}) => {
- const id = options.id || newAiId();
  const provider = unifyProvider(options?.provider);
- const modelName = options.model || DEFAULT_MODELS[provider];
- assert(modelName, `Model is required for provider: ${provider}.`);
- let model = options.modelConfig || MODELS[modelName];
- assert(model, `The model has not been configured yet: ${modelName}.`);
- model = { name: modelName, ...model };
+ let models;
+ if (options.model === '*') { // All models
+ models = Object.values(MODELS).filter(x => x.defaultProvider === provider);
+ } else if (options.model) { // Specific model
+ models = Object.values(MODELS).filter(x => ensureArray(options.model).includes(x.name));
+ } else if (DEFAULT_MODELS[provider]) { // Default model
+ models = [MODELS[DEFAULT_MODELS[provider]]];
+ } else if (options.modelConfig) {
+ models = ensureArray(options.modelConfig);
+ }
+ assert(models.length,
+ `Model name or description is required for provider: ${provider}.`);
  switch (provider) {
  case OPENAI:
  assertApiKey(provider, options);
- ais.push({
- id, provider, model, priority: 0, initOrder: ais.length,
- client: await OpenAI(options),
- prompt: async (cnt, opts) => await promptOpenAI(id, cnt, opts),
- embedding: async (i, o) => await createOpenAIEmbedding(id, i, o),
- });
+ var client = await OpenAI(options);
+ for (let model of models) {
+ setupAi({
+ provider, model, client,
+ prompt: promptOpenAI, embedding: createOpenAIEmbedding,
+ });
+ }
  break;
  case AZURE_OPENAI:
  assertApiKey(provider, options);
  assert(options.endpoint,
  `${provider} api endpoint and deployment are required.`);
- ais.push({
- id, provider, model, priority: 0, initOrder: ais.length,
- client: await AzureOpenAI({
- apiVersion: '2025-01-01-preview',
- deployment: model.name, ...options,
- }),
- prompt: async (cnt, opts) => await promptOpenAI(id, cnt, opts),
+ var model = models[0];
+ var client = await AzureOpenAI({
+ apiVersion: '2025-01-01-preview',
+ deployment: model.name, ...options,
  });
+ setupAi({ provider, model, client, prompt: promptOpenAI });
  break;
  case AZURE:
  assertApiKey(provider, options);
  assert(options.baseURL, `${provider} api endpoint is required.`);
- ais.push({
- id, provider, model, priority: 0, initOrder: ais.length,
- client: await OpenAI(options),
- prompt: async (cnt, opts) => await promptOpenAI(id, cnt, opts),
- });
+ var model = models[0];
+ var client = await OpenAI(options);
+ setupAi({ provider, model, client, prompt: promptOpenAI });
  break;
  case GEMINI:
  assertApiKey(provider, options);
  const { GoogleGenerativeAI } = await need('@google/generative-ai');
- ais.push({
- id, provider, model, priority: 0, initOrder: ais.length,
- client: new GoogleGenerativeAI(options.apiKey),
- prompt: async (cnt, opts) => await promptGemini(id, cnt, opts),
- embedding: async (i, o) => await createGeminiEmbedding(id, i, o),
- });
+ var client = new GoogleGenerativeAI(options.apiKey);
+ for (let model of models) {
+ setupAi({
+ provider, model, client,
+ prompt: promptGemini, embedding: createGeminiEmbedding,
+ });
+ }
  break;
  case ANTHROPIC:
  assertApiKey(provider, options);
- const Anthropic = (await need('@anthropic-ai/sdk')).Anthropic;
- ais.push({
- id, provider, model, priority: 0, initOrder: ais.length,
- client: new Anthropic(options),
- prompt: async (cnt, opts) => await promptAnthropic(id, cnt, opts),
- });
+ var client = new ((
+ await need('@anthropic-ai/sdk')
+ ).Anthropic)(options)
+ for (let model of models) {
+ setupAi({ provider, model, client, prompt: promptAnthropic });
+ }
  break;
  case VERTEX_ANTHROPIC:
  // https://github.com/anthropics/anthropic-sdk-typescript/tree/main/packages/vertex-sdk
  assert(options?.credentials, `${provider} api credentials are required.`);
- const AnthropicVertex = (await need('@anthropic-ai/vertex-sdk')).AnthropicVertex;
  process.env['GOOGLE_APPLICATION_CREDENTIALS'] = options.credentials;
  process.env['ANTHROPIC_VERTEX_PROJECT_ID'] = options.projectId;
- ais.push({
- id, provider, model, priority: 0, initOrder: ais.length,
- client: new AnthropicVertex({ region: options?.region || 'us-east5' }),
- prompt: async (cnt, opts) => await promptAnthropic(id, cnt, opts),
- });
+ var model = models[0];
+ var client = new ((
+ await need('@anthropic-ai/vertex-sdk')
+ ).AnthropicVertex)({ region: options?.region || 'us-east5' });
+ setupAi({ provider, model, client, prompt: promptAnthropic });
  break;
- case JINA_DEEPSEARCH:
+ case JINA:
  assertApiKey(provider, options);
- ais.push({
- id, provider, model, priority: 0, initOrder: ais.length,
- client: await OpenAI({
- baseURL: 'https://deepsearch.jina.ai/v1/', ...options,
- }),
- prompt: async (cnt, opts) => await promptOpenAI(id, cnt, opts),
- embedding: async (i, o) => await createJinaEmbedding(await OpenAI({
- baseURL: 'https://api.jina.ai/v1/', ...options,
- }), i, o),
- });
+ var [client, clientEmbedding] = [await OpenAI({
+ baseURL: 'https://deepsearch.jina.ai/v1/', ...options,
+ }), await OpenAI({
+ baseURL: 'https://api.jina.ai/v1/', ...options,
+ })];
+ for (let model of models) {
+ setupAi({
+ provider, model, client, clientEmbedding,
+ prompt: promptOpenAI, embedding: createOpenAIEmbedding,
+ });
+ }
  break;
  case OLLAMA:
  // https://github.com/ollama/ollama/blob/main/docs/openai.md
  const baseURL = 'http://localhost:11434/v1/';
- ais.push({
- id, provider, model, priority: 0, initOrder: ais.length,
- client: await OpenAI({ baseURL, apiKey: 'ollama', ...options }),
- prompt: async (cnt, opts) => await promptOpenAI(id, cnt, opts),
- });
  const phLog = m => log(`Ollama preheat: ${m?.message || m}`);
- ignoreErrFunc(async () => {
- phLog(await (await fetch(`${baseURL}completions`, {
- method: 'POST', body: JSON.stringify({
- model: model.name, prompt: '', keep_alive: -1
- })
- })).text());
- }, { log: phLog });
+ var client = await OpenAI({ baseURL, apiKey: 'ollama', ...options });
+ for (let model of models) {
+ setupAi({ provider, model, client, prompt: promptOpenAI });
+ ignoreErrFunc(async () => {
+ phLog(await (await fetch(`${baseURL}completions`, {
+ method: 'POST', body: JSON.stringify({
+ model: model.name, prompt: '', keep_alive: -1
+ })
+ })).text());
+ }, { log: phLog });
+ }
  break;
  default:
  throwError(`Invalid AI provider: ${options.provider || 'null'}.`);
  }
  ais.sort((a, b) => a.priority - b.priority || a.initOrder - b.initOrder);
- return ais.find(x => x.id === id);
+ return ais;
+ };
+
+ const packAi = (ais, options = {}) => {
+ const res = options.basic ? ais.map(x => ({
+ id: x.id, initOrder: x.initOrder, priority: x.priority,
+ provider: x.provider, model: x.model, modelEmbedding: x.modelEmbedding,
+ prompt: !!x.prompt, embedding: !!x.embedding,
+ })) : ais;
+ return options.all ? res : res[0];
  };
 
  const getAi = async (id, options = {}) => {
@@ -472,12 +502,12 @@ const getAi = async (id, options = {}) => {
  select && (res.push(x));
  }
  const best = options.select?.fast ? res.filter(x => x.model.fast) : res;
- if (best.length) { return options.all ? best : best[0]; }
+ if (best.length) { return packAi(best, options); }
  assert(res.length, 'AI not found.');
  log(`Best match AI not found, fallbacked: ${JSON.stringify(options.select)}.`);
- return options.all ? res : res[0];
+ return packAi(res, options);
  }
- const result = options.all ? ais : ais[0];
+ const result = packAi(ais, options);
  assert(result?.length || result?.id, 'AI not found.');
  return result;
  };
@@ -1173,7 +1203,7 @@ const checkEmbeddingInput = async (input, model) => {
  return getInput();
  };
 
- const createOpenAIEmbedding = async (client, input, options) => {
+ const createOpenAIEmbedding = async (aiId, input, options) => {
  // args from vertex embedding may be useful here
  // https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings
  // task_type Description
@@ -1182,23 +1212,20 @@ const createOpenAIEmbedding = async (client, input, options) => {
  // SEMANTIC_SIMILARITY Specifies the given text will be used for Semantic Textual Similarity(STS).
  // CLASSIFICATION Specifies that the embeddings will be used for classification.
  // CLUSTERING Specifies that the embeddings will be used for clustering.
- String.isString(client) && (client = (await getAi(client)).client);
- const model = options?.model || DEFAULT_MODELS[OPENAI_EMBEDDING];
- const resp = await client.embeddings.create({
+ let { client, modelEmbedding, clientEmbedding } = await getAi(aiId);
+ const model = options?.model || modelEmbedding.name;
+ const resp = await (clientEmbedding || client).embeddings.create({
  model, input: await checkEmbeddingInput(input, model),
  });
  return options?.raw ? resp : resp?.data[0].embedding;
  };
 
- const createJinaEmbedding = async (client, input, options) =>
- await createOpenAIEmbedding(client, input, {
- model: DEFAULT_MODELS[JINA_EMBEDDING], ...options || {}
- });
-
  const createGeminiEmbedding = async (aiId, input, options) => {
- const { client } = await getAi(aiId);
- const model = options?.model || DEFAULT_MODELS[GEMINI_EMEDDING];
- const resp = await client.getGenerativeModel({ model }).embedContent(
+ const { client, modelEmbedding, clientEmbedding } = await getAi(aiId);
+ const model = options?.model || modelEmbedding.name;
+ const resp = await (
+ clientEmbedding || client
+ ).getGenerativeModel({ model }).embedContent(
  await checkEmbeddingInput(input, model)
  );
  return options?.raw ? resp : resp?.embedding.values;
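
Taken together, the alan.mjs changes move initialization from one hand-built ais.push per provider to a shared setupAi registrar: init now resolves a list of models (an explicit name or array, the provider's DEFAULT_MODELS entry, or every model whose defaultProvider matches when model is '*'), registers one ais entry per model under a snake-cased buildAiId, and returns the whole sorted ais array, while getAi funnels its results through the new packAi helper. A minimal usage sketch of the new flow; the `alan` re-export, the placeholder API key, and the id-less getAi call are assumptions, not confirmed by this diff:

// Sketch only: assumes utilitas re-exports this module as `alan`, and that
// calling getAi without an id falls through to the packAi(ais, options)
// branch shown in the hunk above. The apiKey is a placeholder.
import { alan } from 'utilitas';

// `model: '*'` registers one entry per model whose defaultProvider is OpenAI;
// each id comes from buildAiId(provider, model.name): SNAKE-cased, '_'-joined.
await alan.init({ provider: 'OpenAI', apiKey: 'sk-placeholder', model: '*' });

// packAi options: `all` returns every entry; `basic` reduces each entry to
// serializable fields (id, provider, model, modelEmbedding, prompt/embedding flags).
const summaries = await alan.getAi(null, { all: true, basic: true });
console.log(summaries.map(x => `${x.id} -> ${x.model.name}`));
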
package/lib/manifest.mjs CHANGED
@@ -1,7 +1,7 @@
  const manifest = {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "1999.1.17",
+ "version": "1999.1.19",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "utilitas",
  "description": "Just another common utility for JavaScript.",
- "version": "1999.1.17",
+ "version": "1999.1.19",
  "private": false,
  "homepage": "https://github.com/Leask/utilitas",
  "main": "index.mjs",