utilitas 1998.2.63 → 1998.2.65
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
- package/README.md +13 -13
- package/dist/utilitas.lite.mjs +1 -1
- package/dist/utilitas.lite.mjs.map +1 -1
- package/lib/alan.mjs +29 -31
- package/lib/manifest.mjs +1 -1
- package/package.json +1 -1
package/lib/alan.mjs
CHANGED
@@ -1,7 +1,7 @@
 import { fileTypeFromBuffer } from 'file-type';
 import { end, loop } from './event.mjs';
 import { createWavHeader } from './media.mjs';
-import { checkSearch, search } from './shot.mjs';
+import get, { checkSearch, search } from './shot.mjs';
 import { BASE64, BUFFER, DATAURL, MIME_BINARY, STREAM, convert } from './storage.mjs';
 import { create as createUoid } from './uoid.mjs';
 import { distill } from './web.mjs';
@@ -515,6 +515,7 @@ const init = async (options = {}) => {
 ais[id] = {
 id, provider, model, client: await OpenAI(options),
 prompt: async (cnt, opts) => await promptOpenAI(id, cnt, opts),
+embedding: async (i, o) => await createOpenAIEmbedding(id, i, o),
 };
 break;
 case AZURE_OPENAI:
@@ -544,6 +545,7 @@ const init = async (options = {}) => {
 id, provider, model,
 client: new GoogleGenerativeAI(options.apiKey),
 prompt: async (cnt, opts) => await promptGemini(id, cnt, opts),
+embedding: async (i, o) => await createGeminiEmbedding(id, i, o),
 };
 break;
 case ANTHROPIC:
@@ -700,12 +702,8 @@ const buildGeminiHistory = (text, options) => buildGeminiMessage(
 text, { ...options || {}, history: true }
 );
 
-const
-
-);
-
-const listOpenAIModels = async (options) => {
-const { client } = await getOpenAIClient(options);
+const listOpenAIModels = async (aiId, options) => {
+const { client } = await getAi(aiId);
 const resp = await client.models.list();
 return options?.raw ? resp : resp.data;
 };
@@ -1137,8 +1135,8 @@ const promptAnthropic = async (aiId, content, options = {}) => {
 return packResp({ text: mergeMsgs(toolsResponse, tool_use) }, options);
 };
 
-const uploadFile = async (input, options) => {
-const { client } = await
+const uploadFile = async (aiId, input, options) => {
+const { client } = await getAi(aiId);
 const { content: file, cleanup } = await convert(input, {
 input: options?.input, ...options || {}, expected: STREAM,
 errorMessage: INVALID_FILE, suffix: options?.suffix,
@@ -1149,20 +1147,20 @@ const uploadFile = async (input, options) => {
 return resp;
 };
 
-const uploadFileForFineTuning = async (content, options) => await uploadFile(
-content, { suffix: 'jsonl', ...options, params: { purpose: 'fine-tune' } }
+const uploadFileForFineTuning = async (aiId, content, options) => await uploadFile(
+aiId, content, { suffix: 'jsonl', ...options, params: { purpose: 'fine-tune' } }
 );
 
-const listFiles = async (options) => {
-const { client } = await
+const listFiles = async (aiId, options) => {
+const { client } = await getAi(aiId);
 const files = [];
 const list = await client.files.list(options?.params || {});
 for await (const file of list) { files.push(file); }
 return files;
 };
 
-const deleteFile = async (file_id, options) => {
-const { client } = await
+const deleteFile = async (aiId, file_id, options) => {
+const { client } = await getAi(aiId);
 return await client.files.del(file_id);
 };
 
@@ -1255,7 +1253,7 @@ const checkEmbeddingInput = async (input, model) => {
 return getInput();
 };
 
-const createOpenAIEmbedding = async (input, options) => {
+const createOpenAIEmbedding = async (aiId, input, options) => {
 // args from vertex embedding may be useful uere
 // https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings
 // task_type Description
@@ -1264,7 +1262,7 @@ const createOpenAIEmbedding = async (input, options) => {
 // SEMANTIC_SIMILARITY Specifies the given text will be used for Semantic Textual Similarity(STS).
 // CLASSIFICATION Specifies that the embeddings will be used for classification.
 // CLUSTERING Specifies that the embeddings will be used for clustering.
-const { client } = await
+const { client } = await getAi(aiId);
 const model = options?.model || DEFAULT_MODELS[OPENAI_EMBEDDING];
 const resp = await client.embeddings.create({
 model, input: await checkEmbeddingInput(input, model),
@@ -1272,8 +1270,8 @@ const createOpenAIEmbedding = async (input, options) => {
 return options?.raw ? resp : resp?.data[0].embedding;
 };
 
-const createGeminiEmbedding = async (input, options) => {
-const { client } = await
+const createGeminiEmbedding = async (aiId, input, options) => {
+const { client } = await getAi(aiId);
 const model = options?.model || DEFAULT_MODELS[GEMINI_EMEDDING];
 const resp = await client.getGenerativeModel({ model }).embedContent(
 await checkEmbeddingInput(input, model)
@@ -1292,48 +1290,48 @@ const buildGptTrainingCases = (cases, opts) => cases.map(x => JSON.stringify(
 buildGptTrainingCase(x.prompt, x.response, { ...x.options, ...opts })
 )).join('\n');
 
-const createGptFineTuningJob = async (training_file, options) => {
-const { client } = await
+const createGptFineTuningJob = async (aiId, training_file, options) => {
+const { client } = await getAi(aiId);
 return await client.fineTuning.jobs.create({
 training_file, model: options?.model || DEFAULT_MODELS[OPENAI_TRAINING],
 })
 };
 
-const getGptFineTuningJob = async (job_id, options) => {
-const { client } = await
+const getGptFineTuningJob = async (aiId, job_id, options) => {
+const { client } = await getAi(aiId);
 // https://platform.openai.com/finetune/[job_id]?filter=all
 return await client.fineTuning.jobs.retrieve(job_id);
 };
 
-const cancelGptFineTuningJob = async (job_id, options) => {
-const { client } = await
+const cancelGptFineTuningJob = async (aiId, job_id, options) => {
+const { client } = await getAi(aiId);
 return await client.fineTuning.jobs.cancel(job_id);
 };
 
-const listGptFineTuningJobs = async (options) => {
-const { client } = await
+const listGptFineTuningJobs = async (aiId, options) => {
+const { client } = await getAi(aiId);
 const resp = await client.fineTuning.jobs.list({
 limit: GPT_QUERY_LIMIT, ...options?.params
 });
 return options?.raw ? resp : resp.data;
 };
 
-const listGptFineTuningEvents = async (job_id, options) => {
-const { client } = await
+const listGptFineTuningEvents = async (aiId, job_id, options) => {
+const { client } = await getAi(aiId);
 const resp = await client.fineTuning.jobs.listEvents(job_id, {
 limit: GPT_QUERY_LIMIT, ...options?.params,
 });
 return options?.raw ? resp : resp.data;
 };
 
-const tailGptFineTuningEvents = async (job_id, options) => {
+const tailGptFineTuningEvents = async (aiId, job_id, options) => {
 assert(job_id, 'Job ID is required.');
 const [loopName, listOpts] = [`GPT - ${job_id} `, {
 ...options, params: { ...options?.params, order: 'ascending' }
 }];
 let lastEvent;
 return await loop(async () => {
-const resp = await listGptFineTuningEvents(job_id, {
+const resp = await listGptFineTuningEvents(aiId, job_id, {
 ...listOpts, params: {
 ...listOpts?.params,
 ...(lastEvent ? { after: lastEvent.id } : {}),
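The net effect of the alan.mjs changes above is that the model, file, embedding, and fine-tuning helpers now take the id of a configured AI instance as their first argument and resolve the client via getAi(aiId) instead of building their own OpenAI client. A minimal caller-side sketch follows; the function signatures match the diff, but the import path, the availability of these exports, and the option values are assumptions, not confirmed by this diff.

// Hypothetical usage sketch for 1998.2.65; names match the diff, everything
// else (import path, exports, option values) is assumed.
import {
    createOpenAIEmbedding, listGptFineTuningJobs, listOpenAIModels,
} from './lib/alan.mjs';

const demo = async (aiId) => {
    // 1998.2.63 took (options) only; 1998.2.65 takes the AI instance id first.
    const models = await listOpenAIModels(aiId, { raw: false });
    // Embeddings follow the same pattern: (aiId, input, options).
    const vector = await createOpenAIEmbedding(aiId, 'hello world');
    // The fine-tuning helpers (create/get/cancel/list/events/tail) were
    // re-parameterized the same way.
    const jobs = await listGptFineTuningJobs(aiId, { raw: false });
    // Per the init hunks above, each OpenAI/Gemini entry also gains an
    // `embedding` handler that delegates to the provider-specific function.
    return { models, vector, jobs };
};

export default demo;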
package/lib/manifest.mjs
CHANGED