@promptbook/cli 0.76.0 → 0.77.0-3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/README.md +5 -1
  2. package/esm/index.es.js +238 -31
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/cli.index.d.ts +4 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  6. package/esm/typings/src/_packages/google.index.d.ts +10 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +6 -0
  8. package/esm/typings/src/_packages/utils.index.d.ts +2 -2
  9. package/esm/typings/src/_packages/vercel.index.d.ts +4 -0
  10. package/esm/typings/src/execution/AvailableModel.d.ts +5 -1
  11. package/esm/typings/src/execution/CommonToolsOptions.d.ts +9 -0
  12. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/google/GoogleExecutionToolsOptions.d.ts +12 -0
  15. package/esm/typings/src/llm-providers/google/createGoogleExecutionTools.d.ts +18 -0
  16. package/esm/typings/src/llm-providers/google/register-configuration.d.ts +13 -0
  17. package/esm/typings/src/llm-providers/google/register-constructor.d.ts +14 -0
  18. package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +1 -10
  19. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Request.d.ts +1 -1
  20. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_Prompt_Request.d.ts +1 -1
  21. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +1 -1
  22. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +1 -1
  23. package/esm/typings/src/llm-providers/vercel/VercelExecutionToolsOptions.d.ts +22 -0
  24. package/esm/typings/src/llm-providers/vercel/VercelProvider.d.ts +13 -0
  25. package/esm/typings/src/llm-providers/vercel/createExecutionToolsFromVercelProvider.d.ts +8 -0
  26. package/esm/typings/src/llm-providers/vercel/playground/playground.d.ts +6 -0
  27. package/esm/typings/src/utils/{$currentDate.d.ts → $getCurrentDate.d.ts} +1 -1
  28. package/package.json +2 -1
  29. package/umd/index.umd.js +239 -30
  30. package/umd/index.umd.js.map +1 -1
  31. package/esm/typings/src/_packages/gemini.index.d.ts +0 -2
  32. package/esm/typings/src/utils/getCurrentIsoDate.d.ts +0 -7
package/README.md CHANGED
@@ -23,6 +23,10 @@
23
23
 
24
24
 
25
25
 
26
+ <blockquote style="color: #ff8811">
27
+ <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
28
+ </blockquote>
29
+
26
30
  ## 📦 Package `@promptbook/cli`
27
31
 
28
32
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
@@ -294,7 +298,7 @@ Or you can install them separately:
294
298
  - **[@promptbook/openai](https://www.npmjs.com/package/@promptbook/openai)** - Execution tools for OpenAI API, wrapper around OpenAI SDK
295
299
  - **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK
296
300
  - **[@promptbook/vercel](https://www.npmjs.com/package/@promptbook/vercel)** - Adapter for Vercel functionalities
297
- - **[@promptbook/gemini](https://www.npmjs.com/package/@promptbook/gemini)** - Integration with Google's Gemini API
301
+ - **[@promptbook/google](https://www.npmjs.com/package/@promptbook/google)** - Integration with Google's Gemini API
298
302
  - **[@promptbook/azure-openai](https://www.npmjs.com/package/@promptbook/azure-openai)** - Execution tools for Azure OpenAI API
299
303
  - **[@promptbook/langtail](https://www.npmjs.com/package/@promptbook/langtail)** - Execution tools for Langtail API, wrapper around Langtail SDK
300
304
  - **[@promptbook/fake-llm](https://www.npmjs.com/package/@promptbook/fake-llm)** - Mocked execution tools for testing the library and saving the tokens
package/esm/index.es.js CHANGED
@@ -37,7 +37,7 @@ var BOOK_LANGUAGE_VERSION = '1.0.0';
37
37
  *
38
38
  * @see https://github.com/webgptorg/promptbook
39
39
  */
40
- var PROMPTBOOK_ENGINE_VERSION = '0.75.10';
40
+ var PROMPTBOOK_ENGINE_VERSION = '0.77.0-2';
41
41
  /**
42
42
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
43
43
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -5645,7 +5645,7 @@ function preparePipeline(pipeline, tools, options) {
5645
5645
  llmToolsWithUsage = countTotalUsage(llmTools);
5646
5646
  currentPreparation = {
5647
5647
  id: 1,
5648
- // TODO: [🍥]> date: $currentDate(),
5648
+ // TODO: [🍥]> date: $getCurrentDate(),
5649
5649
  promptbookVersion: PROMPTBOOK_ENGINE_VERSION,
5650
5650
  usage: ZERO_USAGE,
5651
5651
  };
@@ -10549,7 +10549,7 @@ var MemoryStorage = /** @class */ (function () {
10549
10549
  * @returns string_date branded type
10550
10550
  * @public exported from `@promptbook/utils`
10551
10551
  */
10552
- function $currentDate() {
10552
+ function $getCurrentDate() {
10553
10553
  return new Date().toISOString();
10554
10554
  }
10555
10555
 
@@ -10624,7 +10624,7 @@ function cacheLlmTools(llmTools, options) {
10624
10624
  // TODO: [🧠] !!!!! How to do timing in mixed cache / non-cache situation
10625
10625
  // promptResult.timing: FromtoItems
10626
10626
  return [4 /*yield*/, storage.setItem(key, {
10627
- date: $currentDate(),
10627
+ date: $getCurrentDate(),
10628
10628
  promptbookVersion: PROMPTBOOK_ENGINE_VERSION,
10629
10629
  prompt: prompt,
10630
10630
  promptResult: promptResult,
@@ -12198,15 +12198,6 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
12198
12198
  * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
12199
12199
  */
12200
12200
 
12201
- /**
12202
- * Get current date in ISO 8601 format
12203
- *
12204
- * @private internal utility
12205
- */
12206
- function getCurrentIsoDate() {
12207
- return new Date().toISOString();
12208
- }
12209
-
12210
12201
  /**
12211
12202
  * Function computeUsage will create price per one token based on the string value found on openai page
12212
12203
  *
@@ -12481,7 +12472,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
12481
12472
  ],
12482
12473
  // TODO: Is here some equivalent of user identification?> user: this.options.user,
12483
12474
  };
12484
- start = getCurrentIsoDate();
12475
+ start = $getCurrentDate();
12485
12476
  if (this.options.isVerbose) {
12486
12477
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
12487
12478
  }
@@ -12508,7 +12499,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
12508
12499
  }
12509
12500
  resultContent = contentBlock.text;
12510
12501
  // eslint-disable-next-line prefer-const
12511
- complete = getCurrentIsoDate();
12502
+ complete = $getCurrentDate();
12512
12503
  usage = computeAnthropicClaudeUsage(rawPromptContent || '', resultContent || '', rawResponse);
12513
12504
  return [2 /*return*/, $asDeeplyFrozenSerializableJson('AnthropicClaudeExecutionTools ChatPromptResult', {
12514
12505
  content: resultContent,
@@ -12557,7 +12548,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
12557
12548
  prompt: rawPromptContent,
12558
12549
  user: this.options.user,
12559
12550
  };
12560
- const start: string_date_iso8601 = getCurrentIsoDate();
12551
+ const start: string_date_iso8601 = $getCurrentDate();
12561
12552
  let complete: string_date_iso8601;
12562
12553
 
12563
12554
  if (this.options.isVerbose) {
@@ -12586,7 +12577,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
12586
12577
 
12587
12578
  const resultContent = rawResponse.choices[0].text;
12588
12579
  // eslint-disable-next-line prefer-const
12589
- complete = getCurrentIsoDate();
12580
+ complete = $getCurrentDate();
12590
12581
  const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [🐞] Compute usage * / } satisfies PromptResultUsage;
12591
12582
 
12592
12583
 
@@ -12648,7 +12639,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
12648
12639
  */
12649
12640
  var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
12650
12641
  if (options.isProxied) {
12651
- return new RemoteLlmExecutionTools(__assign(__assign({}, options), { userId: null, isAnonymous: true, llmToolsConfiguration: [
12642
+ return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
12652
12643
  {
12653
12644
  title: 'Anthropic Claude (proxied)',
12654
12645
  packageName: '@promptbook/anthropic-claude',
@@ -13257,7 +13248,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
13257
13248
  content: rawPromptContent,
13258
13249
  },
13259
13250
  ], false);
13260
- start = getCurrentIsoDate();
13251
+ start = $getCurrentDate();
13261
13252
  complete = void 0;
13262
13253
  if (this.options.isVerbose) {
13263
13254
  console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
@@ -13286,7 +13277,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
13286
13277
  }
13287
13278
  resultContent = rawResponse.choices[0].message.content;
13288
13279
  // eslint-disable-next-line prefer-const
13289
- complete = getCurrentIsoDate();
13280
+ complete = $getCurrentDate();
13290
13281
  usage = {
13291
13282
  price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
13292
13283
  input: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.promptTokens) }, computeUsageCounts(prompt.content)),
@@ -13347,7 +13338,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
13347
13338
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
13348
13339
  // <- Note: [🧆]
13349
13340
  };
13350
- start = getCurrentIsoDate();
13341
+ start = $getCurrentDate();
13351
13342
  complete = void 0;
13352
13343
  if (this.options.isVerbose) {
13353
13344
  console.info(colors.bgWhite('content'), JSON.stringify(content, null, 4));
@@ -13379,7 +13370,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
13379
13370
  }
13380
13371
  resultContent = rawResponse.choices[0].text;
13381
13372
  // eslint-disable-next-line prefer-const
13382
- complete = getCurrentIsoDate();
13373
+ complete = $getCurrentDate();
13383
13374
  usage = {
13384
13375
  price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
13385
13376
  input: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.promptTokens) }, computeUsageCounts(prompt.content)),
@@ -13475,6 +13466,222 @@ var _AzureOpenAiRegistration = $llmToolsRegister.register(createAzureOpenAiExecu
13475
13466
  * Note: [💞] Ignore a discrepancy between file name and entity name
13476
13467
  */
13477
13468
 
13469
+ /**
13470
+ * Registration of LLM provider metadata
13471
+ *
13472
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
13473
+ *
13474
+ * @public exported from `@promptbook/core`
13475
+ * @public exported from `@promptbook/cli`
13476
+ */
13477
+ var _GoogleMetadataRegistration = $llmToolsMetadataRegister.register({
13478
+ title: 'Google Gemini',
13479
+ packageName: '@promptbook/google',
13480
+ className: 'GoogleExecutionTools',
13481
+ getBoilerplateConfiguration: function () {
13482
+ return {
13483
+ title: 'Google Gemini (boilerplate)',
13484
+ packageName: '@promptbook/google',
13485
+ className: 'GoogleExecutionTools',
13486
+ options: {
13487
+ apiKey: 'sk-ant-api03-',
13488
+ isProxied: true,
13489
+ remoteUrl: DEFAULT_REMOTE_URL,
13490
+ path: DEFAULT_REMOTE_URL_PATH,
13491
+ },
13492
+ };
13493
+ },
13494
+ createConfigurationFromEnv: function (env) {
13495
+ // Note: Not using `process.env` BUT `env` to pass in the environment variables dynamically
13496
+ if (typeof env.GOOGLE_GEMINI_API_KEY === 'string') {
13497
+ return {
13498
+ title: 'Google Gemini (from env)',
13499
+ packageName: '@promptbook/google',
13500
+ className: 'GoogleExecutionTools',
13501
+ options: {
13502
+ apiKey: env.GOOGLE_GEMINI_API_KEY,
13503
+ },
13504
+ };
13505
+ }
13506
+ return null;
13507
+ },
13508
+ });
13509
+ /**
13510
+ * Note: [💞] Ignore a discrepancy between file name and entity name
13511
+ */
13512
+
13513
+ /**
13514
+ * !!!!!!
13515
+ *
13516
+ * @public exported from `@promptbook/vercel`
13517
+ */
13518
+ function createExecutionToolsFromVercelProvider(options) {
13519
+ var vercelProvider = options.vercelProvider, availableModels = options.availableModels, userId = options.userId, _a = options.additionalChatSettings, additionalChatSettings = _a === void 0 ? {} : _a;
13520
+ return {
13521
+ title: '!!!',
13522
+ description: "!!! (through Vercel)",
13523
+ checkConfiguration: function () {
13524
+ // TODO: !!!!!!
13525
+ return Promise.resolve();
13526
+ },
13527
+ listModels: function () {
13528
+ return __awaiter(this, void 0, void 0, function () {
13529
+ return __generator(this, function (_a) {
13530
+ return [2 /*return*/, availableModels];
13531
+ });
13532
+ });
13533
+ },
13534
+ callChatModel: function (prompt) {
13535
+ var _a;
13536
+ return __awaiter(this, void 0, void 0, function () {
13537
+ var content, parameters, modelRequirements, modelName, model, rawPromptContent, rawRequest, start, rawResponse, complete, usage;
13538
+ return __generator(this, function (_b) {
13539
+ switch (_b.label) {
13540
+ case 0:
13541
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
13542
+ if (modelRequirements.modelVariant !== 'CHAT') {
13543
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
13544
+ }
13545
+ modelName = modelRequirements.modelName ||
13546
+ ((_a = availableModels.find(function (_a) {
13547
+ var modelVariant = _a.modelVariant;
13548
+ return modelVariant === 'CHAT';
13549
+ })) === null || _a === void 0 ? void 0 : _a.modelName);
13550
+ if (!modelName) {
13551
+ throw new PipelineExecutionError(spaceTrim$1("\n Can not determine which model to use.\n\n You need to provide at least one of:\n 1) In `createExecutionToolsFromVercelProvider` options, provide `availableModels` with at least one model\n 2) In `prompt.modelRequirements`, provide `modelName` with the name of the model to use\n \n "));
13552
+ }
13553
+ return [4 /*yield*/, vercelProvider.chat(modelName, __assign({ user: (userId === null || userId === void 0 ? void 0 : userId.toString()) || undefined }, additionalChatSettings))];
13554
+ case 1:
13555
+ model = _b.sent();
13556
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
13557
+ rawRequest = {
13558
+ // <- TODO: [☂]
13559
+ inputFormat: 'messages',
13560
+ mode: {
13561
+ type: 'regular',
13562
+ tools: [
13563
+ /* !!!!!! */
13564
+ ],
13565
+ },
13566
+ prompt: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
13567
+ ? []
13568
+ : [
13569
+ {
13570
+ role: 'system',
13571
+ content: modelRequirements.systemMessage,
13572
+ },
13573
+ ])), false), [
13574
+ {
13575
+ role: 'user',
13576
+ content: [
13577
+ {
13578
+ type: 'text',
13579
+ text: rawPromptContent,
13580
+ },
13581
+ ],
13582
+ },
13583
+ ], false),
13584
+ };
13585
+ start = $getCurrentDate();
13586
+ if (options.isVerbose) {
13587
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
13588
+ }
13589
+ return [4 /*yield*/, model.doGenerate(rawRequest)];
13590
+ case 2:
13591
+ rawResponse = _b.sent();
13592
+ /*
13593
+ TODO: !!!!!! Handle errors
13594
+ .catch((error) => {
13595
+ if (options.isVerbose) {
13596
+ console.info(colors.bgRed('error'), error);
13597
+ }
13598
+ throw error;
13599
+ });
13600
+ */
13601
+ if (options.isVerbose) {
13602
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
13603
+ }
13604
+ if (rawResponse.text === undefined) {
13605
+ throw new PipelineExecutionError('No response message');
13606
+ }
13607
+ complete = $getCurrentDate();
13608
+ usage = UNCERTAIN_USAGE;
13609
+ return [2 /*return*/, $asDeeplyFrozenSerializableJson('createExecutionToolsFromVercelProvider ChatPromptResult', {
13610
+ content: rawResponse.text,
13611
+ modelName: modelName,
13612
+ timing: {
13613
+ start: start,
13614
+ complete: complete,
13615
+ },
13616
+ usage: usage,
13617
+ rawPromptContent: rawPromptContent,
13618
+ rawRequest: rawRequest,
13619
+ rawResponse: {
13620
+ /* TODO: !!!!!! UnexpectedError: createExecutionToolsFromVercelProvider ChatPromptResult.rawResponse.response.timestamp is Date */
13621
+ },
13622
+ // <- [🗯]
13623
+ })];
13624
+ }
13625
+ });
13626
+ });
13627
+ },
13628
+ };
13629
+ }
13630
+
13631
+ /**
13632
+ * Execution Tools for calling Google Gemini API.
13633
+ *
13634
+ * @public exported from `@promptbook/google`
13635
+ */
13636
+ var createGoogleExecutionTools = Object.assign(function (options) {
13637
+ // Note: [🔘] There is a compatibility when using import from '@ai-sdk/google'
13638
+ // eslint-disable-next-line @typescript-eslint/no-var-requires
13639
+ var createGoogleGenerativeAI = require('@ai-sdk/google').createGoogleGenerativeAI;
13640
+ var googleGeminiVercelProvider = createGoogleGenerativeAI(__assign({}, options));
13641
+ return createExecutionToolsFromVercelProvider(__assign({ vercelProvider: googleGeminiVercelProvider, availableModels: [
13642
+ // TODO: !!!!!! Maybe list models in same way as in other providers
13643
+ 'gemini-1.5-flash',
13644
+ 'gemini-1.5-flash-latest',
13645
+ 'gemini-1.5-flash-001',
13646
+ 'gemini-1.5-flash-002',
13647
+ 'gemini-1.5-flash-exp-0827',
13648
+ 'gemini-1.5-flash-8b',
13649
+ 'gemini-1.5-flash-8b-latest',
13650
+ 'gemini-1.5-flash-8b-exp-0924',
13651
+ 'gemini-1.5-flash-8b-exp-0827',
13652
+ 'gemini-1.5-pro-latest',
13653
+ 'gemini-1.5-pro',
13654
+ 'gemini-1.5-pro-001',
13655
+ 'gemini-1.5-pro-002',
13656
+ 'gemini-1.5-pro-exp-0827',
13657
+ 'gemini-1.0-pro',
13658
+ ].map(function (modelName) { return ({ modelName: modelName, modelVariant: 'CHAT' }); }) }, options));
13659
+ }, {
13660
+ packageName: '@promptbook/google',
13661
+ className: 'GoogleExecutionTools',
13662
+ });
13663
+ /**
13664
+ * TODO: [🧠][main] !!!! Make anonymous this with all LLM providers
13665
+ * TODO: [🧠][🧱][main] !!!! Maybe change all `new GoogleExecutionTools` -> `createGoogleExecutionTools` in manual
13666
+ * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
13667
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
13668
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
13669
+ */
13670
+
13671
+ /**
13672
+ * Registration of LLM provider
13673
+ *
13674
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
13675
+ *
13676
+ * @public exported from `@promptbook/google`
13677
+ * @public exported from `@promptbook/cli`
13678
+ */
13679
+ var _GoogleRegistration = $llmToolsRegister.register(createGoogleExecutionTools);
13680
+ /**
13681
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
13682
+ * Note: [💞] Ignore a discrepancy between file name and entity name
13683
+ */
13684
+
13478
13685
  /**
13479
13686
  * Registration of LLM provider metadata
13480
13687
  *
@@ -13747,7 +13954,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
13747
13954
  content: rawPromptContent,
13748
13955
  },
13749
13956
  ], false), user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString() });
13750
- start = getCurrentIsoDate();
13957
+ start = $getCurrentDate();
13751
13958
  if (this.options.isVerbose) {
13752
13959
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
13753
13960
  }
@@ -13771,7 +13978,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
13771
13978
  }
13772
13979
  resultContent = rawResponse.choices[0].message.content;
13773
13980
  // eslint-disable-next-line prefer-const
13774
- complete = getCurrentIsoDate();
13981
+ complete = $getCurrentDate();
13775
13982
  usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
13776
13983
  if (resultContent === null) {
13777
13984
  throw new PipelineExecutionError('No response message from OpenAI');
@@ -13826,7 +14033,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
13826
14033
  };
13827
14034
  rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
13828
14035
  rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString() });
13829
- start = getCurrentIsoDate();
14036
+ start = $getCurrentDate();
13830
14037
  if (this.options.isVerbose) {
13831
14038
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
13832
14039
  }
@@ -13850,7 +14057,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
13850
14057
  }
13851
14058
  resultContent = rawResponse.choices[0].text;
13852
14059
  // eslint-disable-next-line prefer-const
13853
- complete = getCurrentIsoDate();
14060
+ complete = $getCurrentDate();
13854
14061
  usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
13855
14062
  return [2 /*return*/, $asDeeplyFrozenSerializableJson('OpenAiExecutionTools CompletionPromptResult', {
13856
14063
  content: resultContent,
@@ -13896,7 +14103,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
13896
14103
  input: rawPromptContent,
13897
14104
  model: modelName,
13898
14105
  };
13899
- start = getCurrentIsoDate();
14106
+ start = $getCurrentDate();
13900
14107
  if (this.options.isVerbose) {
13901
14108
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
13902
14109
  }
@@ -13916,7 +14123,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
13916
14123
  }
13917
14124
  resultContent = rawResponse.data[0].embedding;
13918
14125
  // eslint-disable-next-line prefer-const
13919
- complete = getCurrentIsoDate();
14126
+ complete = $getCurrentDate();
13920
14127
  usage = computeOpenAiUsage(content || '', '',
13921
14128
  // <- Note: Embedding does not have result content
13922
14129
  rawResponse);
@@ -14069,7 +14276,7 @@ var OpenAiAssistantExecutionTools = /** @class */ (function (_super) {
14069
14276
  },
14070
14277
  // <- TODO: Add user identification here> user: this.options.user,
14071
14278
  };
14072
- start = getCurrentIsoDate();
14279
+ start = $getCurrentDate();
14073
14280
  if (this.options.isVerbose) {
14074
14281
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
14075
14282
  }
@@ -14120,7 +14327,7 @@ var OpenAiAssistantExecutionTools = /** @class */ (function (_super) {
14120
14327
  resultContent = (_c = rawResponse[0].content[0]) === null || _c === void 0 ? void 0 : _c.text.value;
14121
14328
  // <- TODO: [🧠] There are also annotations, maybe use them
14122
14329
  // eslint-disable-next-line prefer-const
14123
- complete = getCurrentIsoDate();
14330
+ complete = $getCurrentDate();
14124
14331
  usage = UNCERTAIN_USAGE;
14125
14332
  // <- TODO: [🥘] Compute real usage for assistant
14126
14333
  // ?> const usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
@@ -15378,5 +15585,5 @@ var _WebsiteScraperRegistration = $scrapersRegister.register(createWebsiteScrape
15378
15585
  * Note: [💞] Ignore a discrepancy between file name and entity name
15379
15586
  */
15380
15587
 
15381
- export { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION, _AnthropicClaudeMetadataRegistration, _AnthropicClaudeRegistration, _AzureOpenAiMetadataRegistration, _AzureOpenAiRegistration, _CLI, _DocumentScraperMetadataRegistration, _DocumentScraperRegistration, _LegacyDocumentScraperMetadataRegistration, _LegacyDocumentScraperRegistration, _MarkdownScraperMetadataRegistration, _MarkdownScraperRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiAssistantRegistration, _OpenAiMetadataRegistration, _OpenAiRegistration, _PdfScraperMetadataRegistration, _PdfScraperRegistration, _WebsiteScraperMetadataRegistration, _WebsiteScraperRegistration };
15588
+ export { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION, _AnthropicClaudeMetadataRegistration, _AnthropicClaudeRegistration, _AzureOpenAiMetadataRegistration, _AzureOpenAiRegistration, _CLI, _DocumentScraperMetadataRegistration, _DocumentScraperRegistration, _GoogleMetadataRegistration, _GoogleRegistration, _LegacyDocumentScraperMetadataRegistration, _LegacyDocumentScraperRegistration, _MarkdownScraperMetadataRegistration, _MarkdownScraperRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiAssistantRegistration, _OpenAiMetadataRegistration, _OpenAiRegistration, _PdfScraperMetadataRegistration, _PdfScraperRegistration, _WebsiteScraperMetadataRegistration, _WebsiteScraperRegistration };
15382
15589
  //# sourceMappingURL=index.es.js.map