@promptbook/core 0.92.0-31 β†’ 0.92.0-33

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -27,7 +27,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
27
27
  * @generated
28
28
  * @see https://github.com/webgptorg/promptbook
29
29
  */
30
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-31';
30
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-33';
31
31
  /**
32
32
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
33
33
  * Note: [πŸ’ž] Ignore a discrepancy between file name and entity name
@@ -2368,6 +2368,7 @@ function assertsTaskSuccessful(executionResult) {
2368
2368
  */
2369
2369
  function createTask(options) {
2370
2370
  const { taskType, taskProcessCallback } = options;
2371
+ let { title } = options;
2371
2372
  // TODO: [πŸ™] DRY
2372
2373
  const taskId = `${taskType.toLowerCase().substring(0, 4)}-${$randomToken(8 /* <- TODO: To global config + Use Base58 to avoid similar char conflicts */)}`;
2373
2374
  let status = 'RUNNING';
@@ -2379,6 +2380,10 @@ function createTask(options) {
2379
2380
  const partialResultSubject = new Subject();
2380
2381
  // <- Note: Not using `BehaviorSubject` because on error we can't access the last value
2381
2382
  const finalResultPromise = /* not await */ taskProcessCallback((newOngoingResult) => {
2383
+ if (newOngoingResult.title) {
2384
+ title = newOngoingResult.title;
2385
+ }
2386
+ updatedAt = new Date();
2382
2387
  Object.assign(currentValue, newOngoingResult);
2383
2388
  // <- TODO: assign deep
2384
2389
  partialResultSubject.next(newOngoingResult);
@@ -2424,17 +2429,24 @@ function createTask(options) {
2424
2429
  return {
2425
2430
  taskType,
2426
2431
  taskId,
2432
+ get promptbookVersion() {
2433
+ return PROMPTBOOK_ENGINE_VERSION;
2434
+ },
2435
+ get title() {
2436
+ return title;
2437
+ // <- Note: [1] These must be getters to allow changing the value in the future
2438
+ },
2427
2439
  get status() {
2428
2440
  return status;
2429
- // <- Note: [1] These must be getters to allow changing the value in the future
2441
+ // <- Note: [1] --||--
2430
2442
  },
2431
2443
  get createdAt() {
2432
2444
  return createdAt;
2433
- // <- Note: [1]
2445
+ // <- Note: [1] --||--
2434
2446
  },
2435
2447
  get updatedAt() {
2436
2448
  return updatedAt;
2437
- // <- Note: [1]
2449
+ // <- Note: [1] --||--
2438
2450
  },
2439
2451
  asPromise,
2440
2452
  asObservable() {
@@ -2442,15 +2454,15 @@ function createTask(options) {
2442
2454
  },
2443
2455
  get errors() {
2444
2456
  return errors;
2445
- // <- Note: [1]
2457
+ // <- Note: [1] --||--
2446
2458
  },
2447
2459
  get warnings() {
2448
2460
  return warnings;
2449
- // <- Note: [1]
2461
+ // <- Note: [1] --||--
2450
2462
  },
2451
2463
  get currentValue() {
2452
2464
  return currentValue;
2453
- // <- Note: [1]
2465
+ // <- Note: [1] --||--
2454
2466
  },
2455
2467
  };
2456
2468
  }
@@ -3198,23 +3210,17 @@ class MultipleLlmExecutionTools {
3198
3210
  * Check the configuration of all execution tools
3199
3211
  */
3200
3212
  async checkConfiguration() {
3201
- // TODO: Maybe do it in parallel
3202
- for (const llmExecutionTools of this.llmExecutionTools) {
3203
- await llmExecutionTools.checkConfiguration();
3204
- }
3213
+ // Note: Run checks in parallel
3214
+ await Promise.all(this.llmExecutionTools.map((tools) => tools.checkConfiguration()));
3205
3215
  }
3206
3216
  /**
3207
3217
  * List all available models that can be used
3208
3218
  * This lists is a combination of all available models from all execution tools
3209
3219
  */
3210
3220
  async listModels() {
3211
- const availableModels = [];
3212
- for (const llmExecutionTools of this.llmExecutionTools) {
3213
- // TODO: [πŸͺ‚] Obtain models in parallel
3214
- const models = await llmExecutionTools.listModels();
3215
- availableModels.push(...models);
3216
- }
3217
- return availableModels;
3221
+ // Obtain all models in parallel and flatten
3222
+ const modelArrays = await Promise.all(this.llmExecutionTools.map((tools) => tools.listModels()));
3223
+ return modelArrays.flat();
3218
3224
  }
3219
3225
  /**
3220
3226
  * Calls the best available chat model
@@ -4590,7 +4596,7 @@ async function getKnowledgeForTask(options) {
4590
4596
  });
4591
4597
  const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
4592
4598
  const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
4593
- console.log('!!! Embedding', {
4599
+ console.log('!!! `getKnowledgeForTask` Embedding', {
4594
4600
  task,
4595
4601
  taskEmbeddingPrompt,
4596
4602
  taskEmbeddingResult,
@@ -4626,6 +4632,7 @@ async function getKnowledgeForTask(options) {
4626
4632
  */
4627
4633
  async function getReservedParametersForTask(options) {
4628
4634
  const { tools, preparedPipeline, task, parameters, pipelineIdentification } = options;
4635
+ console.log('!!! getReservedParametersForTask', options);
4629
4636
  const context = await getContextForTask(); // <- [🏍]
4630
4637
  const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task, parameters });
4631
4638
  const examples = await getExamplesForTask();
@@ -4662,6 +4669,7 @@ async function getReservedParametersForTask(options) {
4662
4669
  */
4663
4670
  async function executeTask(options) {
4664
4671
  const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSupressed, } = options;
4672
+ console.log('!!! executeTask', options);
4665
4673
  const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
4666
4674
  // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to double-check
4667
4675
  const usedParameterNames = extractParameterNamesFromTask(currentTask);
@@ -4685,14 +4693,15 @@ async function executeTask(options) {
4685
4693
 
4686
4694
  `));
4687
4695
  }
4696
+ const reservedParameters = await getReservedParametersForTask({
4697
+ tools,
4698
+ preparedPipeline,
4699
+ task: currentTask,
4700
+ pipelineIdentification,
4701
+ parameters: parametersToPass,
4702
+ });
4688
4703
  const definedParameters = Object.freeze({
4689
- ...(await getReservedParametersForTask({
4690
- tools,
4691
- preparedPipeline,
4692
- task: currentTask,
4693
- pipelineIdentification,
4694
- parameters: parametersToPass,
4695
- })),
4704
+ ...reservedParameters,
4696
4705
  ...parametersToPass,
4697
4706
  });
4698
4707
  const definedParameterNames = new Set(Object.keys(definedParameters));
@@ -5139,6 +5148,7 @@ function createPipelineExecutor(options) {
5139
5148
  };
5140
5149
  const pipelineExecutor = (inputParameters) => createTask({
5141
5150
  taskType: 'EXECUTION',
5151
+ title: pipeline.title,
5142
5152
  taskProcessCallback(updateOngoingResult) {
5143
5153
  return pipelineExecutorWithCallback(inputParameters, async (newOngoingResult) => {
5144
5154
  updateOngoingResult(newOngoingResult);
@@ -10870,6 +10880,7 @@ const _AnthropicClaudeMetadataRegistration = $llmToolsMetadataRegister.register(
10870
10880
  apiKey: 'sk-ant-api03-',
10871
10881
  isProxied: true,
10872
10882
  remoteServerUrl: DEFAULT_REMOTE_SERVER_URL,
10883
+ maxRequestsPerMinute: DEFAULT_MAX_REQUESTS_PER_MINUTE,
10873
10884
  },
10874
10885
  };
10875
10886
  },
@@ -10905,7 +10916,7 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
10905
10916
  title: 'Azure Open AI',
10906
10917
  packageName: '@promptbook/azure-openai',
10907
10918
  className: 'AzureOpenAiExecutionTools',
10908
- envVariables: ['AZUREOPENAI_RESOURCE_NAME', 'AZUREOPENAI_DEPLOYMENT_NAME', 'AZUREOPENAI_API_KEY'],
10919
+ envVariables: ['AZUREOPENAI_API_KEY', 'AZUREOPENAI_RESOURCE_NAME', 'AZUREOPENAI_DEPLOYMENT_NAME'],
10909
10920
  trustLevel: 'CLOSED_BUSINESS',
10910
10921
  order: MODEL_ORDERS.NORMAL,
10911
10922
  getBoilerplateConfiguration() {
@@ -10915,6 +10926,9 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
10915
10926
  className: 'AzureOpenAiExecutionTools',
10916
10927
  options: {
10917
10928
  apiKey: 'sk-',
10929
+ resourceName: 'my-resource-name',
10930
+ deploymentName: 'my-deployment-name',
10931
+ maxRequestsPerMinute: DEFAULT_MAX_REQUESTS_PER_MINUTE,
10918
10932
  },
10919
10933
  };
10920
10934
  },
@@ -10928,15 +10942,15 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
10928
10942
  packageName: '@promptbook/azure-openai',
10929
10943
  className: 'AzureOpenAiExecutionTools',
10930
10944
  options: {
10945
+ apiKey: env.AZUREOPENAI_API_KEY,
10931
10946
  resourceName: env.AZUREOPENAI_RESOURCE_NAME,
10932
10947
  deploymentName: env.AZUREOPENAI_DEPLOYMENT_NAME,
10933
- apiKey: env.AZUREOPENAI_API_KEY,
10934
10948
  },
10935
10949
  };
10936
10950
  }
10937
- else if (typeof env.AZUREOPENAI_RESOURCE_NAME === 'string' ||
10938
- typeof env.AZUREOPENAI_DEPLOYMENT_NAME === 'string' ||
10939
- typeof env.AZUREOPENAI_API_KEY === 'string') {
10951
+ else if (typeof env.AZUREOPENAI_API_KEY === 'string' ||
10952
+ typeof env.AZUREOPENAI_RESOURCE_NAME === 'string' ||
10953
+ typeof env.AZUREOPENAI_DEPLOYMENT_NAME === 'string') {
10940
10954
  return null;
10941
10955
  /*
10942
10956
  Note: [πŸ—¨] Partial configuration is handled more gracefully elsewhere
@@ -11149,6 +11163,7 @@ const _OpenAiAssistantMetadataRegistration = $llmToolsMetadataRegister.register(
11149
11163
  options: {
11150
11164
  apiKey: 'sk-',
11151
11165
  assistantId: 'asst_',
11166
+ maxRequestsPerMinute: DEFAULT_MAX_REQUESTS_PER_MINUTE,
11152
11167
  },
11153
11168
  };
11154
11169
  },