@promptbook/remote-server 0.88.0 → 0.89.0-2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. package/README.md +4 -0
  2. package/esm/index.es.js +119 -82
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -2
  5. package/esm/typings/src/_packages/types.index.d.ts +16 -4
  6. package/esm/typings/src/cli/cli-commands/login.d.ts +15 -0
  7. package/esm/typings/src/execution/PipelineExecutorResult.d.ts +2 -2
  8. package/esm/typings/src/execution/PromptResult.d.ts +2 -2
  9. package/esm/typings/src/execution/{PromptResultUsage.d.ts → Usage.d.ts} +5 -5
  10. package/esm/typings/src/execution/utils/addUsage.d.ts +2 -2
  11. package/esm/typings/src/execution/utils/computeUsageCounts.d.ts +3 -3
  12. package/esm/typings/src/execution/utils/usage-constants.d.ts +77 -60
  13. package/esm/typings/src/execution/utils/usageToHuman.d.ts +5 -5
  14. package/esm/typings/src/execution/utils/usageToWorktime.d.ts +5 -5
  15. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +9 -2
  16. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/{countTotalUsage.d.ts → countUsage.d.ts} +1 -1
  17. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/limitTotalUsage.d.ts +2 -2
  18. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +2 -2
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +0 -9
  20. package/esm/typings/src/llm-providers/openai/computeOpenAiUsage.d.ts +2 -2
  21. package/esm/typings/src/pipeline/PipelineJson/PreparationJson.d.ts +2 -2
  22. package/esm/typings/src/playground/BrjappConnector.d.ts +67 -0
  23. package/esm/typings/src/playground/brjapp-api-schema.d.ts +12879 -0
  24. package/esm/typings/src/playground/playground.d.ts +5 -0
  25. package/esm/typings/src/remote-server/socket-types/_subtypes/PromptbookServer_Identification.d.ts +2 -1
  26. package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +15 -3
  27. package/esm/typings/src/types/typeAliases.d.ts +8 -2
  28. package/package.json +2 -2
  29. package/umd/index.umd.js +119 -82
  30. package/umd/index.umd.js.map +1 -1
package/README.md CHANGED
@@ -23,6 +23,10 @@



+<blockquote style="color: #ff8811">
+<b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
+</blockquote>
+
 ## 📦 Package `@promptbook/remote-server`

 - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
package/esm/index.es.js CHANGED
@@ -31,7 +31,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.88.0';
+const PROMPTBOOK_ENGINE_VERSION = '0.89.0-2';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -694,6 +694,7 @@ function $execCommand(options) {
 }
 else {
     console.warn(`Command "${humanReadableCommand}" exceeded time limit of ${timeout}ms but continues running`);
+    // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
     resolve('Command exceeded time limit');
 }
 });
@@ -719,6 +720,7 @@ function $execCommand(options) {
 output.push(stderr.toString());
 if (isVerbose && stderr.toString().trim()) {
     console.warn(stderr.toString());
+    // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
 }
 });
 const finishWithCode = (code) => {
@@ -730,6 +732,7 @@ function $execCommand(options) {
 else {
     if (isVerbose) {
         console.warn(`Command "${humanReadableCommand}" exited with code ${code}`);
+        // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
     }
     resolve(spaceTrim(output.join('\n')));
 }
@@ -751,6 +754,7 @@ function $execCommand(options) {
 else {
     if (isVerbose) {
         console.warn(error);
+        // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
     }
     resolve(spaceTrim(output.join('\n')));
 }
@@ -1819,6 +1823,7 @@ function assertsTaskSuccessful(executionResult) {
 const { isSuccessful, errors, warnings } = executionResult;
 for (const warning of warnings) {
     console.warn(warning.message);
+    // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
 }
 if (isSuccessful === true) {
     return;
@@ -2420,30 +2425,42 @@ async function forEachAsync(array, options, callbackfunction) {
     await Promise.all(tasks);
 }

+/**
+ * Represents the uncertain value
+ *
+ * @public exported from `@promptbook/core`
+ */
+const ZERO_VALUE = $deepFreeze({ value: 0 });
+/**
+ * Represents the uncertain value
+ *
+ * @public exported from `@promptbook/core`
+ */
+const UNCERTAIN_ZERO_VALUE = $deepFreeze({ value: 0, isUncertain: true });
 /**
  * Represents the usage with no resources consumed
  *
  * @public exported from `@promptbook/core`
  */
 const ZERO_USAGE = $deepFreeze({
-    price: { value: 0 },
+    price: ZERO_VALUE,
     input: {
-        tokensCount: { value: 0 },
-        charactersCount: { value: 0 },
-        wordsCount: { value: 0 },
-        sentencesCount: { value: 0 },
-        linesCount: { value: 0 },
-        paragraphsCount: { value: 0 },
-        pagesCount: { value: 0 },
+        tokensCount: ZERO_VALUE,
+        charactersCount: ZERO_VALUE,
+        wordsCount: ZERO_VALUE,
+        sentencesCount: ZERO_VALUE,
+        linesCount: ZERO_VALUE,
+        paragraphsCount: ZERO_VALUE,
+        pagesCount: ZERO_VALUE,
     },
     output: {
-        tokensCount: { value: 0 },
-        charactersCount: { value: 0 },
-        wordsCount: { value: 0 },
-        sentencesCount: { value: 0 },
-        linesCount: { value: 0 },
-        paragraphsCount: { value: 0 },
-        pagesCount: { value: 0 },
+        tokensCount: ZERO_VALUE,
+        charactersCount: ZERO_VALUE,
+        wordsCount: ZERO_VALUE,
+        sentencesCount: ZERO_VALUE,
+        linesCount: ZERO_VALUE,
+        paragraphsCount: ZERO_VALUE,
+        pagesCount: ZERO_VALUE,
     },
 });
 /**
@@ -2452,24 +2469,24 @@ const ZERO_USAGE = $deepFreeze({
  * @public exported from `@promptbook/core`
  */
 $deepFreeze({
-    price: { value: 0, isUncertain: true },
+    price: UNCERTAIN_ZERO_VALUE,
     input: {
-        tokensCount: { value: 0, isUncertain: true },
-        charactersCount: { value: 0, isUncertain: true },
-        wordsCount: { value: 0, isUncertain: true },
-        sentencesCount: { value: 0, isUncertain: true },
-        linesCount: { value: 0, isUncertain: true },
-        paragraphsCount: { value: 0, isUncertain: true },
-        pagesCount: { value: 0, isUncertain: true },
+        tokensCount: UNCERTAIN_ZERO_VALUE,
+        charactersCount: UNCERTAIN_ZERO_VALUE,
+        wordsCount: UNCERTAIN_ZERO_VALUE,
+        sentencesCount: UNCERTAIN_ZERO_VALUE,
+        linesCount: UNCERTAIN_ZERO_VALUE,
+        paragraphsCount: UNCERTAIN_ZERO_VALUE,
+        pagesCount: UNCERTAIN_ZERO_VALUE,
     },
     output: {
-        tokensCount: { value: 0, isUncertain: true },
-        charactersCount: { value: 0, isUncertain: true },
-        wordsCount: { value: 0, isUncertain: true },
-        sentencesCount: { value: 0, isUncertain: true },
-        linesCount: { value: 0, isUncertain: true },
-        paragraphsCount: { value: 0, isUncertain: true },
-        pagesCount: { value: 0, isUncertain: true },
+        tokensCount: UNCERTAIN_ZERO_VALUE,
+        charactersCount: UNCERTAIN_ZERO_VALUE,
+        wordsCount: UNCERTAIN_ZERO_VALUE,
+        sentencesCount: UNCERTAIN_ZERO_VALUE,
+        linesCount: UNCERTAIN_ZERO_VALUE,
+        paragraphsCount: UNCERTAIN_ZERO_VALUE,
+        pagesCount: UNCERTAIN_ZERO_VALUE,
     },
 });
 /**
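Note: the new `ZERO_VALUE` and `UNCERTAIN_ZERO_VALUE` constants only deduplicate the repeated `{ value: 0 }` literals; the shape of a usage record is unchanged. A minimal sketch of how such records compose, assuming `ZERO_USAGE` and `addUsage` remain exported from `@promptbook/core` as the doc comments state, and with purely hypothetical numbers:

import { ZERO_USAGE, addUsage } from '@promptbook/core';

// One hypothetical chat call: exact token counts, price only estimated
const oneCallUsage = {
    price: { value: 0.0004, isUncertain: true },
    input: { ...ZERO_USAGE.input, tokensCount: { value: 120 } },
    output: { ...ZERO_USAGE.output, tokensCount: { value: 350 } },
};

// `addUsage` merges any number of usage records into a single running total
const totalUsage = addUsage(ZERO_USAGE, oneCallUsage, oneCallUsage);
console.info(totalUsage.price.value); // accumulated price of both calls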
@@ -2530,8 +2547,9 @@ function addUsage(...usageItems) {
  * @returns LLM tools with same functionality with added total cost counting
  * @public exported from `@promptbook/core`
  */
-function countTotalUsage(llmTools) {
+function countUsage(llmTools) {
     let totalUsage = ZERO_USAGE;
+    const spending = new Subject();
     const proxyTools = {
         get title() {
             // TODO: [🧠] Maybe put here some suffix
@@ -2541,12 +2559,15 @@ function countTotalUsage(llmTools) {
             // TODO: [🧠] Maybe put here some suffix
             return llmTools.description;
         },
-        async checkConfiguration() {
+        checkConfiguration() {
            return /* not await */ llmTools.checkConfiguration();
         },
         listModels() {
            return /* not await */ llmTools.listModels();
         },
+        spending() {
+            return spending.asObservable();
+        },
         getTotalUsage() {
             // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
             return totalUsage;
@@ -2557,6 +2578,7 @@ function countTotalUsage(llmTools) {
         // console.info('[🚕] callChatModel through countTotalUsage');
         const promptResult = await llmTools.callChatModel(prompt);
         totalUsage = addUsage(totalUsage, promptResult.usage);
+        spending.next(promptResult.usage);
         return promptResult;
     };
 }
@@ -2565,6 +2587,7 @@ function countTotalUsage(llmTools) {
         // console.info('[🚕] callCompletionModel through countTotalUsage');
         const promptResult = await llmTools.callCompletionModel(prompt);
         totalUsage = addUsage(totalUsage, promptResult.usage);
+        spending.next(promptResult.usage);
         return promptResult;
     };
 }
@@ -2573,6 +2596,7 @@ function countTotalUsage(llmTools) {
         // console.info('[🚕] callEmbeddingModel through countTotalUsage');
         const promptResult = await llmTools.callEmbeddingModel(prompt);
         totalUsage = addUsage(totalUsage, promptResult.usage);
+        spending.next(promptResult.usage);
         return promptResult;
     };
 }
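Note: `countTotalUsage` is renamed to `countUsage`, and the wrapped tools gain a `spending()` method returning an RxJS observable that emits the usage of each individual call (fed by `spending.next(promptResult.usage)` above), alongside the existing `getTotalUsage()` running total. A rough usage sketch, assuming `countUsage` and `usageToHuman` are exported from `@promptbook/core`, that `LlmExecutionTools` and `Prompt` come from `@promptbook/types`, and with `llmTools` and `chatPrompt` as placeholders for any real provider tools and prompt:

import { countUsage, usageToHuman } from '@promptbook/core';
import type { LlmExecutionTools, Prompt } from '@promptbook/types';

declare const llmTools: LlmExecutionTools; // placeholder: any provider tools (OpenAI, Anthropic, remote, ...)
declare const chatPrompt: Prompt; // placeholder: a prepared chat prompt

async function reportSpending() {
    const countedTools = countUsage(llmTools);

    // Each completed call pushes its own usage through the new observable
    const subscription = countedTools.spending().subscribe((usage) => {
        console.info('One call cost:', usage.price.value);
    });

    await countedTools.callChatModel?.(chatPrompt);

    // The running total is still available as before
    console.info(usageToHuman(countedTools.getTotalUsage()));

    subscription.unsubscribe();
}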
@@ -2750,6 +2774,7 @@ function joinLlmExecutionTools(...llmExecutionTools) {
     `);
     // TODO: [🟥] Detect browser / node and make it colorfull
     console.warn(warningMessage);
+    // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
     /*
     return {
         async listModels() {
@@ -3693,63 +3718,73 @@ async function prepareKnowledgePieces(knowledgeSources, tools, options) {
 const { maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, rootDirname, isVerbose = DEFAULT_IS_VERBOSE } = options;
 const knowledgePreparedUnflatten = new Array(knowledgeSources.length);
 await forEachAsync(knowledgeSources, { maxParallelCount }, async (knowledgeSource, index) => {
-    let partialPieces = null;
-    const sourceHandler = await makeKnowledgeSourceHandler(knowledgeSource, tools, { rootDirname, isVerbose });
-    const scrapers = arrayableToArray(tools.scrapers);
-    for (const scraper of scrapers) {
-        if (!scraper.metadata.mimeTypes.includes(sourceHandler.mimeType)
-        // <- TODO: [🦔] Implement mime-type wildcards
-        ) {
-            continue;
-        }
-        const partialPiecesUnchecked = await scraper.scrape(sourceHandler);
-        if (partialPiecesUnchecked !== null) {
-            partialPieces = [...partialPiecesUnchecked];
-            // <- TODO: [🪓] Here should be no need for spreading new array, just `partialPieces = partialPiecesUnchecked`
-            break;
-        }
-        console.warn(spaceTrim$1((block) => `
-            Cannot scrape knowledge from source despite the scraper \`${scraper.metadata.className}\` supports the mime type "${sourceHandler.mimeType}".
+    try {
+        let partialPieces = null;
+        const sourceHandler = await makeKnowledgeSourceHandler(knowledgeSource, tools, { rootDirname, isVerbose });
+        const scrapers = arrayableToArray(tools.scrapers);
+        for (const scraper of scrapers) {
+            if (!scraper.metadata.mimeTypes.includes(sourceHandler.mimeType)
+            // <- TODO: [🦔] Implement mime-type wildcards
+            ) {
+                continue;
+            }
+            const partialPiecesUnchecked = await scraper.scrape(sourceHandler);
+            if (partialPiecesUnchecked !== null) {
+                partialPieces = [...partialPiecesUnchecked];
+                // <- TODO: [🪓] Here should be no need for spreading new array, just `partialPieces = partialPiecesUnchecked`
+                break;
+            }
+            console.warn(spaceTrim$1((block) => `
+                Cannot scrape knowledge from source despite the scraper \`${scraper.metadata.className}\` supports the mime type "${sourceHandler.mimeType}".

-            The source:
-            ${block(knowledgeSource.knowledgeSourceContent
-                .split('\n')
-                .map((line) => `> ${line}`)
-                .join('\n'))}
+                The source:
+                ${block(knowledgeSource.knowledgeSourceContent
+                    .split('\n')
+                    .map((line) => `> ${line}`)
+                    .join('\n'))}

-            ${block($registeredScrapersMessage(scrapers))}
+                ${block($registeredScrapersMessage(scrapers))}


-        `));
-    }
-    if (partialPieces === null) {
-        throw new KnowledgeScrapeError(spaceTrim$1((block) => `
-            Cannot scrape knowledge
+            `));
+            // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
+        }
+        if (partialPieces === null) {
+            throw new KnowledgeScrapeError(spaceTrim$1((block) => `
+                Cannot scrape knowledge

-            The source:
-            > ${block(knowledgeSource.knowledgeSourceContent
-                .split('\n')
-                .map((line) => `> ${line}`)
-                .join('\n'))}
+                The source:
+                > ${block(knowledgeSource.knowledgeSourceContent
+                    .split('\n')
+                    .map((line) => `> ${line}`)
+                    .join('\n'))}

-            No scraper found for the mime type "${sourceHandler.mimeType}"
+                No scraper found for the mime type "${sourceHandler.mimeType}"

-            ${block($registeredScrapersMessage(scrapers))}
+                ${block($registeredScrapersMessage(scrapers))}


-        `));
+            `));
+        }
+        const pieces = partialPieces.map((partialPiece) => ({
+            ...partialPiece,
+            sources: [
+                {
+                    name: knowledgeSource.name,
+                    // line, column <- TODO: [☀]
+                    // <- TODO: [❎]
+                },
+            ],
+        }));
+        knowledgePreparedUnflatten[index] = pieces;
+    }
+    catch (error) {
+        if (!(error instanceof Error)) {
+            throw error;
+        }
+        console.warn(error);
+        // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
     }
-    const pieces = partialPieces.map((partialPiece) => ({
-        ...partialPiece,
-        sources: [
-            {
-                name: knowledgeSource.name,
-                // line, column <- TODO: [☀]
-                // <- TODO: [❎]
-            },
-        ],
-    }));
-    knowledgePreparedUnflatten[index] = pieces;
 });
 const knowledgePrepared = knowledgePreparedUnflatten.flat();
 return knowledgePrepared;
@@ -3855,7 +3890,7 @@ async function preparePipeline(pipeline, tools, options) {
 // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
 const _llms = arrayableToArray(tools.llm);
 const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
-const llmToolsWithUsage = countTotalUsage(llmTools);
+const llmToolsWithUsage = countUsage(llmTools);
 // <- TODO: [🌯]
 /*
 TODO: [🧠][🪑][🔃] Should this be done or not
@@ -4167,7 +4202,7 @@ function extractParameterNamesFromTask(task) {
 if (parameterNames.has(subparameterName)) {
     parameterNames.delete(subparameterName);
     parameterNames.add(foreach.parameterName);
-    // <- TODO: [🚎] Warn/logic error when `subparameterName` not used
+    // <- TODO: [🏮] Warn/logic error when `subparameterName` not used
 }
 }
 }
@@ -5780,6 +5815,7 @@ function createPipelineExecutor(options) {

     @see more at https://ptbk.io/prepare-pipeline
 `));
+// <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
 }
 let runCount = 0;
 const pipelineExecutorWithCallback = async (inputParameters, onProgress) => {
@@ -6805,6 +6841,7 @@ function startRemoteServer(options) {
         https://github.com/webgptorg/promptbook
     `));
 });
+// TODO: !!!!!! Add login route
 app.get(`${rootPath}/books`, async (request, response) => {
     if (collection === null) {
         response.status(500).send('No collection available');