@promptbook/core 0.92.0-26 → 0.92.0-27

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -27,7 +27,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
27
27
  * @generated
28
28
  * @see https://github.com/webgptorg/promptbook
29
29
  */
30
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-26';
30
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-27';
31
31
  /**
32
32
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
33
33
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -6497,7 +6497,7 @@ const sectionCommandParser = {
6497
6497
  /**
6498
6498
  * Parses the boilerplate command
6499
6499
  *
6500
- * Note: @@@ This command is used as boilerplate for new commands - it should NOT be used in any `.book` file
6500
+ * Note: @@ This command is used as boilerplate for new commands - it should NOT be used in any `.book` file
6501
6501
  *
6502
6502
  * @see `documentationUrl` for more details
6503
6503
  * @private within the commands folder
@@ -7433,17 +7433,20 @@ const ImageGeneratorFormfactorDefinition = {
7433
7433
  };
7434
7434
 
7435
7435
  /**
7436
- * Matcher is form of app that @@@
7436
+ * Matcher is form of app that evaluates (spreadsheet) content against defined criteria or patterns,
7437
+ * determining if it matches or meets specific requirements. Used for classification,
7438
+ * validation, filtering, and quality assessment of inputs.
7437
7439
  *
7438
7440
  * @public exported from `@promptbook/core`
7439
7441
  */
7440
7442
  const MatcherFormfactorDefinition = {
7441
7443
  name: 'EXPERIMENTAL_MATCHER',
7442
- description: `@@@`,
7444
+ description: `An evaluation system that determines whether content meets specific criteria or patterns.
7445
+ Used for content validation, quality assessment, and intelligent filtering tasks. Currently in experimental phase.`,
7443
7446
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/177`,
7444
7447
  pipelineInterface: {
7445
7448
  inputParameters: [
7446
- /* @@@ */
7449
+ /* Input parameters for content to be matched and criteria to match against */
7447
7450
  {
7448
7451
  name: 'nonce',
7449
7452
  description: 'Just to prevent EXPERIMENTAL_MATCHER to be set as implicit formfactor',
@@ -7452,7 +7455,7 @@ const MatcherFormfactorDefinition = {
7452
7455
  },
7453
7456
  ],
7454
7457
  outputParameters: [
7455
- /* @@@ */
7458
+ /* Output parameters containing match results, confidence scores, and relevant metadata */
7456
7459
  ],
7457
7460
  },
7458
7461
  };
@@ -7489,13 +7492,16 @@ const SheetsFormfactorDefinition = {
7489
7492
  };
7490
7493
 
7491
7494
  /**
7492
- * Translator is form of app that @@@
7495
+ * Translator is form of app that transforms input text from one form to another,
7496
+ * such as language translation, style conversion, tone modification, or other text transformations.
7493
7497
  *
7494
7498
  * @public exported from `@promptbook/core`
7495
7499
  */
7496
7500
  const TranslatorFormfactorDefinition = {
7497
7501
  name: 'TRANSLATOR',
7498
- description: `@@@`,
7502
+ description: `A text transformation system that converts input content into different forms,
7503
+ including language translations, paraphrasing, style conversions, and tone adjustments.
7504
+ This form factor takes one input and produces one transformed output.`,
7499
7505
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/175`,
7500
7506
  pipelineInterface: {
7501
7507
  inputParameters: [
@@ -8622,7 +8628,10 @@ function parseCommand(raw, usagePlace) {
8622
8628
  `));
8623
8629
  }
8624
8630
  /**
8625
- * @@@
8631
+ * Generates a markdown-formatted message listing all supported commands
8632
+ * with their descriptions and documentation links
8633
+ *
8634
+ * @returns A formatted markdown string containing all available commands and their details
8626
8635
  */
8627
8636
  function getSupportedCommandsMessage() {
8628
8637
  return COMMANDS.flatMap(({ name, aliasNames, description, documentationUrl }) =>
@@ -8633,7 +8642,10 @@ function getSupportedCommandsMessage() {
8633
8642
  ]).join('\n');
8634
8643
  }
8635
8644
  /**
8636
- * @@@
8645
+ * Attempts to parse a command variant using the provided input parameters
8646
+ *
8647
+ * @param input Object containing command parsing information including raw command text and normalized values
8648
+ * @returns A parsed Command object if successful, or null if the command cannot be parsed
8637
8649
  */
8638
8650
  function parseCommandVariant(input) {
8639
8651
  const { commandNameRaw, usagePlace, normalized, args, raw, rawArgs } = input;
@@ -10494,11 +10506,16 @@ function $registeredLlmToolsMessage() {
10494
10506
  */
10495
10507
 
10496
10508
  /**
10497
- * @@@
10509
+ * Creates LLM execution tools from provided configuration objects
10510
+ *
10511
+ * Instantiates and configures LLM tool instances for each configuration entry,
10512
+ * combining them into a unified interface via MultipleLlmExecutionTools.
10498
10513
  *
10499
10514
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
10500
10515
  *
10501
- * @returns @@@
10516
+ * @param configuration Array of LLM tool configurations to instantiate
10517
+ * @param options Additional options for configuring the LLM tools
10518
+ * @returns A unified interface combining all successfully instantiated LLM tools
10502
10519
  * @public exported from `@promptbook/core`
10503
10520
  */
10504
10521
  function createLlmToolsFromConfiguration(configuration, options = {}) {
@@ -10537,7 +10554,11 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
10537
10554
  /**
10538
10555
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
10539
10556
  * TODO: [🧠][🎌] Dynamically install required providers
10540
- * TODO: @@@ write discussion about this - wizzard
10557
+ * TODO: We should implement an interactive configuration wizard that would:
10558
+ * 1. Detect which LLM providers are available in the environment
10559
+ * 2. Guide users through required configuration settings for each provider
10560
+ * 3. Allow testing connections before completing setup
10561
+ * 4. Generate appropriate configuration code for application integration
10541
10562
  * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
10542
10563
  * TODO: [🧠] Is there some meaningfull way how to test this util
10543
10564
  * TODO: This should be maybe not under `_common` but under `utils`
@@ -10675,20 +10696,34 @@ function cacheLlmTools(llmTools, options = {}) {
10675
10696
  const callCommonModel = async (prompt) => {
10676
10697
  const { parameters, content, modelRequirements } = prompt;
10677
10698
  // <- Note: These are relevant things from the prompt that the cache key should depend on.
10699
+ // TODO: Maybe some standalone function for normalization of content for cache
10700
+ let normalizedContent = content;
10701
+ normalizedContent = normalizedContent.replace(/\s+/g, ' ');
10702
+ normalizedContent = normalizedContent.split('\r\n').join('\n');
10703
+ normalizedContent = spaceTrim(normalizedContent);
10704
+ // Note: Do not need to save everything in the cache, just the relevant parameters
10705
+ const relevantParameterNames = extractParameterNames(content);
10706
+ const relevantParameters = Object.fromEntries(Object.entries(parameters).filter(([key]) => relevantParameterNames.has(key)));
10707
+ const keyHashBase = { relevantParameters, normalizedContent, modelRequirements };
10678
10708
  const key = titleToName(prompt.title.substring(0, MAX_FILENAME_LENGTH - 10) +
10679
10709
  '-' +
10680
- sha256(hexEncoder.parse(JSON.stringify({ parameters, content, modelRequirements }))).toString( /* hex */));
10710
+ sha256(hexEncoder.parse(JSON.stringify(keyHashBase)))
10711
+ .toString( /* hex */)
10712
+ .substring(0, 10 - 1));
10681
10713
  const cacheItem = !isCacheReloaded ? await storage.getItem(key) : null;
10682
10714
  if (cacheItem) {
10683
- console.log('!!! Cache hit for key:', key);
10715
+ console.log('!!! Cache hit for key:', { key, keyHashBase });
10684
10716
  return cacheItem.promptResult;
10685
10717
  }
10686
10718
  console.log('!!! Cache miss for key:', key, {
10687
10719
  prompt,
10688
10720
  'prompt.title': prompt.title,
10689
10721
  MAX_FILENAME_LENGTH,
10722
+ keyHashBase,
10690
10723
  parameters,
10724
+ relevantParameters,
10691
10725
  content,
10726
+ normalizedContent,
10692
10727
  modelRequirements,
10693
10728
  });
10694
10729
  let promptResult;
@@ -10711,7 +10746,16 @@ function cacheLlmTools(llmTools, options = {}) {
10711
10746
  await storage.setItem(key, {
10712
10747
  date: $getCurrentDate(),
10713
10748
  promptbookVersion: PROMPTBOOK_ENGINE_VERSION,
10714
- prompt,
10749
+ bookVersion: BOOK_LANGUAGE_VERSION,
10750
+ prompt: {
10751
+ ...prompt,
10752
+ parameters: Object.entries(parameters).length === Object.entries(relevantParameters).length
10753
+ ? parameters
10754
+ : {
10755
+ ...relevantParameters,
10756
+ note: `<- Note: Only relevant parameters are stored in the cache`,
10757
+ },
10758
+ },
10715
10759
  promptResult,
10716
10760
  });
10717
10761
  return promptResult;