@promptbook/cli 0.68.4 → 0.68.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. package/esm/index.es.js +35 -35
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/cli/cli-commands/make.d.ts +1 -1
  4. package/esm/typings/src/collection/constructors/createCollectionFromUrl.d.ts +1 -1
  5. package/esm/typings/src/config.d.ts +2 -2
  6. package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -1
  7. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +5 -5
  8. package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -1
  9. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
  10. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -1
  11. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -1
  12. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +2 -2
  14. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +2 -2
  15. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -1
  16. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +2 -1
  17. package/esm/typings/src/llm-providers/openai/playground/playground.d.ts +1 -1
  18. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  19. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +1 -1
  20. package/esm/typings/src/prepare/prepareTemplates.d.ts +1 -1
  21. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  22. package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +1 -1
  23. package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +1 -1
  24. package/package.json +1 -1
  25. package/umd/index.umd.js +35 -35
  26. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -20,8 +20,8 @@ import OpenAI from 'openai';
20
20
  /**
21
21
  * The version of the Promptbook library
22
22
  */
23
- var PROMPTBOOK_VERSION = '0.68.3';
24
- // TODO: !!!! List here all the versions and annotate + put into script
23
+ var PROMPTBOOK_VERSION = '0.68.4';
24
+ // TODO:[main] !!!! List here all the versions and annotate + put into script
25
25
 
26
26
  /*! *****************************************************************************
27
27
  Copyright (c) Microsoft Corporation.
@@ -356,7 +356,7 @@ function checkSerializableAsJson(name, value) {
356
356
  }
357
357
  /**
358
358
  * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
359
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
359
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
360
360
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just retun true/false or rich error message
361
361
  */
362
362
 
@@ -660,7 +660,7 @@ function pipelineJsonToString(pipelineJson) {
660
660
  commands.push("PIPELINE URL ".concat(pipelineUrl));
661
661
  }
662
662
  commands.push("PROMPTBOOK VERSION ".concat(promptbookVersion));
663
- // TODO: !!! This increase size of the bundle and is probbably not necessary
663
+ // TODO:[main] !!! This increase size of the bundle and is probbably not necessary
664
664
  pipelineString = prettifyMarkdown(pipelineString);
665
665
  try {
666
666
  for (var _g = __values(parameters.filter(function (_a) {
@@ -808,12 +808,12 @@ function pipelineJsonToString(pipelineJson) {
808
808
  pipelineString += '```' + contentLanguage;
809
809
  pipelineString += '\n';
810
810
  pipelineString += spaceTrim$1(content);
811
- // <- TODO: !!! Escape
811
+ // <- TODO:[main] !!! Escape
812
812
  // <- TODO: [🧠] Some clear strategy how to spaceTrim the blocks
813
813
  pipelineString += '\n';
814
814
  pipelineString += '```';
815
815
  pipelineString += '\n\n';
816
- pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO: !!! If the parameter here has description, add it and use templateParameterJsonToString
816
+ pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO:[main] !!! If the parameter here has description, add it and use templateParameterJsonToString
817
817
  }
818
818
  }
819
819
  catch (e_3_1) { e_3 = { error: e_3_1 }; }
@@ -1115,7 +1115,7 @@ function isValidPromptbookVersion(version) {
1115
1115
  if ( /* version === '1.0.0' || */version === '2.0.0' || version === '3.0.0') {
1116
1116
  return false;
1117
1117
  }
1118
- // <- TODO: !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
1118
+ // <- TODO:[main] !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
1119
1119
  return true;
1120
1120
  }
1121
1121
 
@@ -1459,11 +1459,11 @@ function validatePipeline(pipeline) {
1459
1459
  * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
1460
1460
  */
1461
1461
  /**
1462
- * TODO: [🐣] !!!! Validate that all samples match expectations
1463
- * TODO: [🐣][🐝] !!!! Validate that knowledge is valid (non-void)
1464
- * TODO: [🐣] !!!! Validate that persona can be used only with CHAT variant
1465
- * TODO: [🐣] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
1466
- * TODO: [🐣] !!!! Validate that reserved parameter is not used as joker
1462
+ * TODO: [🐣][main] !!!! Validate that all samples match expectations
1463
+ * TODO: [🐣][🐝][main] !!!! Validate that knowledge is valid (non-void)
1464
+ * TODO: [🐣][main] !!!! Validate that persona can be used only with CHAT variant
1465
+ * TODO: [🐣][main] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
1466
+ * TODO: [🐣][main] !!!! Validate that reserved parameter is not used as joker
1467
1467
  * TODO: [🧠] Validation not only logic itself but imports around - files and websites and rerefenced pipelines exists
1468
1468
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
1469
1469
  */
@@ -2748,7 +2748,7 @@ function isPipelinePrepared(pipeline) {
2748
2748
  return true;
2749
2749
  }
2750
2750
  /**
2751
- * TODO: [🔃] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
2751
+ * TODO: [🔃][main] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
2752
2752
  * TODO: [🐠] Maybe base this on `makeValidator`
2753
2753
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
2754
2754
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
@@ -3127,7 +3127,7 @@ function createPipelineExecutor(options) {
3127
3127
  console.warn(spaceTrim(function (block) { return "\n Pipeline is not prepared\n\n ".concat(block(pipelineIdentification), "\n\n It will be prepared ad-hoc before the first execution and **returned as `preparedPipeline` in `PipelineExecutorResult`**\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n "); }));
3128
3128
  }
3129
3129
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
3130
- // TODO: !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
3130
+ // TODO:[main] !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
3131
3131
  function getContextForTemplate(template) {
3132
3132
  return __awaiter(this, void 0, void 0, function () {
3133
3133
  return __generator(this, function (_a) {
@@ -3946,7 +3946,7 @@ function createPipelineExecutor(options) {
3946
3946
  return pipelineExecutor;
3947
3947
  }
3948
3948
  /**
3949
- * TODO: !!! Identify not only pipeline BUT exact template ${block(pipelineIdentification)}
3949
+ * TODO:[main] !!! Identify not only pipeline BUT exact template ${block(pipelineIdentification)}
3950
3950
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
3951
3951
  * TODO: [🧠][🌳] Use here `countTotalUsage` and put preparation and prepared pipiline to report
3952
3952
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
@@ -4009,7 +4009,7 @@ function prepareKnowledgeFromMarkdown(knowledgeContent /* <- TODO: [🖖] (?mayb
4009
4009
  outputParameters = result.outputParameters;
4010
4010
  knowledgePiecesRaw = outputParameters.knowledgePieces;
4011
4011
  knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
4012
- // <- TODO: !!!!! Smarter split and filter out empty pieces
4012
+ // <- TODO:[main] !!!!! Smarter split and filter out empty pieces
4013
4013
  if (isVerbose) {
4014
4014
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
4015
4015
  }
@@ -4089,7 +4089,7 @@ function prepareKnowledgeFromMarkdown(knowledgeContent /* <- TODO: [🖖] (?mayb
4089
4089
  });
4090
4090
  }
4091
4091
  /**
4092
- * TODO: [🐝][🔼] !!! Export via `@promptbook/markdown`
4092
+ * TODO: [🐝][🔼][main] !!! Export via `@promptbook/markdown`
4093
4093
  * TODO: [🪂] Do it in parallel 11:11
4094
4094
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
4095
4095
  */
@@ -4113,7 +4113,7 @@ function prepareKnowledgePieces(knowledgeSources, options) {
4113
4113
  var partialPieces, pieces;
4114
4114
  return __generator(this, function (_a) {
4115
4115
  switch (_a.label) {
4116
- case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
4116
+ case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝][main] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
4117
4117
  options)];
4118
4118
  case 1:
4119
4119
  partialPieces = _a.sent();
@@ -4305,7 +4305,7 @@ function preparePersona(personaDescription, options) {
4305
4305
  });
4306
4306
  }
4307
4307
  /**
4308
- * TODO: [🔃] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
4308
+ * TODO: [🔃][main] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
4309
4309
  * TODO: [🏢] !! Check validity of `modelName` in pipeline
4310
4310
  * TODO: [🏢] !! Check validity of `systemMessage` in pipeline
4311
4311
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
@@ -4354,7 +4354,7 @@ function prepareTemplates(pipeline, options) {
4354
4354
  case 0:
4355
4355
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
4356
4356
  templates = pipeline.templates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
4357
- // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
4357
+ // TODO:[main] !!!!! Apply samples to each template (if missing and is for the template defined)
4358
4358
  TODO_USE(parameters);
4359
4359
  templatesPrepared = new Array(
4360
4360
  // <- TODO: [🧱] Implement in a functional (not new Class) way
@@ -4386,7 +4386,7 @@ function prepareTemplates(pipeline, options) {
4386
4386
  /**
4387
4387
  * TODO: [🧠] Add context to each template (if missing)
4388
4388
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
4389
- * TODO: [♨] !!! Prepare index the samples and maybe templates
4389
+ * TODO: [♨][main] !!! Prepare index the samples and maybe templates
4390
4390
  * TODO: Write tests for `preparePipeline`
4391
4391
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
4392
4392
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
@@ -4558,7 +4558,7 @@ var knowledgeCommandParser = {
4558
4558
  if (sourceContent === '') {
4559
4559
  throw new ParseError("Source is not defined");
4560
4560
  }
4561
- // TODO: !!!! Following checks should be applied every link in the `sourceContent`
4561
+ // TODO:[main] !!!! Following checks should be applied every link in the `sourceContent`
4562
4562
  if (sourceContent.startsWith('http://')) {
4563
4563
  throw new ParseError("Source is not secure");
4564
4564
  }
@@ -4761,7 +4761,7 @@ var templateCommandParser = {
4761
4761
  if (command.templateType === 'KNOWLEDGE') {
4762
4762
  knowledgeCommandParser.$applyToPipelineJson({
4763
4763
  type: 'KNOWLEDGE',
4764
- sourceContent: $templateJson.content, // <- TODO: [🐝] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
4764
+ sourceContent: $templateJson.content, // <- TODO: [🐝][main] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
4765
4765
  }, $pipelineJson);
4766
4766
  $templateJson.isTemplate = false;
4767
4767
  return;
@@ -6984,7 +6984,7 @@ function pipelineStringToJsonSync(pipelineString) {
6984
6984
  return $asDeeplyFrozenSerializableJson('pipelineJson', $pipelineJson);
6985
6985
  }
6986
6986
  /**
6987
- * TODO: !!!! Warn if used only sync version
6987
+ * TODO:[main] !!!! Warn if used only sync version
6988
6988
  * TODO: [🚞] Report here line/column of error
6989
6989
  * TODO: Use spaceTrim more effectively
6990
6990
  * TODO: [🧠] Parameter flags - isInput, isOutput, isInternal
@@ -7489,7 +7489,7 @@ function isSerializableAsJson(value) {
7489
7489
  }
7490
7490
  }
7491
7491
  /**
7492
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
7492
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
7493
7493
  * TODO: [🧠][💺] Can be done this on type-level?
7494
7494
  */
7495
7495
 
@@ -8330,7 +8330,7 @@ function initializeMakeCommand(program) {
8330
8330
  });
8331
8331
  }
8332
8332
  /**
8333
- * TODO: [🥃] !!! Allow `ptbk make` without configuring any llm tools
8333
+ * TODO: [🥃][main] !!! Allow `ptbk make` without configuring any llm tools
8334
8334
  * TODO: Maybe remove this command - "about" command should be enough?
8335
8335
  * TODO: [0] DRY Javascript and typescript - Maybe make ONLY typescript and for javascript just remove types
8336
8336
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
@@ -8809,7 +8809,7 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
8809
8809
  socket.on('connect', function () {
8810
8810
  resolve(socket);
8811
8811
  });
8812
- // TODO: !!!! Better timeout handling
8812
+ // TODO:[main] !!!! Better timeout handling
8813
8813
  setTimeout(function () {
8814
8814
  reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
8815
8815
  }, CONNECTION_TIMEOUT_MS);
@@ -8989,11 +8989,11 @@ var ANTHROPIC_CLAUDE_MODELS = $asDeeplyFrozenSerializableJson('ANTHROPIC_CLAUDE_
8989
8989
  output: computeUsage("$2.40 / 1M tokens"),
8990
8990
  },
8991
8991
  },
8992
- // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
8992
+ // TODO:[main] !!! Claude 1 and 2 has also completion versions - ask Hoagy
8993
8993
  ]);
8994
8994
  /**
8995
8995
  * Note: [🤖] Add models of new variant
8996
- * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
8996
+ * TODO: [🧠][main] !!! Add embedding models OR Anthropic has only chat+completion models?
8997
8997
  * TODO: [🧠] Some mechanism to propagate unsureness
8998
8998
  * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
8999
8999
  * TODO: [🎰] Some mechanism to auto-update available models
@@ -9349,8 +9349,8 @@ var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
9349
9349
  className: 'AnthropicClaudeExecutionTools',
9350
9350
  });
9351
9351
  /**
9352
- * TODO: [🧠] !!!! Make anonymous this with all LLM providers
9353
- * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
9352
+ * TODO: [🧠][main] !!!! Make anonymous this with all LLM providers
9353
+ * TODO: [🧠][🧱][main] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
9354
9354
  * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
9355
9355
  * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
9356
9356
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
@@ -9728,7 +9728,7 @@ var OPENAI_MODELS = $asDeeplyFrozenSerializableJson('OPENAI_MODELS', [
9728
9728
  prompt: computeUsage("$5.00 / 1M tokens"),
9729
9729
  output: computeUsage("$15.00 / 1M tokens"),
9730
9730
  },
9731
- //TODO: !!!!!! Add gpt-4o-mini-2024-07-18 and all others to be up to date
9731
+ //TODO:[main] !!!!!! Add gpt-4o-mini-2024-07-18 and all others to be up to date
9732
9732
  },
9733
9733
  /**/
9734
9734
  /**/
@@ -9758,7 +9758,7 @@ var OPENAI_MODELS = $asDeeplyFrozenSerializableJson('OPENAI_MODELS', [
9758
9758
  modelVariant: 'CHAT',
9759
9759
  modelTitle: 'o1-preview-2024-09-12',
9760
9760
  modelName: 'o1-preview-2024-09-12',
9761
- // <- TODO: !!!!!! Some better system to organize theese date suffixes and versions
9761
+ // <- TODO:[main] !!!!!! Some better system to organize theese date suffixes and versions
9762
9762
  pricing: {
9763
9763
  prompt: computeUsage("$15.00 / 1M tokens"),
9764
9764
  output: computeUsage("$60.00 / 1M tokens"),
@@ -9876,7 +9876,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
9876
9876
  AzureOpenAiExecutionTools.prototype.listModels = function () {
9877
9877
  return __awaiter(this, void 0, void 0, function () {
9878
9878
  return __generator(this, function (_a) {
9879
- // TODO: !!! Do here some filtering which models are really available as deployment
9879
+ // TODO:[main] !!! Do here some filtering which models are really available as deployment
9880
9880
  // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
9881
9881
  return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
9882
9882
  var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
@@ -10565,7 +10565,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
10565
10565
  * @public exported from `@promptbook/openai`
10566
10566
  */
10567
10567
  var createOpenAiExecutionTools = Object.assign(function (options) {
10568
- // TODO: [🧠] !!!! If browser, auto add `dangerouslyAllowBrowser`
10568
+ // TODO: [🧠][main] !!!! If browser, auto add `dangerouslyAllowBrowser`
10569
10569
  if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
10570
10570
  options = __assign(__assign({}, options), { dangerouslyAllowBrowser: true });
10571
10571
  }