@promptbook/cli 0.92.0-20 β†’ 0.92.0-22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -47,7 +47,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
47
47
  * @generated
48
48
  * @see https://github.com/webgptorg/promptbook
49
49
  */
50
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-20';
50
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-22';
51
51
  /**
52
52
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
53
53
  * Note: [πŸ’ž] Ignore a discrepancy between file name and entity name
@@ -346,6 +346,14 @@ const DEFAULT_IS_AUTO_INSTALLED = false;
346
346
  * @public exported from `@promptbook/core`
347
347
  */
348
348
  const DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME = `getPipelineCollection`;
349
+ /**
350
+ * Default rate limits (requests per minute)
351
+ *
352
+ * Note: Adjust based on the provider tier you have
353
+ *
354
+ * @public exported from `@promptbook/core`
355
+ */
356
+ const DEFAULT_RPM$1 = 60;
349
357
  /**
350
358
  * @@@
351
359
  *
@@ -1034,15 +1042,12 @@ function jsonParse(value) {
1034
1042
  }
1035
1043
  throw new Error(spaceTrim((block) => `
1036
1044
  ${block(error.message)}
1037
-
1045
+
1038
1046
  The JSON text:
1039
1047
  ${block(value)}
1040
1048
  `));
1041
1049
  }
1042
1050
  }
1043
- /**
1044
- * TODO: !!!! Use in Promptbook.studio
1045
- */
1046
1051
 
1047
1052
  /**
1048
1053
  * Convert identification to Promptbook token
@@ -5460,7 +5465,8 @@ const CsvFormatParser = {
5460
5465
  subvalueParsers: [
5461
5466
  {
5462
5467
  subvalueName: 'ROW',
5463
- async mapValues(value, outputParameterName, settings, mapCallback) {
5468
+ async mapValues(options) {
5469
+ const { value, outputParameterName, settings, mapCallback, onProgress } = options;
5464
5470
  const csv = csvParse(value, settings);
5465
5471
  if (csv.errors.length !== 0) {
5466
5472
  throw new CsvFormatError(spaceTrim((block) => `
@@ -5476,21 +5482,29 @@ const CsvFormatParser = {
5476
5482
  ${block(value)}
5477
5483
  `));
5478
5484
  }
5479
- const mappedData = await Promise.all(csv.data.map(async (row, index) => {
5485
+ const mappedData = [];
5486
+ for (let index = 0; index < csv.data.length; index++) {
5487
+ const row = csv.data[index];
5480
5488
  if (row[outputParameterName]) {
5481
5489
  throw new CsvFormatError(`Can not overwrite existing column "${outputParameterName}" in CSV row`);
5482
5490
  }
5483
- return {
5491
+ const mappedRow = {
5484
5492
  ...row,
5485
5493
  [outputParameterName]: await mapCallback(row, index),
5486
5494
  };
5487
- }));
5495
+ mappedData.push(mappedRow);
5496
+ if (onProgress) {
5497
+ // Note: Report the CSV with all rows mapped so far
5498
+ await onProgress(unparse(mappedData, { ...settings, ...MANDATORY_CSV_SETTINGS }));
5499
+ }
5500
+ }
5488
5501
  return unparse(mappedData, { ...settings, ...MANDATORY_CSV_SETTINGS });
5489
5502
  },
5490
5503
  },
5491
5504
  {
5492
5505
  subvalueName: 'CELL',
5493
- async mapValues(value, outputParameterName, settings, mapCallback) {
5506
+ async mapValues(options) {
5507
+ const { value, settings, mapCallback, onProgress } = options;
5494
5508
  const csv = csvParse(value, settings);
5495
5509
  if (csv.errors.length !== 0) {
5496
5510
  throw new CsvFormatError(spaceTrim((block) => `
@@ -5577,7 +5591,8 @@ const TextFormatParser = {
5577
5591
  subvalueParsers: [
5578
5592
  {
5579
5593
  subvalueName: 'LINE',
5580
- async mapValues(value, outputParameterName, settings, mapCallback) {
5594
+ async mapValues(options) {
5595
+ const { value, mapCallback, onProgress } = options;
5581
5596
  const lines = value.split('\n');
5582
5597
  const mappedLines = await Promise.all(lines.map((lineContent, lineNumber) =>
5583
5598
  // TODO: [🧠] Maybe option to skip empty line
@@ -6417,7 +6432,7 @@ async function executeAttempts(options) {
6417
6432
  * @private internal utility of `createPipelineExecutor`
6418
6433
  */
6419
6434
  async function executeFormatSubvalues(options) {
6420
- const { task, jokerParameterNames, parameters, priority, csvSettings, pipelineIdentification } = options;
6435
+ const { task, jokerParameterNames, parameters, priority, csvSettings, onProgress, pipelineIdentification } = options;
6421
6436
  if (task.foreach === undefined) {
6422
6437
  return /* not await */ executeAttempts(options);
6423
6438
  }
@@ -6471,21 +6486,32 @@ async function executeFormatSubvalues(options) {
6471
6486
  formatSettings = csvSettings;
6472
6487
  // <- TODO: [πŸ€Ήβ€β™‚οΈ] More universal, make simmilar pattern for other formats for example \n vs \r\n in text
6473
6488
  }
6474
- const resultString = await subvalueParser.mapValues(parameterValue, task.foreach.outputSubparameterName, formatSettings, async (subparameters, index) => {
6475
- let mappedParameters;
6476
- // TODO: [πŸ€Ήβ€β™‚οΈ][πŸͺ‚] Limit to N concurrent executions
6477
- // TODO: When done [🐚] Report progress also for each subvalue here
6478
- try {
6479
- mappedParameters = mapAvailableToExpectedParameters({
6480
- expectedParameters: Object.fromEntries(task.foreach.inputSubparameterNames.map((subparameterName) => [subparameterName, null])),
6481
- availableParameters: subparameters,
6482
- });
6483
- }
6484
- catch (error) {
6485
- if (!(error instanceof PipelineExecutionError)) {
6486
- throw error;
6489
+ const resultString = await subvalueParser.mapValues({
6490
+ value: parameterValue,
6491
+ outputParameterName: task.foreach.outputSubparameterName,
6492
+ settings: formatSettings,
6493
+ onProgress(partialResultString) {
6494
+ return onProgress(Object.freeze({
6495
+ [task.resultingParameterName]:
6496
+ // <- Note: [πŸ‘©β€πŸ‘©β€πŸ‘§] No need to detect parameter collision here because pipeline checks logic consistency during construction
6497
+ partialResultString,
6498
+ }));
6499
+ },
6500
+ async mapCallback(subparameters, index) {
6501
+ let mappedParameters;
6502
+ // TODO: [πŸ€Ήβ€β™‚οΈ][πŸͺ‚] Limit to N concurrent executions
6503
+ // TODO: When done [🐚] Report progress also for each subvalue here
6504
+ try {
6505
+ mappedParameters = mapAvailableToExpectedParameters({
6506
+ expectedParameters: Object.fromEntries(task.foreach.inputSubparameterNames.map((subparameterName) => [subparameterName, null])),
6507
+ availableParameters: subparameters,
6508
+ });
6487
6509
  }
6488
- throw new PipelineExecutionError(spaceTrim((block) => `
6510
+ catch (error) {
6511
+ if (!(error instanceof PipelineExecutionError)) {
6512
+ throw error;
6513
+ }
6514
+ throw new PipelineExecutionError(spaceTrim((block) => `
6489
6515
  ${error.message}
6490
6516
 
6491
6517
  This is error in FOREACH command
@@ -6494,23 +6520,24 @@ async function executeFormatSubvalues(options) {
6494
6520
  ${block(pipelineIdentification)}
6495
6521
  Subparameter index: ${index}
6496
6522
  `));
6497
- }
6498
- const allSubparameters = {
6499
- ...parameters,
6500
- ...mappedParameters,
6501
- };
6502
- // Note: [πŸ‘¨β€πŸ‘¨β€πŸ‘§] Now we can freeze `subparameters` because we are sure that all and only used parameters are defined and are not going to be changed
6503
- Object.freeze(allSubparameters);
6504
- const subresultString = await executeAttempts({
6505
- ...options,
6506
- priority: priority + index,
6507
- parameters: allSubparameters,
6508
- pipelineIdentification: spaceTrim((block) => `
6523
+ }
6524
+ const allSubparameters = {
6525
+ ...parameters,
6526
+ ...mappedParameters,
6527
+ };
6528
+ // Note: [πŸ‘¨β€πŸ‘¨β€πŸ‘§] Now we can freeze `subparameters` because we are sure that all and only used parameters are defined and are not going to be changed
6529
+ Object.freeze(allSubparameters);
6530
+ const subresultString = await executeAttempts({
6531
+ ...options,
6532
+ priority: priority + index,
6533
+ parameters: allSubparameters,
6534
+ pipelineIdentification: spaceTrim((block) => `
6509
6535
  ${block(pipelineIdentification)}
6510
6536
  Subparameter index: ${index}
6511
6537
  `),
6512
- });
6513
- return subresultString;
6538
+ });
6539
+ return subresultString;
6540
+ },
6514
6541
  });
6515
6542
  return resultString;
6516
6543
  }
@@ -6684,11 +6711,6 @@ async function getReservedParametersForTask(options) {
6684
6711
  async function executeTask(options) {
6685
6712
  const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSupressed, } = options;
6686
6713
  const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
6687
- await onProgress({
6688
- outputParameters: {
6689
- [currentTask.resultingParameterName]: '', // <- TODO: [🧠] What is the best value here?
6690
- },
6691
- });
6692
6714
  // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to double-check
6693
6715
  const usedParameterNames = extractParameterNamesFromTask(currentTask);
6694
6716
  const dependentParameterNames = new Set(currentTask.dependentParameterNames);
@@ -6763,6 +6785,7 @@ async function executeTask(options) {
6763
6785
  preparedPipeline,
6764
6786
  tools,
6765
6787
  $executionReport,
6788
+ onProgress,
6766
6789
  pipelineIdentification,
6767
6790
  maxExecutionAttempts,
6768
6791
  maxParallelCount,
@@ -8892,6 +8915,43 @@ const ChatbotFormfactorDefinition = {
8892
8915
  },
8893
8916
  };
8894
8917
 
8918
+ /**
8919
+ * Completion is a formfactor that emulates completion models
8920
+ *
8921
+ * @public exported from `@promptbook/core`
8922
+ */
8923
+ const CompletionFormfactorDefinition = {
8924
+ name: 'COMPLETION',
8925
+ description: `@@@`,
8926
+ documentationUrl: `https://github.com/webgptorg/promptbook/discussions/@@`,
8927
+ // <- TODO: https://github.com/webgptorg/promptbook/discussions/new?category=concepts
8928
+ // "πŸ”  Completion Formfactor"
8929
+ pipelineInterface: {
8930
+ inputParameters: [
8931
+ {
8932
+ name: 'inputText',
8933
+ description: `Input text to be completed`,
8934
+ isInput: true,
8935
+ isOutput: false,
8936
+ },
8937
+ {
8938
+ name: 'instructions',
8939
+ description: `Additional instructions for the model, for example the required length, empty by default`,
8940
+ isInput: true,
8941
+ isOutput: false,
8942
+ },
8943
+ ],
8944
+ outputParameters: [
8945
+ {
8946
+ name: 'followingText',
8947
+ description: `Text that follows the input text`,
8948
+ isInput: false,
8949
+ isOutput: true,
8950
+ },
8951
+ ],
8952
+ },
8953
+ };
8954
+
8895
8955
  /**
8896
8956
  * Generator is form of app that @@@
8897
8957
  *
@@ -9076,6 +9136,8 @@ const FORMFACTOR_DEFINITIONS = [
9076
9136
  MatcherFormfactorDefinition,
9077
9137
  GeneratorFormfactorDefinition,
9078
9138
  ImageGeneratorFormfactorDefinition,
9139
+ CompletionFormfactorDefinition,
9140
+ // <- [πŸ›¬] When making new formfactor, copy the _boilerplate and link it here
9079
9141
  ];
9080
9142
  /**
9081
9143
  * Note: [πŸ’ž] Ignore a discrepancy between file name and entity name
@@ -15818,7 +15880,7 @@ const OPENAI_MODELS = exportJson({
15818
15880
  */
15819
15881
 
15820
15882
  // Default rate limits (requests per minute) - adjust as needed based on Azure OpenAI tier
15821
- const DEFAULT_RPM$1 = 60;
15883
+ const DEFAULT_RPM = 60;
15822
15884
  // <- TODO: !!! Put in some better place
15823
15885
  /**
15824
15886
  * Execution Tools for calling Azure OpenAI API.
@@ -15839,7 +15901,7 @@ class AzureOpenAiExecutionTools {
15839
15901
  this.client = null;
15840
15902
  // TODO: Allow configuring rate limits via options
15841
15903
  this.limiter = new Bottleneck({
15842
- minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_RPM$1),
15904
+ minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_RPM),
15843
15905
  });
15844
15906
  }
15845
15907
  get title() {
@@ -16755,6 +16817,7 @@ const _OpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
16755
16817
  className: 'OpenAiExecutionTools',
16756
16818
  options: {
16757
16819
  apiKey: 'sk-',
16820
+ maxRequestsPerMinute: DEFAULT_RPM$1,
16758
16821
  },
16759
16822
  };
16760
16823
  },
@@ -16868,9 +16931,6 @@ resultContent, rawResponse) {
16868
16931
  * TODO: [🀝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
16869
16932
  */
16870
16933
 
16871
- // Default rate limits (requests per minute) - adjust as needed based on OpenAI tier
16872
- const DEFAULT_RPM = 60;
16873
- // <- TODO: !!! Put in some better place
16874
16934
  /**
16875
16935
  * Execution Tools for calling OpenAI API
16876
16936
  *
@@ -16890,7 +16950,7 @@ class OpenAiExecutionTools {
16890
16950
  this.client = null;
16891
16951
  // TODO: Allow configuring rate limits via options
16892
16952
  this.limiter = new Bottleneck({
16893
- minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_RPM),
16953
+ minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_RPM$1),
16894
16954
  });
16895
16955
  }
16896
16956
  get title() {
@@ -17240,6 +17300,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
17240
17300
  constructor(options) {
17241
17301
  super(options);
17242
17302
  this.assistantId = options.assistantId;
17303
+ // TODO: !!!! Make limiter same as in OpenAiExecutionTools
17243
17304
  }
17244
17305
  get title() {
17245
17306
  return 'OpenAI Assistant';