@promptbook/node 0.71.0-0 → 0.72.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/esm/index.es.js +157 -128
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/cli.index.d.ts +4 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -2
  5. package/esm/typings/src/_packages/openai.index.d.ts +8 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +2 -0
  7. package/esm/typings/src/execution/createPipelineExecutor/10-executePipeline.d.ts +1 -1
  8. package/esm/typings/src/execution/translation/automatic-translate/automatic-translators/LindatAutomaticTranslator.d.ts +1 -1
  9. package/esm/typings/src/execution/utils/addUsage.d.ts +0 -56
  10. package/esm/typings/src/execution/utils/usage-constants.d.ts +127 -0
  11. package/esm/typings/src/knowledge/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
  12. package/esm/typings/src/knowledge/dialogs/simple-prompt/SimplePromptInterfaceTools.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -2
  14. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +3 -2
  15. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +37 -0
  18. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +14 -0
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +12 -2
  20. package/esm/typings/src/llm-providers/openai/createOpenAiAssistantExecutionTools.d.ts +15 -0
  21. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +9 -0
  22. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +9 -0
  23. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  24. package/esm/typings/src/scripting/javascript/JavascriptEvalExecutionTools.d.ts +1 -1
  25. package/esm/typings/src/scripting/python/PythonExecutionTools.d.ts +1 -1
  26. package/esm/typings/src/scripting/typescript/TypescriptExecutionTools.d.ts +1 -1
  27. package/esm/typings/src/storage/files-storage/FilesStorage.d.ts +1 -1
  28. package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +2 -9
  29. package/package.json +2 -2
  30. package/umd/index.umd.js +157 -128
  31. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -15,7 +15,7 @@ import * as dotenv from 'dotenv';
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.70.0-1';
+ var PROMPTBOOK_VERSION = '0.72.0-0';
  // TODO:[main] !!!! List here all the versions and annotate + put into script

  /*! *****************************************************************************
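Aside from the version-string bump above, nothing else changes in this hunk. As a minimal, hedged sketch of how a consumer could check which build is installed (assuming `PROMPTBOOK_VERSION` is still re-exported from `@promptbook/core` as in earlier releases):

    import { PROMPTBOOK_VERSION } from '@promptbook/core';

    // Log which Promptbook build is actually installed
    console.info(`Running Promptbook ${PROMPTBOOK_VERSION}`);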
@@ -700,127 +700,6 @@ function templateParameterJsonToString(templateParameterJson) {
  * TODO: [🧠] Should be in generated .ptbk.md file GENERATOR_WARNING
  */

- /**
- * @@@
- *
- * @public exported from `@promptbook/utils`
- */
- function deepClone(objectValue) {
- return JSON.parse(JSON.stringify(objectValue));
- /*
- TODO: [🧠] Is there a better implementation?
- > const propertyNames = Object.getOwnPropertyNames(objectValue);
- > for (const propertyName of propertyNames) {
- > const value = (objectValue as really_any)[propertyName];
- > if (value && typeof value === 'object') {
- > deepClone(value);
- > }
- > }
- > return Object.assign({}, objectValue);
- */
- }
- /**
- * TODO: [🧠] Is there a way how to meaningfully test this utility
- */
-
- /**
- * @@@
- *
- * @public exported from `@promptbook/core`
- */
- var ZERO_USAGE = $deepFreeze({
- price: { value: 0 },
- input: {
- tokensCount: { value: 0 },
- charactersCount: { value: 0 },
- wordsCount: { value: 0 },
- sentencesCount: { value: 0 },
- linesCount: { value: 0 },
- paragraphsCount: { value: 0 },
- pagesCount: { value: 0 },
- },
- output: {
- tokensCount: { value: 0 },
- charactersCount: { value: 0 },
- wordsCount: { value: 0 },
- sentencesCount: { value: 0 },
- linesCount: { value: 0 },
- paragraphsCount: { value: 0 },
- pagesCount: { value: 0 },
- },
- });
- /**
- * Function `addUsage` will add multiple usages into one
- *
- * Note: If you provide 0 values, it returns ZERO_USAGE
- *
- * @public exported from `@promptbook/core`
- */
- function addUsage() {
- var usageItems = [];
- for (var _i = 0; _i < arguments.length; _i++) {
- usageItems[_i] = arguments[_i];
- }
- return usageItems.reduce(function (acc, item) {
- var e_1, _a, e_2, _b;
- var _c;
- acc.price.value += ((_c = item.price) === null || _c === void 0 ? void 0 : _c.value) || 0;
- try {
- for (var _d = __values(Object.keys(acc.input)), _e = _d.next(); !_e.done; _e = _d.next()) {
- var key = _e.value;
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- if (item.input[key]) {
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- acc.input[key].value += item.input[key].value || 0;
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- if (item.input[key].isUncertain) {
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- acc.input[key].isUncertain = true;
- }
- }
- }
- }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
- finally {
- try {
- if (_e && !_e.done && (_a = _d.return)) _a.call(_d);
- }
- finally { if (e_1) throw e_1.error; }
- }
- try {
- for (var _f = __values(Object.keys(acc.output)), _g = _f.next(); !_g.done; _g = _f.next()) {
- var key = _g.value;
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- if (item.output[key]) {
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- acc.output[key].value += item.output[key].value || 0;
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- if (item.output[key].isUncertain) {
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- acc.output[key].isUncertain = true;
- }
- }
- }
- }
- catch (e_2_1) { e_2 = { error: e_2_1 }; }
- finally {
- try {
- if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
- }
- finally { if (e_2) throw e_2.error; }
- }
- return acc;
- }, deepClone(ZERO_USAGE));
- }
-
  /**
  * Async version of Array.forEach
  *
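Note that `deepClone`, `ZERO_USAGE` and `addUsage` are not removed for good here; the bundler has only moved them further down in the file (see the later hunks). Since `deepClone` clones via a JSON round-trip, here is a small hedged sketch of the behavior that implies, using a local stand-in rather than the package export:

    // Local stand-in with the same JSON-round-trip strategy as the bundled helper
    function deepCloneSketch<T>(objectValue: T): T {
        return JSON.parse(JSON.stringify(objectValue)) as T;
    }

    const original = { price: { value: 1 }, note: undefined, when: new Date() };
    const copy = deepCloneSketch(original);

    copy.price.value = 2;
    console.log(original.price.value); // 1 (the copy is structurally independent)

    // But JSON round-tripping drops `undefined` members and turns dates into strings
    console.log('note' in copy);   // false
    console.log(typeof copy.when); // "string"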
@@ -896,6 +775,59 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }

+ /**
+ * Represents the usage with no resources consumed
+ *
+ * @public exported from `@promptbook/core`
+ */
+ var ZERO_USAGE = $deepFreeze({
+ price: { value: 0 },
+ input: {
+ tokensCount: { value: 0 },
+ charactersCount: { value: 0 },
+ wordsCount: { value: 0 },
+ sentencesCount: { value: 0 },
+ linesCount: { value: 0 },
+ paragraphsCount: { value: 0 },
+ pagesCount: { value: 0 },
+ },
+ output: {
+ tokensCount: { value: 0 },
+ charactersCount: { value: 0 },
+ wordsCount: { value: 0 },
+ sentencesCount: { value: 0 },
+ linesCount: { value: 0 },
+ paragraphsCount: { value: 0 },
+ pagesCount: { value: 0 },
+ },
+ });
+ /**
+ * Represents the usage with unknown resources consumed
+ *
+ * @public exported from `@promptbook/core`
+ */
+ $deepFreeze({
+ price: { value: 0, isUncertain: true },
+ input: {
+ tokensCount: { value: 0, isUncertain: true },
+ charactersCount: { value: 0, isUncertain: true },
+ wordsCount: { value: 0, isUncertain: true },
+ sentencesCount: { value: 0, isUncertain: true },
+ linesCount: { value: 0, isUncertain: true },
+ paragraphsCount: { value: 0, isUncertain: true },
+ pagesCount: { value: 0, isUncertain: true },
+ },
+ output: {
+ tokensCount: { value: 0, isUncertain: true },
+ charactersCount: { value: 0, isUncertain: true },
+ wordsCount: { value: 0, isUncertain: true },
+ sentencesCount: { value: 0, isUncertain: true },
+ linesCount: { value: 0, isUncertain: true },
+ paragraphsCount: { value: 0, isUncertain: true },
+ pagesCount: { value: 0, isUncertain: true },
+ },
+ });
+
  var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": 
\"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

  /**
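Two frozen constants are introduced here, sharing one shape: a `price` entry plus per-direction counters (`tokensCount` through `pagesCount`), each a `{ value, isUncertain? }` record; the second literal differs only by flagging every entry as uncertain (in this bundled output it appears as a bare `$deepFreeze({...})` expression, presumably because the bundler dropped the unused binding, while its typed export lives in the newly added usage-constants.d.ts). An illustrative sketch of the shape follows; the type names below are assumptions, not the package's published typings:

    // Illustrative only; the published typings live in
    // esm/typings/src/execution/utils/usage-constants.d.ts
    type UncertainNumber = { value: number; isUncertain?: boolean };

    type UsageCounters = {
        tokensCount: UncertainNumber;
        charactersCount: UncertainNumber;
        wordsCount: UncertainNumber;
        sentencesCount: UncertainNumber;
        linesCount: UncertainNumber;
        paragraphsCount: UncertainNumber;
        pagesCount: UncertainNumber;
    };

    type Usage = {
        price: UncertainNumber;
        input: UsageCounters;
        output: UsageCounters;
    };

    // ZERO_USAGE fills every `value` with 0; the "unknown usage" literal additionally
    // sets `isUncertain: true` on every entry.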
@@ -2420,6 +2352,101 @@ function arrayableToArray(input) {
  return [input];
  }

+ /**
+ * @@@
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function deepClone(objectValue) {
+ return JSON.parse(JSON.stringify(objectValue));
+ /*
+ TODO: [🧠] Is there a better implementation?
+ > const propertyNames = Object.getOwnPropertyNames(objectValue);
+ > for (const propertyName of propertyNames) {
+ > const value = (objectValue as really_any)[propertyName];
+ > if (value && typeof value === 'object') {
+ > deepClone(value);
+ > }
+ > }
+ > return Object.assign({}, objectValue);
+ */
+ }
+ /**
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
+ */
+
+ /**
+ * Function `addUsage` will add multiple usages into one
+ *
+ * Note: If you provide 0 values, it returns ZERO_USAGE
+ *
+ * @public exported from `@promptbook/core`
+ */
+ function addUsage() {
+ var usageItems = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ usageItems[_i] = arguments[_i];
+ }
+ return usageItems.reduce(function (acc, item) {
+ var e_1, _a, e_2, _b;
+ var _c;
+ acc.price.value += ((_c = item.price) === null || _c === void 0 ? void 0 : _c.value) || 0;
+ try {
+ for (var _d = __values(Object.keys(acc.input)), _e = _d.next(); !_e.done; _e = _d.next()) {
+ var key = _e.value;
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ if (item.input[key]) {
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ acc.input[key].value += item.input[key].value || 0;
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ if (item.input[key].isUncertain) {
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ acc.input[key].isUncertain = true;
+ }
+ }
+ }
+ }
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ finally {
+ try {
+ if (_e && !_e.done && (_a = _d.return)) _a.call(_d);
+ }
+ finally { if (e_1) throw e_1.error; }
+ }
+ try {
+ for (var _f = __values(Object.keys(acc.output)), _g = _f.next(); !_g.done; _g = _f.next()) {
+ var key = _g.value;
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ if (item.output[key]) {
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ acc.output[key].value += item.output[key].value || 0;
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ if (item.output[key].isUncertain) {
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ acc.output[key].isUncertain = true;
+ }
+ }
+ }
+ }
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
+ finally {
+ try {
+ if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
+ }
+ finally { if (e_2) throw e_2.error; }
+ }
+ return acc;
+ }, deepClone(ZERO_USAGE));
+ }
+
  /**
  * Parses the given script and returns the list of all used variables that are not defined in the script
  *
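Reading the re-added `addUsage` above: it deep-clones `ZERO_USAGE` as the accumulator, sums `price.value` and every `input`/`output` counter, and marks a counter as uncertain whenever any summed item was uncertain. A hedged consumer-side sketch, assuming both symbols are imported from `@promptbook/core` as their doc comments state; the usage records below are partial (only `tokensCount` filled in) purely for brevity, and the published typings may require the full counter set:

    import { addUsage, ZERO_USAGE } from '@promptbook/core';

    // Hypothetical per-call usage records, shaped like the objects summed above
    const callA = { price: { value: 0.5 }, input: { tokensCount: { value: 120 } }, output: { tokensCount: { value: 45 } } };
    const callB = { price: { value: 0.25 }, input: { tokensCount: { value: 80, isUncertain: true } }, output: { tokensCount: { value: 30 } } };

    const total = addUsage(callA, callB);

    console.log(total.price.value);                   // 0.75
    console.log(total.input.tokensCount.value);       // 200
    console.log(total.input.tokensCount.isUncertain); // true (propagated from callB)
    console.log(total.output.tokensCount.value);      // 75

    // With no arguments the reducer simply returns a clone of ZERO_USAGE
    console.log(addUsage());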
@@ -3779,12 +3806,12 @@ function executeFormatCells(options) {
  if (!(error instanceof PipelineExecutionError)) {
  throw error;
  }
- throw new PipelineExecutionError(spaceTrim$1(function (block) { return "\n ".concat(error.message, "\n\n This is error in FOREACH command\n You have probbably passed wrong data to pipeline or wrong data was generated which are processed by FOREACH command\n\n ").concat(block(pipelineIdentification), "\n "); }));
+ throw new PipelineExecutionError(spaceTrim$1(function (block) { return "\n ".concat(error.message, "\n\n This is error in FOREACH command\n You have probbably passed wrong data to pipeline or wrong data was generated which are processed by FOREACH command\n\n ").concat(block(pipelineIdentification), "\n Subparameter index: ").concat(index, "\n "); }));
  }
  allSubparameters = __assign(__assign({}, parameters), mappedParameters);
  // Note: [👨‍👨‍👧] Now we can freeze `subparameters` because we are sure that all and only used parameters are defined and are not going to be changed
  Object.freeze(allSubparameters);
- return [4 /*yield*/, executeAttempts(__assign(__assign({}, options), { priority: priority + index, parameters: allSubparameters, pipelineIdentification: pipelineIdentification }))];
+ return [4 /*yield*/, executeAttempts(__assign(__assign({}, options), { priority: priority + index, parameters: allSubparameters, pipelineIdentification: spaceTrim$1(function (block) { return "\n ".concat(block(pipelineIdentification), "\n Subparameter index: ").concat(index, "\n "); }) }))];
  case 1:
  subresultString = _a.sent();
  return [2 /*return*/, subresultString];
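The net effect of the two changed lines above: when a FOREACH subtask fails, both the rethrown `PipelineExecutionError` and the `pipelineIdentification` handed to `executeAttempts` now name the index of the failing subparameter. Reconstructed from the string concatenation above (wording verbatim from the bundled message, index value hypothetical), the error text now ends roughly like this:

    <original error message>

    This is error in FOREACH command
    You have probbably passed wrong data to pipeline or wrong data was generated which are processed by FOREACH command

    <pipeline identification>
    Subparameter index: 3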
@@ -4294,7 +4321,7 @@ function executePipeline(options) {
  },
  settings: settings,
  $executionReport: executionReport,
- pipelineIdentification: pipelineIdentification,
+ pipelineIdentification: spaceTrim(function (block) { return "\n ".concat(block(pipelineIdentification), "\n Template name: ").concat(currentTemplate.name, "\n Template title: ").concat(currentTemplate.title, "\n "); }),
  })
  .then(function (newParametersToPass) {
  parametersToPass = __assign(__assign({}, newParametersToPass), parametersToPass);
@@ -4417,9 +4444,11 @@ function createPipelineExecutor(options) {
  else if (isNotPreparedWarningSupressed !== true) {
  console.warn(spaceTrim(function (block) { return "\n Pipeline is not prepared\n\n ".concat(block(pipelineIdentification), "\n\n It will be prepared ad-hoc before the first execution and **returned as `preparedPipeline` in `PipelineExecutorResult`**\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n "); }));
  }
+ var runCount = 0;
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
  return __generator(this, function (_a) {
- return [2 /*return*/, executePipeline({
+ runCount++;
+ return [2 /*return*/, /* not await */ executePipeline({
  pipeline: pipeline,
  preparedPipeline: preparedPipeline,
  setPreparedPipeline: function (newPreparedPipeline) {
@@ -4428,7 +4457,7 @@ function createPipelineExecutor(options) {
  inputParameters: inputParameters,
  tools: tools,
  onProgress: onProgress,
- pipelineIdentification: pipelineIdentification,
+ pipelineIdentification: spaceTrim(function (block) { return "\n ".concat(block(pipelineIdentification), "\n ").concat(runCount === 1 ? '' : "Run #".concat(runCount), "\n "); }),
  settings: {
  maxExecutionAttempts: maxExecutionAttempts,
  maxParallelCount: maxParallelCount,
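Taken together, the `runCount` hunks make each executor instance count its invocations and, from the second run onward, stamp `Run #<n>` into the pipeline identification used in warnings and errors (the first run appends nothing because of the `runCount === 1` check). A hedged sketch of the observable effect, assuming the `createPipelineExecutor` call shape visible in this file:

    import { createPipelineExecutor } from '@promptbook/core';

    // Assumed to be prepared elsewhere; building a pipeline and LLM tools is out of
    // scope for this sketch
    declare const pipeline: Parameters<typeof createPipelineExecutor>[0]['pipeline'];
    declare const tools: Parameters<typeof createPipelineExecutor>[0]['tools'];

    const pipelineExecutor = createPipelineExecutor({ pipeline, tools });

    // First run: the pipeline identification is unchanged
    await pipelineExecutor({ topic: 'first' });

    // Second run: warnings and errors from this run additionally carry "Run #2"
    await pipelineExecutor({ topic: 'second' });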
@@ -4601,7 +4630,7 @@ function prepareKnowledgePieces(knowledgeSources, options) {
  var partialPieces, pieces;
  return __generator(this, function (_a) {
  switch (_a.label) {
- case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝] !!!!!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
+ case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝][main] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
  options)];
  case 1:
  partialPieces = _a.sent();