@promptbook/node 0.88.0 → 0.89.0-2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. package/README.md +4 -0
  2. package/esm/index.es.js +127 -89
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -2
  5. package/esm/typings/src/_packages/types.index.d.ts +16 -4
  6. package/esm/typings/src/cli/cli-commands/login.d.ts +15 -0
  7. package/esm/typings/src/execution/PipelineExecutorResult.d.ts +2 -2
  8. package/esm/typings/src/execution/PromptResult.d.ts +2 -2
  9. package/esm/typings/src/execution/{PromptResultUsage.d.ts → Usage.d.ts} +5 -5
  10. package/esm/typings/src/execution/utils/addUsage.d.ts +2 -2
  11. package/esm/typings/src/execution/utils/computeUsageCounts.d.ts +3 -3
  12. package/esm/typings/src/execution/utils/usage-constants.d.ts +77 -60
  13. package/esm/typings/src/execution/utils/usageToHuman.d.ts +5 -5
  14. package/esm/typings/src/execution/utils/usageToWorktime.d.ts +5 -5
  15. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +9 -2
  16. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/{countTotalUsage.d.ts → countUsage.d.ts} +1 -1
  17. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/limitTotalUsage.d.ts +2 -2
  18. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +2 -2
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +0 -9
  20. package/esm/typings/src/llm-providers/openai/computeOpenAiUsage.d.ts +2 -2
  21. package/esm/typings/src/pipeline/PipelineJson/PreparationJson.d.ts +2 -2
  22. package/esm/typings/src/playground/BrjappConnector.d.ts +67 -0
  23. package/esm/typings/src/playground/brjapp-api-schema.d.ts +12879 -0
  24. package/esm/typings/src/playground/playground.d.ts +5 -0
  25. package/esm/typings/src/remote-server/socket-types/_subtypes/PromptbookServer_Identification.d.ts +2 -1
  26. package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +15 -3
  27. package/esm/typings/src/types/typeAliases.d.ts +8 -2
  28. package/package.json +2 -2
  29. package/umd/index.umd.js +127 -89
  30. package/umd/index.umd.js.map +1 -1
@@ -0,0 +1,5 @@
+ #!/usr/bin/env ts-node
+ export {};
+ /**
+ * Note: [⚫] Code in this file should never be published in any package
+ */
@@ -37,7 +37,8 @@ export type PromptbookServer_AnonymousIdentification = {
  /**
  * Identifier of the end user
  *
- * Note: this is passed to the certain model providers to identify misuse
+ * Note: This can be either some id or email or any other identifier
+ * Note: In anonymous mode, this is passed to the certain model providers to identify misuse
  * Note: In anonymous mode, there is no need to identify yourself, nor does it change the actual configuration of LLM Tools (unlike in application mode)
  */
  readonly userId?: string_user_id;
@@ -58,15 +58,27 @@ export type ApplicationRemoteServerOptions<TCustomOptions> = {
  };
  export type ApplicationRemoteServerClientOptions<TCustomOptions> = {
  /**
- * @@@
+ * Identifier of the application
+ *
+ * Note: This is usefull when you use Promptbook remote server for multiple apps/frontends, if its used just for single app, use here just "app" or "your-app-name"
+ * Note: This can be some id or some semantic name like "email-agent"
  */
  readonly appId: string_app_id | null;
  /**
- * @@@
+ * Identifier of the end user
+ *
+ * Note: This can be either some id or email or any other identifier
+ * Note: This is also passed to the certain model providers to identify misuse
  */
  readonly userId?: string_user_id;
  /**
- * @@@
+ * Token of the user to verify its identity
+ *
+ * Note: This is passed for example to `createLlmExecutionTools`
+ */
+ readonly userToken?: string_user_id;
+ /**
+ * Additional arbitrary options to identify the client or to pass custom metadata
  */
  readonly customOptions?: TCustomOptions;
  };
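The previously undocumented (`@@@`) client options are now described: `appId` identifies the app or frontend, `userId` identifies the end user, the new `userToken` carries a verifiable credential, and `customOptions` holds arbitrary client metadata. A minimal sketch of what such an options object might look like on the client side; the concrete values are hypothetical and the type is assumed to be re-exported from `@promptbook/types`:

```ts
import type { ApplicationRemoteServerClientOptions } from '@promptbook/types';
// <- Assumption: the type is re-exported from `@promptbook/types`

const clientOptions: ApplicationRemoteServerClientOptions<{ locale: string }> = {
    appId: 'email-agent', // <- or simply 'app' when the server serves a single frontend
    userId: 'alice@example.com', // <- some id or email; also forwarded to model providers to identify misuse
    userToken: 'hypothetical-session-token', // <- verified on the server, e.g. inside `createLlmExecutionTools`
    customOptions: { locale: 'en' }, // <- arbitrary metadata for custom server logic
};
```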
@@ -242,6 +242,12 @@ export type string_promptbook_documentation_url = `https://github.com/webgptorg/
  * For example `"towns.cz"`
  */
  export type string_domain = string;
+ /**
+ * Semantic helper
+ *
+ * For example `"https://*.pavolhejny.com/*"`
+ */
+ export type string_origin = string;
  /**
  * Semantic helper
  *
@@ -433,13 +439,13 @@ export type string_uuid = string & {
  *
  * @@@
  */
- export type string_app_id = id;
+ export type string_app_id = id | 'app';
  /**
  * End user identifier
  *
  * @@@
  */
- export type string_user_id = id;
+ export type string_user_id = id | string_email;
  /**
  * Semantic helper
  *
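The aliases remain semantic helpers; the widened unions document that an app id may be the literal `'app'` and a user id may be an email address. A small illustrative sketch, assuming these aliases are re-exported from `@promptbook/types` and that `id` and `string_email` are plain string-based helpers like the neighbouring ones:

```ts
import type { string_app_id, string_user_id } from '@promptbook/types';
// <- Assumption: these semantic aliases are re-exported from `@promptbook/types`

// The widened unions document intent rather than adding runtime checks:
const appId: string_app_id = 'app'; // <- the literal 'app' is now an explicitly allowed app id
const userId: string_user_id = 'alice@example.com'; // <- an email is an explicitly documented form of user id
```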
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/node",
- "version": "0.88.0",
+ "version": "0.89.0-2",
  "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
  "private": false,
  "sideEffects": false,
@@ -47,7 +47,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/node.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.88.0"
+ "@promptbook/core": "0.89.0-2"
  },
  "dependencies": {
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -46,7 +46,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.88.0';
+ const PROMPTBOOK_ENGINE_VERSION = '0.89.0-2';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1889,6 +1889,7 @@
  const { isSuccessful, errors, warnings } = executionResult;
  for (const warning of warnings) {
  console.warn(warning.message);
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
  if (isSuccessful === true) {
  return;
@@ -2113,30 +2114,42 @@
  }
  }

+ /**
+ * Represents the uncertain value
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const ZERO_VALUE = $deepFreeze({ value: 0 });
+ /**
+ * Represents the uncertain value
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const UNCERTAIN_ZERO_VALUE = $deepFreeze({ value: 0, isUncertain: true });
  /**
  * Represents the usage with no resources consumed
  *
  * @public exported from `@promptbook/core`
  */
  const ZERO_USAGE = $deepFreeze({
- price: { value: 0 },
+ price: ZERO_VALUE,
  input: {
- tokensCount: { value: 0 },
- charactersCount: { value: 0 },
- wordsCount: { value: 0 },
- sentencesCount: { value: 0 },
- linesCount: { value: 0 },
- paragraphsCount: { value: 0 },
- pagesCount: { value: 0 },
+ tokensCount: ZERO_VALUE,
+ charactersCount: ZERO_VALUE,
+ wordsCount: ZERO_VALUE,
+ sentencesCount: ZERO_VALUE,
+ linesCount: ZERO_VALUE,
+ paragraphsCount: ZERO_VALUE,
+ pagesCount: ZERO_VALUE,
  },
  output: {
- tokensCount: { value: 0 },
- charactersCount: { value: 0 },
- wordsCount: { value: 0 },
- sentencesCount: { value: 0 },
- linesCount: { value: 0 },
- paragraphsCount: { value: 0 },
- pagesCount: { value: 0 },
+ tokensCount: ZERO_VALUE,
+ charactersCount: ZERO_VALUE,
+ wordsCount: ZERO_VALUE,
+ sentencesCount: ZERO_VALUE,
+ linesCount: ZERO_VALUE,
+ paragraphsCount: ZERO_VALUE,
+ pagesCount: ZERO_VALUE,
  },
  });
  /**
@@ -2145,24 +2158,24 @@
  * @public exported from `@promptbook/core`
  */
  $deepFreeze({
- price: { value: 0, isUncertain: true },
+ price: UNCERTAIN_ZERO_VALUE,
  input: {
- tokensCount: { value: 0, isUncertain: true },
- charactersCount: { value: 0, isUncertain: true },
- wordsCount: { value: 0, isUncertain: true },
- sentencesCount: { value: 0, isUncertain: true },
- linesCount: { value: 0, isUncertain: true },
- paragraphsCount: { value: 0, isUncertain: true },
- pagesCount: { value: 0, isUncertain: true },
+ tokensCount: UNCERTAIN_ZERO_VALUE,
+ charactersCount: UNCERTAIN_ZERO_VALUE,
+ wordsCount: UNCERTAIN_ZERO_VALUE,
+ sentencesCount: UNCERTAIN_ZERO_VALUE,
+ linesCount: UNCERTAIN_ZERO_VALUE,
+ paragraphsCount: UNCERTAIN_ZERO_VALUE,
+ pagesCount: UNCERTAIN_ZERO_VALUE,
  },
  output: {
- tokensCount: { value: 0, isUncertain: true },
- charactersCount: { value: 0, isUncertain: true },
- wordsCount: { value: 0, isUncertain: true },
- sentencesCount: { value: 0, isUncertain: true },
- linesCount: { value: 0, isUncertain: true },
- paragraphsCount: { value: 0, isUncertain: true },
- pagesCount: { value: 0, isUncertain: true },
+ tokensCount: UNCERTAIN_ZERO_VALUE,
+ charactersCount: UNCERTAIN_ZERO_VALUE,
+ wordsCount: UNCERTAIN_ZERO_VALUE,
+ sentencesCount: UNCERTAIN_ZERO_VALUE,
+ linesCount: UNCERTAIN_ZERO_VALUE,
+ paragraphsCount: UNCERTAIN_ZERO_VALUE,
+ pagesCount: UNCERTAIN_ZERO_VALUE,
  },
  });
  /**
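The two new frozen constants deduplicate the zero-usage literals; `ZERO_USAGE` keeps acting as the neutral element for usage accumulation. A hedged sketch of the typical folding pattern, assuming `ZERO_USAGE` and `addUsage` are exported from `@promptbook/core` (as their `@public` annotations state) and that the renamed `Usage` type is available from `@promptbook/types`:

```ts
import { addUsage, ZERO_USAGE } from '@promptbook/core';
import type { Usage } from '@promptbook/types'; // <- Assumption: the renamed `PromptResultUsage` -> `Usage` type is re-exported here

// Fold per-call usages into one total, starting from the zero element
function sumUsage(usages: ReadonlyArray<Usage>): Usage {
    return usages.reduce<Usage>((total, usage) => addUsage(total, usage), ZERO_USAGE);
}
```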
@@ -2333,7 +2346,7 @@
  if (parameterNames.has(subparameterName)) {
  parameterNames.delete(subparameterName);
  parameterNames.add(foreach.parameterName);
- // <- TODO: [🚎] Warn/logic error when `subparameterName` not used
+ // <- TODO: [🏮] Warn/logic error when `subparameterName` not used
  }
  }
  }
@@ -2827,6 +2840,7 @@
  `);
  // TODO: [🟥] Detect browser / node and make it colorfull
  console.warn(warningMessage);
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  /*
  return {
  async listModels() {
@@ -4416,6 +4430,7 @@

  @see more at https://ptbk.io/prepare-pipeline
  `));
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
  let runCount = 0;
  const pipelineExecutorWithCallback = async (inputParameters, onProgress) => {
@@ -4492,8 +4507,9 @@
  * @returns LLM tools with same functionality with added total cost counting
  * @public exported from `@promptbook/core`
  */
- function countTotalUsage(llmTools) {
+ function countUsage(llmTools) {
  let totalUsage = ZERO_USAGE;
+ const spending = new rxjs.Subject();
  const proxyTools = {
  get title() {
  // TODO: [🧠] Maybe put here some suffix
@@ -4503,12 +4519,15 @@
  // TODO: [🧠] Maybe put here some suffix
  return llmTools.description;
  },
- async checkConfiguration() {
+ checkConfiguration() {
  return /* not await */ llmTools.checkConfiguration();
  },
  listModels() {
  return /* not await */ llmTools.listModels();
  },
+ spending() {
+ return spending.asObservable();
+ },
  getTotalUsage() {
  // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
  return totalUsage;
@@ -4519,6 +4538,7 @@
  // console.info('[🚕] callChatModel through countTotalUsage');
  const promptResult = await llmTools.callChatModel(prompt);
  totalUsage = addUsage(totalUsage, promptResult.usage);
+ spending.next(promptResult.usage);
  return promptResult;
  };
  }
@@ -4527,6 +4547,7 @@
  // console.info('[🚕] callCompletionModel through countTotalUsage');
  const promptResult = await llmTools.callCompletionModel(prompt);
  totalUsage = addUsage(totalUsage, promptResult.usage);
+ spending.next(promptResult.usage);
  return promptResult;
  };
  }
@@ -4535,6 +4556,7 @@
  // console.info('[🚕] callEmbeddingModel through countTotalUsage');
  const promptResult = await llmTools.callEmbeddingModel(prompt);
  totalUsage = addUsage(totalUsage, promptResult.usage);
+ spending.next(promptResult.usage);
  return promptResult;
  };
  }
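Besides the rename from `countTotalUsage` to `countUsage`, the wrapped tools now expose a `spending()` observable (backed by an RxJS `Subject`) that emits the usage of every individual model call, while `getTotalUsage()` keeps returning the accumulated total. A hedged usage sketch, assuming `countUsage` is exported from `@promptbook/core` like its predecessor and that `LlmExecutionTools` is available from `@promptbook/types`:

```ts
import { countUsage } from '@promptbook/core';
import type { LlmExecutionTools } from '@promptbook/types';

declare const llmTools: LlmExecutionTools; // <- any configured provider (OpenAI, Anthropic, ...)

const countedTools = countUsage(llmTools);

// Per-call usage is pushed through the new `spending()` observable...
const subscription = countedTools.spending().subscribe((usage) => {
    console.info('Call price:', usage.price.value);
});

// ...while the running total stays available as before:
console.info('Total so far:', countedTools.getTotalUsage().price.value);

subscription.unsubscribe();
```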
@@ -5185,63 +5207,73 @@
  const { maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, rootDirname, isVerbose = DEFAULT_IS_VERBOSE } = options;
  const knowledgePreparedUnflatten = new Array(knowledgeSources.length);
  await forEachAsync(knowledgeSources, { maxParallelCount }, async (knowledgeSource, index) => {
- let partialPieces = null;
- const sourceHandler = await makeKnowledgeSourceHandler(knowledgeSource, tools, { rootDirname, isVerbose });
- const scrapers = arrayableToArray(tools.scrapers);
- for (const scraper of scrapers) {
- if (!scraper.metadata.mimeTypes.includes(sourceHandler.mimeType)
- // <- TODO: [🦔] Implement mime-type wildcards
- ) {
- continue;
- }
- const partialPiecesUnchecked = await scraper.scrape(sourceHandler);
- if (partialPiecesUnchecked !== null) {
- partialPieces = [...partialPiecesUnchecked];
- // <- TODO: [🪓] Here should be no need for spreading new array, just `partialPieces = partialPiecesUnchecked`
- break;
- }
- console.warn(spaceTrim__default["default"]((block) => `
- Cannot scrape knowledge from source despite the scraper \`${scraper.metadata.className}\` supports the mime type "${sourceHandler.mimeType}".
+ try {
+ let partialPieces = null;
+ const sourceHandler = await makeKnowledgeSourceHandler(knowledgeSource, tools, { rootDirname, isVerbose });
+ const scrapers = arrayableToArray(tools.scrapers);
+ for (const scraper of scrapers) {
+ if (!scraper.metadata.mimeTypes.includes(sourceHandler.mimeType)
+ // <- TODO: [🦔] Implement mime-type wildcards
+ ) {
+ continue;
+ }
+ const partialPiecesUnchecked = await scraper.scrape(sourceHandler);
+ if (partialPiecesUnchecked !== null) {
+ partialPieces = [...partialPiecesUnchecked];
+ // <- TODO: [🪓] Here should be no need for spreading new array, just `partialPieces = partialPiecesUnchecked`
+ break;
+ }
+ console.warn(spaceTrim__default["default"]((block) => `
+ Cannot scrape knowledge from source despite the scraper \`${scraper.metadata.className}\` supports the mime type "${sourceHandler.mimeType}".

- The source:
- ${block(knowledgeSource.knowledgeSourceContent
- .split('\n')
- .map((line) => `> ${line}`)
- .join('\n'))}
+ The source:
+ ${block(knowledgeSource.knowledgeSourceContent
+ .split('\n')
+ .map((line) => `> ${line}`)
+ .join('\n'))}

- ${block($registeredScrapersMessage(scrapers))}
+ ${block($registeredScrapersMessage(scrapers))}


- `));
- }
- if (partialPieces === null) {
- throw new KnowledgeScrapeError(spaceTrim__default["default"]((block) => `
- Cannot scrape knowledge
+ `));
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
+ }
+ if (partialPieces === null) {
+ throw new KnowledgeScrapeError(spaceTrim__default["default"]((block) => `
+ Cannot scrape knowledge

- The source:
- > ${block(knowledgeSource.knowledgeSourceContent
- .split('\n')
- .map((line) => `> ${line}`)
- .join('\n'))}
+ The source:
+ > ${block(knowledgeSource.knowledgeSourceContent
+ .split('\n')
+ .map((line) => `> ${line}`)
+ .join('\n'))}

- No scraper found for the mime type "${sourceHandler.mimeType}"
+ No scraper found for the mime type "${sourceHandler.mimeType}"

- ${block($registeredScrapersMessage(scrapers))}
+ ${block($registeredScrapersMessage(scrapers))}


- `));
+ `));
+ }
+ const pieces = partialPieces.map((partialPiece) => ({
+ ...partialPiece,
+ sources: [
+ {
+ name: knowledgeSource.name,
+ // line, column <- TODO: [☀]
+ // <- TODO: [❎]
+ },
+ ],
+ }));
+ knowledgePreparedUnflatten[index] = pieces;
+ }
+ catch (error) {
+ if (!(error instanceof Error)) {
+ throw error;
+ }
+ console.warn(error);
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
- const pieces = partialPieces.map((partialPiece) => ({
- ...partialPiece,
- sources: [
- {
- name: knowledgeSource.name,
- // line, column <- TODO: [☀]
- // <- TODO: [❎]
- },
- ],
- }));
- knowledgePreparedUnflatten[index] = pieces;
  });
  const knowledgePrepared = knowledgePreparedUnflatten.flat();
  return knowledgePrepared;
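The whole per-source body is now wrapped in `try`/`catch`, so a single failing knowledge source is logged as a warning instead of aborting the entire preparation (non-`Error` throws are still rethrown). The underlying pattern, shown as a standalone sketch with hypothetical names:

```ts
// Hypothetical helper illustrating the error-to-warning pattern introduced above
async function prepareEach<TSource, TPrepared>(
    sources: ReadonlyArray<TSource>,
    prepareOne: (source: TSource) => Promise<TPrepared>,
): Promise<Array<TPrepared>> {
    const prepared: Array<TPrepared> = [];
    for (const source of sources) {
        try {
            prepared.push(await prepareOne(source));
        } catch (error) {
            if (!(error instanceof Error)) {
                throw error; // <- only genuine Errors are downgraded to warnings
            }
            console.warn(error); // <- non-critical failure: log and continue with the other sources
        }
    }
    return prepared;
}
```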
@@ -5347,7 +5379,7 @@
  // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
  const _llms = arrayableToArray(tools.llm);
  const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
- const llmToolsWithUsage = countTotalUsage(llmTools);
+ const llmToolsWithUsage = countUsage(llmTools);
  // <- TODO: [🌯]
  /*
  TODO: [🧠][🪑][🔃] Should this be done or not
@@ -7066,7 +7098,8 @@
  if ($pipelineJson.defaultModelRequirements[command.key] !== undefined) {
  if ($pipelineJson.defaultModelRequirements[command.key] === command.value) {
  console.warn(`Multiple commands \`MODEL ${command.key} ${command.value}\` in the pipeline head`);
- // <- TODO: [🚎][💩] Some better way how to get warnings from pipeline parsing / logic
+ // <- TODO: [🏮] Some better way how to get warnings from pipeline parsing / logic
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
  else {
  throw new ParseError(spaceTrim__default["default"](`
@@ -7098,6 +7131,7 @@
  modelVariant: 'VARIANT',
  maxTokens: '???',
  }[command.key]} ${command.value}\` in the task "${$taskJson.title || $taskJson.name}"`);
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
  else {
  throw new ParseError(spaceTrim__default["default"](`
@@ -7377,15 +7411,15 @@
  }
  console.warn(spaceTrim__default["default"](`

- Persona "${personaName}" is defined multiple times with different description:
+ Persona "${personaName}" is defined multiple times with different description:

- First definition:
- ${persona.description}
+ First definition:
+ ${persona.description}

- Second definition:
- ${personaDescription}
+ Second definition:
+ ${personaDescription}

- `));
+ `));
  persona.description += spaceTrim__default["default"]('\n\n' + personaDescription);
  }
@@ -9021,6 +9055,7 @@
  }
  else {
  console.warn(`Command "${humanReadableCommand}" exceeded time limit of ${timeout}ms but continues running`);
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  resolve('Command exceeded time limit');
  }
  });
@@ -9046,6 +9081,7 @@
  output.push(stderr.toString());
  if (isVerbose && stderr.toString().trim()) {
  console.warn(stderr.toString());
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
  });
  const finishWithCode = (code) => {
@@ -9057,6 +9093,7 @@
  else {
  if (isVerbose) {
  console.warn(`Command "${humanReadableCommand}" exited with code ${code}`);
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
  resolve(spaceTrim.spaceTrim(output.join('\n')));
  }
@@ -9078,6 +9115,7 @@
  else {
  if (isVerbose) {
  console.warn(error);
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
  resolve(spaceTrim.spaceTrim(output.join('\n')));
  }