@promptbook/remote-server 0.88.0 → 0.89.0-2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. package/README.md +4 -0
  2. package/esm/index.es.js +119 -82
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -2
  5. package/esm/typings/src/_packages/types.index.d.ts +16 -4
  6. package/esm/typings/src/cli/cli-commands/login.d.ts +15 -0
  7. package/esm/typings/src/execution/PipelineExecutorResult.d.ts +2 -2
  8. package/esm/typings/src/execution/PromptResult.d.ts +2 -2
  9. package/esm/typings/src/execution/{PromptResultUsage.d.ts → Usage.d.ts} +5 -5
  10. package/esm/typings/src/execution/utils/addUsage.d.ts +2 -2
  11. package/esm/typings/src/execution/utils/computeUsageCounts.d.ts +3 -3
  12. package/esm/typings/src/execution/utils/usage-constants.d.ts +77 -60
  13. package/esm/typings/src/execution/utils/usageToHuman.d.ts +5 -5
  14. package/esm/typings/src/execution/utils/usageToWorktime.d.ts +5 -5
  15. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +9 -2
  16. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/{countTotalUsage.d.ts → countUsage.d.ts} +1 -1
  17. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/limitTotalUsage.d.ts +2 -2
  18. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +2 -2
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +0 -9
  20. package/esm/typings/src/llm-providers/openai/computeOpenAiUsage.d.ts +2 -2
  21. package/esm/typings/src/pipeline/PipelineJson/PreparationJson.d.ts +2 -2
  22. package/esm/typings/src/playground/BrjappConnector.d.ts +67 -0
  23. package/esm/typings/src/playground/brjapp-api-schema.d.ts +12879 -0
  24. package/esm/typings/src/playground/playground.d.ts +5 -0
  25. package/esm/typings/src/remote-server/socket-types/_subtypes/PromptbookServer_Identification.d.ts +2 -1
  26. package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +15 -3
  27. package/esm/typings/src/types/typeAliases.d.ts +8 -2
  28. package/package.json +2 -2
  29. package/umd/index.umd.js +119 -82
  30. package/umd/index.umd.js.map +1 -1
@@ -0,0 +1,5 @@
+ #!/usr/bin/env ts-node
+ export {};
+ /**
+ * Note: [⚫] Code in this file should never be published in any package
+ */
@@ -37,7 +37,8 @@ export type PromptbookServer_AnonymousIdentification = {
  /**
  * Identifier of the end user
  *
- * Note: this is passed to the certain model providers to identify misuse
+ * Note: This can be either some id or email or any other identifier
+ * Note: In anonymous mode, this is passed to the certain model providers to identify misuse
  * Note: In anonymous mode, there is no need to identify yourself, nor does it change the actual configuration of LLM Tools (unlike in application mode)
  */
  readonly userId?: string_user_id;
@@ -58,15 +58,27 @@ export type ApplicationRemoteServerOptions<TCustomOptions> = {
  };
  export type ApplicationRemoteServerClientOptions<TCustomOptions> = {
  /**
- * @@@
+ * Identifier of the application
+ *
+ * Note: This is usefull when you use Promptbook remote server for multiple apps/frontends, if its used just for single app, use here just "app" or "your-app-name"
+ * Note: This can be some id or some semantic name like "email-agent"
  */
  readonly appId: string_app_id | null;
  /**
- * @@@
+ * Identifier of the end user
+ *
+ * Note: This can be either some id or email or any other identifier
+ * Note: This is also passed to the certain model providers to identify misuse
  */
  readonly userId?: string_user_id;
  /**
- * @@@
+ * Token of the user to verify its identity
+ *
+ * Note: This is passed for example to `createLlmExecutionTools`
+ */
+ readonly userToken?: string_user_id;
+ /**
+ * Additional arbitrary options to identify the client or to pass custom metadata
  */
  readonly customOptions?: TCustomOptions;
  };
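
For orientation, here is a minimal TypeScript sketch of how a client could fill the newly documented `ApplicationRemoteServerClientOptions` fields. The import path and all concrete values are assumptions for illustration; only the field names and their doc comments come from the diff above.

```ts
// Sketch only — the import path is an assumption; the type itself is declared in
// src/remote-server/types/RemoteServerOptions.d.ts as shown in the diff above.
import type { ApplicationRemoteServerClientOptions } from '@promptbook/types';

const clientOptions: ApplicationRemoteServerClientOptions<{ theme: string }> = {
    appId: 'email-agent', // single-app setups can simply use 'app'
    userId: 'alice@example.com', // an id, an email, or any other identifier
    userToken: 'usr_token_123', // hypothetical token, verified server-side (e.g. passed to `createLlmExecutionTools`)
    customOptions: { theme: 'dark' }, // arbitrary client metadata
};
```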
@@ -242,6 +242,12 @@ export type string_promptbook_documentation_url = `https://github.com/webgptorg/
  * For example `"towns.cz"`
  */
  export type string_domain = string;
+ /**
+ * Semantic helper
+ *
+ * For example `"https://*.pavolhejny.com/*"`
+ */
+ export type string_origin = string;
  /**
  * Semantic helper
  *
@@ -433,13 +439,13 @@ export type string_uuid = string & {
  *
  * @@@
  */
- export type string_app_id = id;
+ export type string_app_id = id | 'app';
  /**
  * End user identifier
  *
  * @@@
  */
- export type string_user_id = id;
+ export type string_user_id = id | string_email;
  /**
  * Semantic helper
  *
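
The widened aliases above are semantic helpers over plain strings, so assignments like the following should type-check. The values are illustrative only (the origin example is taken from the new doc comment), and the import path is an assumption.

```ts
// Assuming the aliases remain string-based, as elsewhere in typeAliases.d.ts,
// and that they are re-exported from `@promptbook/types` (assumption):
import type { string_app_id, string_origin, string_user_id } from '@promptbook/types';

const origin: string_origin = 'https://*.pavolhejny.com/*'; // new alias in this release
const appId: string_app_id = 'app';                         // the literal 'app' is now allowed
const userId: string_user_id = 'alice@example.com';         // an email now qualifies (id | string_email)
```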
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/remote-server",
- "version": "0.88.0",
+ "version": "0.89.0-2",
  "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
  "private": false,
  "sideEffects": false,
@@ -47,7 +47,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.88.0"
+ "@promptbook/core": "0.89.0-2"
  },
  "dependencies": {
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -28,7 +28,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.88.0';
+ const PROMPTBOOK_ENGINE_VERSION = '0.89.0-2';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -691,6 +691,7 @@
  }
  else {
  console.warn(`Command "${humanReadableCommand}" exceeded time limit of ${timeout}ms but continues running`);
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  resolve('Command exceeded time limit');
  }
  });
@@ -716,6 +717,7 @@
  output.push(stderr.toString());
  if (isVerbose && stderr.toString().trim()) {
  console.warn(stderr.toString());
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
  });
  const finishWithCode = (code) => {
@@ -727,6 +729,7 @@
  else {
  if (isVerbose) {
  console.warn(`Command "${humanReadableCommand}" exited with code ${code}`);
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
  resolve(spaceTrim.spaceTrim(output.join('\n')));
  }
@@ -748,6 +751,7 @@
  else {
  if (isVerbose) {
  console.warn(error);
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
  resolve(spaceTrim.spaceTrim(output.join('\n')));
  }
@@ -1816,6 +1820,7 @@
  const { isSuccessful, errors, warnings } = executionResult;
  for (const warning of warnings) {
  console.warn(warning.message);
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
  if (isSuccessful === true) {
  return;
@@ -2417,30 +2422,42 @@
  await Promise.all(tasks);
  }

+ /**
+ * Represents the uncertain value
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const ZERO_VALUE = $deepFreeze({ value: 0 });
+ /**
+ * Represents the uncertain value
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const UNCERTAIN_ZERO_VALUE = $deepFreeze({ value: 0, isUncertain: true });
  /**
  * Represents the usage with no resources consumed
  *
  * @public exported from `@promptbook/core`
  */
  const ZERO_USAGE = $deepFreeze({
- price: { value: 0 },
+ price: ZERO_VALUE,
  input: {
- tokensCount: { value: 0 },
- charactersCount: { value: 0 },
- wordsCount: { value: 0 },
- sentencesCount: { value: 0 },
- linesCount: { value: 0 },
- paragraphsCount: { value: 0 },
- pagesCount: { value: 0 },
+ tokensCount: ZERO_VALUE,
+ charactersCount: ZERO_VALUE,
+ wordsCount: ZERO_VALUE,
+ sentencesCount: ZERO_VALUE,
+ linesCount: ZERO_VALUE,
+ paragraphsCount: ZERO_VALUE,
+ pagesCount: ZERO_VALUE,
  },
  output: {
- tokensCount: { value: 0 },
- charactersCount: { value: 0 },
- wordsCount: { value: 0 },
- sentencesCount: { value: 0 },
- linesCount: { value: 0 },
- paragraphsCount: { value: 0 },
- pagesCount: { value: 0 },
+ tokensCount: ZERO_VALUE,
+ charactersCount: ZERO_VALUE,
+ wordsCount: ZERO_VALUE,
+ sentencesCount: ZERO_VALUE,
+ linesCount: ZERO_VALUE,
+ paragraphsCount: ZERO_VALUE,
+ pagesCount: ZERO_VALUE,
  },
  });
  /**
@@ -2449,24 +2466,24 @@
  * @public exported from `@promptbook/core`
  */
  $deepFreeze({
- price: { value: 0, isUncertain: true },
+ price: UNCERTAIN_ZERO_VALUE,
  input: {
- tokensCount: { value: 0, isUncertain: true },
- charactersCount: { value: 0, isUncertain: true },
- wordsCount: { value: 0, isUncertain: true },
- sentencesCount: { value: 0, isUncertain: true },
- linesCount: { value: 0, isUncertain: true },
- paragraphsCount: { value: 0, isUncertain: true },
- pagesCount: { value: 0, isUncertain: true },
+ tokensCount: UNCERTAIN_ZERO_VALUE,
+ charactersCount: UNCERTAIN_ZERO_VALUE,
+ wordsCount: UNCERTAIN_ZERO_VALUE,
+ sentencesCount: UNCERTAIN_ZERO_VALUE,
+ linesCount: UNCERTAIN_ZERO_VALUE,
+ paragraphsCount: UNCERTAIN_ZERO_VALUE,
+ pagesCount: UNCERTAIN_ZERO_VALUE,
  },
  output: {
- tokensCount: { value: 0, isUncertain: true },
- charactersCount: { value: 0, isUncertain: true },
- wordsCount: { value: 0, isUncertain: true },
- sentencesCount: { value: 0, isUncertain: true },
- linesCount: { value: 0, isUncertain: true },
- paragraphsCount: { value: 0, isUncertain: true },
- pagesCount: { value: 0, isUncertain: true },
+ tokensCount: UNCERTAIN_ZERO_VALUE,
+ charactersCount: UNCERTAIN_ZERO_VALUE,
+ wordsCount: UNCERTAIN_ZERO_VALUE,
+ sentencesCount: UNCERTAIN_ZERO_VALUE,
+ linesCount: UNCERTAIN_ZERO_VALUE,
+ paragraphsCount: UNCERTAIN_ZERO_VALUE,
+ pagesCount: UNCERTAIN_ZERO_VALUE,
  },
  });
  /**
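
The change above only extracts the repeated `{ value: 0 }` literals into shared `ZERO_VALUE` / `UNCERTAIN_ZERO_VALUE` constants; the resulting frozen objects are unchanged. A small hedged sketch of how `ZERO_USAGE` is typically combined with `addUsage` (both names appear in this diff; the import path is an assumption):

```ts
import { addUsage, ZERO_USAGE } from '@promptbook/core'; // <- assumed re-export location

// Fold per-call usage into a running total, starting from the all-zero constant:
let total = ZERO_USAGE;
total = addUsage(total, ZERO_USAGE); // in real code this would be `promptResult.usage`
console.info(`Spent ~$${total.price.value}`); // `price.value` as seen in ZERO_USAGE above
```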
@@ -2527,8 +2544,9 @@
  * @returns LLM tools with same functionality with added total cost counting
  * @public exported from `@promptbook/core`
  */
- function countTotalUsage(llmTools) {
+ function countUsage(llmTools) {
  let totalUsage = ZERO_USAGE;
+ const spending = new rxjs.Subject();
  const proxyTools = {
  get title() {
  // TODO: [🧠] Maybe put here some suffix
@@ -2538,12 +2556,15 @@
  // TODO: [🧠] Maybe put here some suffix
  return llmTools.description;
  },
- async checkConfiguration() {
+ checkConfiguration() {
  return /* not await */ llmTools.checkConfiguration();
  },
  listModels() {
  return /* not await */ llmTools.listModels();
  },
+ spending() {
+ return spending.asObservable();
+ },
  getTotalUsage() {
  // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
  return totalUsage;
@@ -2554,6 +2575,7 @@
  // console.info('[🚕] callChatModel through countTotalUsage');
  const promptResult = await llmTools.callChatModel(prompt);
  totalUsage = addUsage(totalUsage, promptResult.usage);
+ spending.next(promptResult.usage);
  return promptResult;
  };
  }
@@ -2562,6 +2584,7 @@
  // console.info('[🚕] callCompletionModel through countTotalUsage');
  const promptResult = await llmTools.callCompletionModel(prompt);
  totalUsage = addUsage(totalUsage, promptResult.usage);
+ spending.next(promptResult.usage);
  return promptResult;
  };
  }
@@ -2570,6 +2593,7 @@
  // console.info('[🚕] callEmbeddingModel through countTotalUsage');
  const promptResult = await llmTools.callEmbeddingModel(prompt);
  totalUsage = addUsage(totalUsage, promptResult.usage);
+ spending.next(promptResult.usage);
  return promptResult;
  };
  }
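
Besides the rename from `countTotalUsage` to `countUsage`, the wrapper now exposes a `spending()` stream (an RxJS subject pushed on every chat, completion, and embedding call) next to the existing `getTotalUsage()`. A hedged sketch of consuming it; the `llmTools` instance and both import paths are assumptions:

```ts
import { countUsage } from '@promptbook/core'; // <- assumed re-export location
import type { LlmExecutionTools } from '@promptbook/types'; // <- assumed type export

declare const llmTools: LlmExecutionTools; // any provider tools, e.g. OpenAI or Anthropic (not shown here)

const llmToolsWithUsage = countUsage(llmTools);

// React to every per-call usage report as it is emitted:
const subscription = llmToolsWithUsage.spending().subscribe((usage) => {
    console.info('Another call cost', usage.price.value);
});

// ...later, read the accumulated total and clean up:
console.info('Total usage:', llmToolsWithUsage.getTotalUsage());
subscription.unsubscribe();
```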
@@ -2747,6 +2771,7 @@
  `);
  // TODO: [🟥] Detect browser / node and make it colorfull
  console.warn(warningMessage);
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  /*
  return {
  async listModels() {
@@ -3690,63 +3715,73 @@
  const { maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, rootDirname, isVerbose = DEFAULT_IS_VERBOSE } = options;
  const knowledgePreparedUnflatten = new Array(knowledgeSources.length);
  await forEachAsync(knowledgeSources, { maxParallelCount }, async (knowledgeSource, index) => {
- let partialPieces = null;
- const sourceHandler = await makeKnowledgeSourceHandler(knowledgeSource, tools, { rootDirname, isVerbose });
- const scrapers = arrayableToArray(tools.scrapers);
- for (const scraper of scrapers) {
- if (!scraper.metadata.mimeTypes.includes(sourceHandler.mimeType)
- // <- TODO: [🦔] Implement mime-type wildcards
- ) {
- continue;
- }
- const partialPiecesUnchecked = await scraper.scrape(sourceHandler);
- if (partialPiecesUnchecked !== null) {
- partialPieces = [...partialPiecesUnchecked];
- // <- TODO: [🪓] Here should be no need for spreading new array, just `partialPieces = partialPiecesUnchecked`
- break;
- }
- console.warn(spaceTrim__default["default"]((block) => `
- Cannot scrape knowledge from source despite the scraper \`${scraper.metadata.className}\` supports the mime type "${sourceHandler.mimeType}".
+ try {
+ let partialPieces = null;
+ const sourceHandler = await makeKnowledgeSourceHandler(knowledgeSource, tools, { rootDirname, isVerbose });
+ const scrapers = arrayableToArray(tools.scrapers);
+ for (const scraper of scrapers) {
+ if (!scraper.metadata.mimeTypes.includes(sourceHandler.mimeType)
+ // <- TODO: [🦔] Implement mime-type wildcards
+ ) {
+ continue;
+ }
+ const partialPiecesUnchecked = await scraper.scrape(sourceHandler);
+ if (partialPiecesUnchecked !== null) {
+ partialPieces = [...partialPiecesUnchecked];
+ // <- TODO: [🪓] Here should be no need for spreading new array, just `partialPieces = partialPiecesUnchecked`
+ break;
+ }
+ console.warn(spaceTrim__default["default"]((block) => `
+ Cannot scrape knowledge from source despite the scraper \`${scraper.metadata.className}\` supports the mime type "${sourceHandler.mimeType}".

- The source:
- ${block(knowledgeSource.knowledgeSourceContent
- .split('\n')
- .map((line) => `> ${line}`)
- .join('\n'))}
+ The source:
+ ${block(knowledgeSource.knowledgeSourceContent
+ .split('\n')
+ .map((line) => `> ${line}`)
+ .join('\n'))}

- ${block($registeredScrapersMessage(scrapers))}
+ ${block($registeredScrapersMessage(scrapers))}


- `));
- }
- if (partialPieces === null) {
- throw new KnowledgeScrapeError(spaceTrim__default["default"]((block) => `
- Cannot scrape knowledge
+ `));
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
+ }
+ if (partialPieces === null) {
+ throw new KnowledgeScrapeError(spaceTrim__default["default"]((block) => `
+ Cannot scrape knowledge

- The source:
- > ${block(knowledgeSource.knowledgeSourceContent
- .split('\n')
- .map((line) => `> ${line}`)
- .join('\n'))}
+ The source:
+ > ${block(knowledgeSource.knowledgeSourceContent
+ .split('\n')
+ .map((line) => `> ${line}`)
+ .join('\n'))}

- No scraper found for the mime type "${sourceHandler.mimeType}"
+ No scraper found for the mime type "${sourceHandler.mimeType}"

- ${block($registeredScrapersMessage(scrapers))}
+ ${block($registeredScrapersMessage(scrapers))}


- `));
+ `));
+ }
+ const pieces = partialPieces.map((partialPiece) => ({
+ ...partialPiece,
+ sources: [
+ {
+ name: knowledgeSource.name,
+ // line, column <- TODO: [☀]
+ // <- TODO: [❎]
+ },
+ ],
+ }));
+ knowledgePreparedUnflatten[index] = pieces;
+ }
+ catch (error) {
+ if (!(error instanceof Error)) {
+ throw error;
+ }
+ console.warn(error);
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
- const pieces = partialPieces.map((partialPiece) => ({
- ...partialPiece,
- sources: [
- {
- name: knowledgeSource.name,
- // line, column <- TODO: [☀]
- // <- TODO: [❎]
- },
- ],
- }));
- knowledgePreparedUnflatten[index] = pieces;
  });
  const knowledgePrepared = knowledgePreparedUnflatten.flat();
  return knowledgePrepared;
@@ -3852,7 +3887,7 @@
  // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
  const _llms = arrayableToArray(tools.llm);
  const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
- const llmToolsWithUsage = countTotalUsage(llmTools);
+ const llmToolsWithUsage = countUsage(llmTools);
  // <- TODO: [🌯]
  /*
  TODO: [🧠][🪑][🔃] Should this be done or not
@@ -4164,7 +4199,7 @@
  if (parameterNames.has(subparameterName)) {
  parameterNames.delete(subparameterName);
  parameterNames.add(foreach.parameterName);
- // <- TODO: [🚎] Warn/logic error when `subparameterName` not used
+ // <- TODO: [🏮] Warn/logic error when `subparameterName` not used
  }
  }
  }
@@ -5777,6 +5812,7 @@

  @see more at https://ptbk.io/prepare-pipeline
  `));
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
  let runCount = 0;
  const pipelineExecutorWithCallback = async (inputParameters, onProgress) => {
@@ -6802,6 +6838,7 @@
  https://github.com/webgptorg/promptbook
  `));
  });
+ // TODO: !!!!!! Add login route
  app.get(`${rootPath}/books`, async (request, response) => {
  if (collection === null) {
  response.status(500).send('No collection available');