@promptbook/pdf 0.88.0 → 0.89.0-2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30) hide show
  1. package/README.md +4 -0
  2. package/esm/index.es.js +114 -82
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -2
  5. package/esm/typings/src/_packages/types.index.d.ts +16 -4
  6. package/esm/typings/src/cli/cli-commands/login.d.ts +15 -0
  7. package/esm/typings/src/execution/PipelineExecutorResult.d.ts +2 -2
  8. package/esm/typings/src/execution/PromptResult.d.ts +2 -2
  9. package/esm/typings/src/execution/{PromptResultUsage.d.ts → Usage.d.ts} +5 -5
  10. package/esm/typings/src/execution/utils/addUsage.d.ts +2 -2
  11. package/esm/typings/src/execution/utils/computeUsageCounts.d.ts +3 -3
  12. package/esm/typings/src/execution/utils/usage-constants.d.ts +77 -60
  13. package/esm/typings/src/execution/utils/usageToHuman.d.ts +5 -5
  14. package/esm/typings/src/execution/utils/usageToWorktime.d.ts +5 -5
  15. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +9 -2
  16. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/{countTotalUsage.d.ts → countUsage.d.ts} +1 -1
  17. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/limitTotalUsage.d.ts +2 -2
  18. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +2 -2
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +0 -9
  20. package/esm/typings/src/llm-providers/openai/computeOpenAiUsage.d.ts +2 -2
  21. package/esm/typings/src/pipeline/PipelineJson/PreparationJson.d.ts +2 -2
  22. package/esm/typings/src/playground/BrjappConnector.d.ts +67 -0
  23. package/esm/typings/src/playground/brjapp-api-schema.d.ts +12879 -0
  24. package/esm/typings/src/playground/playground.d.ts +5 -0
  25. package/esm/typings/src/remote-server/socket-types/_subtypes/PromptbookServer_Identification.d.ts +2 -1
  26. package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +15 -3
  27. package/esm/typings/src/types/typeAliases.d.ts +8 -2
  28. package/package.json +2 -2
  29. package/umd/index.umd.js +115 -83
  30. package/umd/index.umd.js.map +1 -1
@@ -0,0 +1,5 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
3
+ /**
4
+ * Note: [⚫] Code in this file should never be published in any package
5
+ */
@@ -37,7 +37,8 @@ export type PromptbookServer_AnonymousIdentification = {
37
37
  /**
38
38
  * Identifier of the end user
39
39
  *
40
- * Note: this is passed to the certain model providers to identify misuse
40
+ * Note: This can be either an id, an email, or any other identifier
41
+ * Note: In anonymous mode, this is passed to certain model providers to identify misuse
41
42
  * Note: In anonymous mode, there is no need to identify yourself, nor does it change the actual configuration of LLM Tools (unlike in application mode)
42
43
  */
43
44
  readonly userId?: string_user_id;
@@ -58,15 +58,27 @@ export type ApplicationRemoteServerOptions<TCustomOptions> = {
58
58
  };
59
59
  export type ApplicationRemoteServerClientOptions<TCustomOptions> = {
60
60
  /**
61
- * @@@
61
+ * Identifier of the application
62
+ *
63
+ * Note: This is useful when you use the Promptbook remote server for multiple apps/frontends; if it is used for just a single app, use here just "app" or "your-app-name"
64
+ * Note: This can be some id or some semantic name like "email-agent"
62
65
  */
63
66
  readonly appId: string_app_id | null;
64
67
  /**
65
- * @@@
68
+ * Identifier of the end user
69
+ *
70
+ * Note: This can be either an id, an email, or any other identifier
71
+ * Note: This is also passed to certain model providers to identify misuse
66
72
  */
67
73
  readonly userId?: string_user_id;
68
74
  /**
69
- * @@@
75
+ * Token of the user to verify their identity
76
+ *
77
+ * Note: This is passed for example to `createLlmExecutionTools`
78
+ */
79
+ readonly userToken?: string_user_id;
80
+ /**
81
+ * Additional arbitrary options to identify the client or to pass custom metadata
70
82
  */
71
83
  readonly customOptions?: TCustomOptions;
72
84
  };
@@ -242,6 +242,12 @@ export type string_promptbook_documentation_url = `https://github.com/webgptorg/
242
242
  * For example `"towns.cz"`
243
243
  */
244
244
  export type string_domain = string;
245
+ /**
246
+ * Semantic helper
247
+ *
248
+ * For example `"https://*.pavolhejny.com/*"`
249
+ */
250
+ export type string_origin = string;
245
251
  /**
246
252
  * Semantic helper
247
253
  *
@@ -433,13 +439,13 @@ export type string_uuid = string & {
433
439
  *
434
440
  * @@@
435
441
  */
436
- export type string_app_id = id;
442
+ export type string_app_id = id | 'app';
437
443
  /**
438
444
  * End user identifier
439
445
  *
440
446
  * @@@
441
447
  */
442
- export type string_user_id = id;
448
+ export type string_user_id = id | string_email;
443
449
  /**
444
450
  * Semantic helper
445
451
  *
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/pdf",
3
- "version": "0.88.0",
3
+ "version": "0.89.0-2",
4
4
  "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -47,7 +47,7 @@
47
47
  "module": "./esm/index.es.js",
48
48
  "typings": "./esm/typings/src/_packages/pdf.index.d.ts",
49
49
  "peerDependencies": {
50
- "@promptbook/core": "0.88.0"
50
+ "@promptbook/core": "0.89.0-2"
51
51
  },
52
52
  "dependencies": {
53
53
  "crypto": "^1.0.1",
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
25
25
  * @generated
26
26
  * @see https://github.com/webgptorg/promptbook
27
27
  */
28
- const PROMPTBOOK_ENGINE_VERSION = '0.88.0';
28
+ const PROMPTBOOK_ENGINE_VERSION = '0.89.0-2';
29
29
  /**
30
30
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
31
31
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2309,6 +2309,7 @@
2309
2309
  const { isSuccessful, errors, warnings } = executionResult;
2310
2310
  for (const warning of warnings) {
2311
2311
  console.warn(warning.message);
2312
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
2312
2313
  }
2313
2314
  if (isSuccessful === true) {
2314
2315
  return;
@@ -2487,30 +2488,42 @@
2487
2488
  await Promise.all(tasks);
2488
2489
  }
2489
2490
 
2491
+ /**
2492
+ * Represents the certain zero value
2493
+ *
2494
+ * @public exported from `@promptbook/core`
2495
+ */
2496
+ const ZERO_VALUE = $deepFreeze({ value: 0 });
2497
+ /**
2498
+ * Represents the uncertain zero value
2499
+ *
2500
+ * @public exported from `@promptbook/core`
2501
+ */
2502
+ const UNCERTAIN_ZERO_VALUE = $deepFreeze({ value: 0, isUncertain: true });
2490
2503
  /**
2491
2504
  * Represents the usage with no resources consumed
2492
2505
  *
2493
2506
  * @public exported from `@promptbook/core`
2494
2507
  */
2495
2508
  const ZERO_USAGE = $deepFreeze({
2496
- price: { value: 0 },
2509
+ price: ZERO_VALUE,
2497
2510
  input: {
2498
- tokensCount: { value: 0 },
2499
- charactersCount: { value: 0 },
2500
- wordsCount: { value: 0 },
2501
- sentencesCount: { value: 0 },
2502
- linesCount: { value: 0 },
2503
- paragraphsCount: { value: 0 },
2504
- pagesCount: { value: 0 },
2511
+ tokensCount: ZERO_VALUE,
2512
+ charactersCount: ZERO_VALUE,
2513
+ wordsCount: ZERO_VALUE,
2514
+ sentencesCount: ZERO_VALUE,
2515
+ linesCount: ZERO_VALUE,
2516
+ paragraphsCount: ZERO_VALUE,
2517
+ pagesCount: ZERO_VALUE,
2505
2518
  },
2506
2519
  output: {
2507
- tokensCount: { value: 0 },
2508
- charactersCount: { value: 0 },
2509
- wordsCount: { value: 0 },
2510
- sentencesCount: { value: 0 },
2511
- linesCount: { value: 0 },
2512
- paragraphsCount: { value: 0 },
2513
- pagesCount: { value: 0 },
2520
+ tokensCount: ZERO_VALUE,
2521
+ charactersCount: ZERO_VALUE,
2522
+ wordsCount: ZERO_VALUE,
2523
+ sentencesCount: ZERO_VALUE,
2524
+ linesCount: ZERO_VALUE,
2525
+ paragraphsCount: ZERO_VALUE,
2526
+ pagesCount: ZERO_VALUE,
2514
2527
  },
2515
2528
  });
2516
2529
  /**
@@ -2519,24 +2532,24 @@
2519
2532
  * @public exported from `@promptbook/core`
2520
2533
  */
2521
2534
  $deepFreeze({
2522
- price: { value: 0, isUncertain: true },
2535
+ price: UNCERTAIN_ZERO_VALUE,
2523
2536
  input: {
2524
- tokensCount: { value: 0, isUncertain: true },
2525
- charactersCount: { value: 0, isUncertain: true },
2526
- wordsCount: { value: 0, isUncertain: true },
2527
- sentencesCount: { value: 0, isUncertain: true },
2528
- linesCount: { value: 0, isUncertain: true },
2529
- paragraphsCount: { value: 0, isUncertain: true },
2530
- pagesCount: { value: 0, isUncertain: true },
2537
+ tokensCount: UNCERTAIN_ZERO_VALUE,
2538
+ charactersCount: UNCERTAIN_ZERO_VALUE,
2539
+ wordsCount: UNCERTAIN_ZERO_VALUE,
2540
+ sentencesCount: UNCERTAIN_ZERO_VALUE,
2541
+ linesCount: UNCERTAIN_ZERO_VALUE,
2542
+ paragraphsCount: UNCERTAIN_ZERO_VALUE,
2543
+ pagesCount: UNCERTAIN_ZERO_VALUE,
2531
2544
  },
2532
2545
  output: {
2533
- tokensCount: { value: 0, isUncertain: true },
2534
- charactersCount: { value: 0, isUncertain: true },
2535
- wordsCount: { value: 0, isUncertain: true },
2536
- sentencesCount: { value: 0, isUncertain: true },
2537
- linesCount: { value: 0, isUncertain: true },
2538
- paragraphsCount: { value: 0, isUncertain: true },
2539
- pagesCount: { value: 0, isUncertain: true },
2546
+ tokensCount: UNCERTAIN_ZERO_VALUE,
2547
+ charactersCount: UNCERTAIN_ZERO_VALUE,
2548
+ wordsCount: UNCERTAIN_ZERO_VALUE,
2549
+ sentencesCount: UNCERTAIN_ZERO_VALUE,
2550
+ linesCount: UNCERTAIN_ZERO_VALUE,
2551
+ paragraphsCount: UNCERTAIN_ZERO_VALUE,
2552
+ pagesCount: UNCERTAIN_ZERO_VALUE,
2540
2553
  },
2541
2554
  });
2542
2555
  /**
@@ -2597,8 +2610,9 @@
2597
2610
  * @returns LLM tools with same functionality with added total cost counting
2598
2611
  * @public exported from `@promptbook/core`
2599
2612
  */
2600
- function countTotalUsage(llmTools) {
2613
+ function countUsage(llmTools) {
2601
2614
  let totalUsage = ZERO_USAGE;
2615
+ const spending = new rxjs.Subject();
2602
2616
  const proxyTools = {
2603
2617
  get title() {
2604
2618
  // TODO: [🧠] Maybe put here some suffix
@@ -2608,12 +2622,15 @@
2608
2622
  // TODO: [🧠] Maybe put here some suffix
2609
2623
  return llmTools.description;
2610
2624
  },
2611
- async checkConfiguration() {
2625
+ checkConfiguration() {
2612
2626
  return /* not await */ llmTools.checkConfiguration();
2613
2627
  },
2614
2628
  listModels() {
2615
2629
  return /* not await */ llmTools.listModels();
2616
2630
  },
2631
+ spending() {
2632
+ return spending.asObservable();
2633
+ },
2617
2634
  getTotalUsage() {
2618
2635
  // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
2619
2636
  return totalUsage;
@@ -2624,6 +2641,7 @@
2624
2641
  // console.info('[🚕] callChatModel through countTotalUsage');
2625
2642
  const promptResult = await llmTools.callChatModel(prompt);
2626
2643
  totalUsage = addUsage(totalUsage, promptResult.usage);
2644
+ spending.next(promptResult.usage);
2627
2645
  return promptResult;
2628
2646
  };
2629
2647
  }
@@ -2632,6 +2650,7 @@
2632
2650
  // console.info('[🚕] callCompletionModel through countTotalUsage');
2633
2651
  const promptResult = await llmTools.callCompletionModel(prompt);
2634
2652
  totalUsage = addUsage(totalUsage, promptResult.usage);
2653
+ spending.next(promptResult.usage);
2635
2654
  return promptResult;
2636
2655
  };
2637
2656
  }
@@ -2640,6 +2659,7 @@
2640
2659
  // console.info('[🚕] callEmbeddingModel through countTotalUsage');
2641
2660
  const promptResult = await llmTools.callEmbeddingModel(prompt);
2642
2661
  totalUsage = addUsage(totalUsage, promptResult.usage);
2662
+ spending.next(promptResult.usage);
2643
2663
  return promptResult;
2644
2664
  };
2645
2665
  }
@@ -2817,6 +2837,7 @@
2817
2837
  `);
2818
2838
  // TODO: [🟥] Detect browser / node and make it colorfull
2819
2839
  console.warn(warningMessage);
2840
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
2820
2841
  /*
2821
2842
  return {
2822
2843
  async listModels() {
@@ -3374,63 +3395,73 @@
3374
3395
  const { maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, rootDirname, isVerbose = DEFAULT_IS_VERBOSE } = options;
3375
3396
  const knowledgePreparedUnflatten = new Array(knowledgeSources.length);
3376
3397
  await forEachAsync(knowledgeSources, { maxParallelCount }, async (knowledgeSource, index) => {
3377
- let partialPieces = null;
3378
- const sourceHandler = await makeKnowledgeSourceHandler(knowledgeSource, tools, { rootDirname, isVerbose });
3379
- const scrapers = arrayableToArray(tools.scrapers);
3380
- for (const scraper of scrapers) {
3381
- if (!scraper.metadata.mimeTypes.includes(sourceHandler.mimeType)
3382
- // <- TODO: [🦔] Implement mime-type wildcards
3383
- ) {
3384
- continue;
3385
- }
3386
- const partialPiecesUnchecked = await scraper.scrape(sourceHandler);
3387
- if (partialPiecesUnchecked !== null) {
3388
- partialPieces = [...partialPiecesUnchecked];
3389
- // <- TODO: [🪓] Here should be no need for spreading new array, just `partialPieces = partialPiecesUnchecked`
3390
- break;
3391
- }
3392
- console.warn(spaceTrim__default["default"]((block) => `
3393
- Cannot scrape knowledge from source despite the scraper \`${scraper.metadata.className}\` supports the mime type "${sourceHandler.mimeType}".
3398
+ try {
3399
+ let partialPieces = null;
3400
+ const sourceHandler = await makeKnowledgeSourceHandler(knowledgeSource, tools, { rootDirname, isVerbose });
3401
+ const scrapers = arrayableToArray(tools.scrapers);
3402
+ for (const scraper of scrapers) {
3403
+ if (!scraper.metadata.mimeTypes.includes(sourceHandler.mimeType)
3404
+ // <- TODO: [🦔] Implement mime-type wildcards
3405
+ ) {
3406
+ continue;
3407
+ }
3408
+ const partialPiecesUnchecked = await scraper.scrape(sourceHandler);
3409
+ if (partialPiecesUnchecked !== null) {
3410
+ partialPieces = [...partialPiecesUnchecked];
3411
+ // <- TODO: [🪓] Here should be no need for spreading new array, just `partialPieces = partialPiecesUnchecked`
3412
+ break;
3413
+ }
3414
+ console.warn(spaceTrim__default["default"]((block) => `
3415
+ Cannot scrape knowledge from source despite the scraper \`${scraper.metadata.className}\` supports the mime type "${sourceHandler.mimeType}".
3394
3416
 
3395
- The source:
3396
- ${block(knowledgeSource.knowledgeSourceContent
3397
- .split('\n')
3398
- .map((line) => `> ${line}`)
3399
- .join('\n'))}
3417
+ The source:
3418
+ ${block(knowledgeSource.knowledgeSourceContent
3419
+ .split('\n')
3420
+ .map((line) => `> ${line}`)
3421
+ .join('\n'))}
3400
3422
 
3401
- ${block($registeredScrapersMessage(scrapers))}
3423
+ ${block($registeredScrapersMessage(scrapers))}
3402
3424
 
3403
3425
 
3404
- `));
3405
- }
3406
- if (partialPieces === null) {
3407
- throw new KnowledgeScrapeError(spaceTrim__default["default"]((block) => `
3408
- Cannot scrape knowledge
3409
-
3410
- The source:
3411
- > ${block(knowledgeSource.knowledgeSourceContent
3412
- .split('\n')
3413
- .map((line) => `> ${line}`)
3414
- .join('\n'))}
3426
+ `));
3427
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
3428
+ }
3429
+ if (partialPieces === null) {
3430
+ throw new KnowledgeScrapeError(spaceTrim__default["default"]((block) => `
3431
+ Cannot scrape knowledge
3415
3432
 
3416
- No scraper found for the mime type "${sourceHandler.mimeType}"
3433
+ The source:
3434
+ > ${block(knowledgeSource.knowledgeSourceContent
3435
+ .split('\n')
3436
+ .map((line) => `> ${line}`)
3437
+ .join('\n'))}
3417
3438
 
3418
- ${block($registeredScrapersMessage(scrapers))}
3439
+ No scraper found for the mime type "${sourceHandler.mimeType}"
3419
3440
 
3441
+ ${block($registeredScrapersMessage(scrapers))}
3420
3442
 
3421
- `));
3443
+
3444
+ `));
3445
+ }
3446
+ const pieces = partialPieces.map((partialPiece) => ({
3447
+ ...partialPiece,
3448
+ sources: [
3449
+ {
3450
+ name: knowledgeSource.name,
3451
+ // line, column <- TODO: [☀]
3452
+ // <- TODO: [❎]
3453
+ },
3454
+ ],
3455
+ }));
3456
+ knowledgePreparedUnflatten[index] = pieces;
3457
+ }
3458
+ catch (error) {
3459
+ if (!(error instanceof Error)) {
3460
+ throw error;
3461
+ }
3462
+ console.warn(error);
3463
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
3422
3464
  }
3423
- const pieces = partialPieces.map((partialPiece) => ({
3424
- ...partialPiece,
3425
- sources: [
3426
- {
3427
- name: knowledgeSource.name,
3428
- // line, column <- TODO: [☀]
3429
- // <- TODO: [❎]
3430
- },
3431
- ],
3432
- }));
3433
- knowledgePreparedUnflatten[index] = pieces;
3434
3465
  });
3435
3466
  const knowledgePrepared = knowledgePreparedUnflatten.flat();
3436
3467
  return knowledgePrepared;
@@ -3536,7 +3567,7 @@
3536
3567
  // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
3537
3568
  const _llms = arrayableToArray(tools.llm);
3538
3569
  const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
3539
- const llmToolsWithUsage = countTotalUsage(llmTools);
3570
+ const llmToolsWithUsage = countUsage(llmTools);
3540
3571
  // <- TODO: [🌯]
3541
3572
  /*
3542
3573
  TODO: [🧠][🪑][🔃] Should this be done or not
@@ -3848,7 +3879,7 @@
3848
3879
  if (parameterNames.has(subparameterName)) {
3849
3880
  parameterNames.delete(subparameterName);
3850
3881
  parameterNames.add(foreach.parameterName);
3851
- // <- TODO: [🚎] Warn/logic error when `subparameterName` not used
3882
+ // <- TODO: [🏮] Warn/logic error when `subparameterName` not used
3852
3883
  }
3853
3884
  }
3854
3885
  }
@@ -5444,6 +5475,7 @@
5444
5475
 
5445
5476
  @see more at https://ptbk.io/prepare-pipeline
5446
5477
  `));
5478
+ // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
5447
5479
  }
5448
5480
  let runCount = 0;
5449
5481
  const pipelineExecutorWithCallback = async (inputParameters, onProgress) => {