@promptbook/pdf 0.92.0-32 → 0.92.0-34

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -102,12 +102,12 @@ import { createLlmToolsFromConfiguration } from '../llm-providers/_common/regist
102
102
  import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
103
103
  import { countUsage } from '../llm-providers/_common/utils/count-total-usage/countUsage';
104
104
  import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
105
+ import { joinLlmExecutionTools } from '../llm-providers/_multiple/joinLlmExecutionTools';
106
+ import { MultipleLlmExecutionTools } from '../llm-providers/_multiple/MultipleLlmExecutionTools';
105
107
  import { _AnthropicClaudeMetadataRegistration } from '../llm-providers/anthropic-claude/register-configuration';
106
108
  import { _AzureOpenAiMetadataRegistration } from '../llm-providers/azure-openai/register-configuration';
107
109
  import { _DeepseekMetadataRegistration } from '../llm-providers/deepseek/register-configuration';
108
110
  import { _GoogleMetadataRegistration } from '../llm-providers/google/register-configuration';
109
- import { joinLlmExecutionTools } from '../llm-providers/multiple/joinLlmExecutionTools';
110
- import { MultipleLlmExecutionTools } from '../llm-providers/multiple/MultipleLlmExecutionTools';
111
111
  import { _OpenAiMetadataRegistration } from '../llm-providers/openai/register-configuration';
112
112
  import { _OpenAiAssistantMetadataRegistration } from '../llm-providers/openai/register-configuration';
113
113
  import { migratePipeline } from '../migrations/migratePipeline';
@@ -249,12 +249,12 @@ export { createLlmToolsFromConfiguration };
249
249
  export { cacheLlmTools };
250
250
  export { countUsage };
251
251
  export { limitTotalUsage };
252
+ export { joinLlmExecutionTools };
253
+ export { MultipleLlmExecutionTools };
252
254
  export { _AnthropicClaudeMetadataRegistration };
253
255
  export { _AzureOpenAiMetadataRegistration };
254
256
  export { _DeepseekMetadataRegistration };
255
257
  export { _GoogleMetadataRegistration };
256
- export { joinLlmExecutionTools };
257
- export { MultipleLlmExecutionTools };
258
258
  export { _OpenAiMetadataRegistration };
259
259
  export { _OpenAiAssistantMetadataRegistration };
260
260
  export { migratePipeline };
@@ -134,6 +134,7 @@ import type { JavascriptExecutionToolsOptions } from '../scripting/javascript/Ja
134
134
  import type { PostprocessingFunction } from '../scripting/javascript/JavascriptExecutionToolsOptions';
135
135
  import type { PromptbookStorage } from '../storage/_common/PromptbookStorage';
136
136
  import type { FileCacheStorageOptions } from '../storage/file-cache-storage/FileCacheStorageOptions';
137
+ import type { IndexedDbStorageOptions } from '../storage/local-storage/utils/IndexedDbStorageOptions';
137
138
  import type { IntermediateFilesStrategy } from '../types/IntermediateFilesStrategy';
138
139
  import type { ModelRequirements } from '../types/ModelRequirements';
139
140
  import type { CompletionModelRequirements } from '../types/ModelRequirements';
@@ -431,6 +432,7 @@ export type { JavascriptExecutionToolsOptions };
431
432
  export type { PostprocessingFunction };
432
433
  export type { PromptbookStorage };
433
434
  export type { FileCacheStorageOptions };
435
+ export type { IndexedDbStorageOptions };
434
436
  export type { IntermediateFilesStrategy };
435
437
  export type { ModelRequirements };
436
438
  export type { CompletionModelRequirements };
@@ -2,6 +2,7 @@ import type { Observable } from 'rxjs';
2
2
  import { PartialDeep } from 'type-fest';
3
3
  import type { task_id } from '../types/typeAliases';
4
4
  import type { string_SCREAMING_CASE } from '../utils/normalization/normalizeTo_SCREAMING_CASE';
5
+ import type { string_promptbook_version } from '../version';
5
6
  import type { AbstractTaskResult } from './AbstractTaskResult';
6
7
  import type { PipelineExecutorResult } from './PipelineExecutorResult';
7
8
  /**
@@ -12,12 +13,21 @@ type CreateTaskOptions<TTaskResult extends AbstractTaskResult> = {
12
13
  * The type of task to create
13
14
  */
14
15
  readonly taskType: AbstractTask<TTaskResult>['taskType'];
16
+ /**
17
+ * Human-readable title of the task - used for displaying in the UI
18
+ */
19
+ readonly title: AbstractTask<TTaskResult>['title'];
15
20
  /**
16
21
  * Callback that processes the task and updates the ongoing result
17
22
  * @param ongoingResult The partial result of the task processing
18
23
  * @returns The final task result
19
24
  */
20
- taskProcessCallback(updateOngoingResult: (newOngoingResult: PartialDeep<TTaskResult>) => void): Promise<TTaskResult>;
25
+ taskProcessCallback(updateOngoingResult: (newOngoingResult: PartialDeep<TTaskResult> & {
26
+ /**
27
+ * Optional update of the task title
28
+ */
29
+ readonly title?: AbstractTask<TTaskResult>['title'];
30
+ }) => void): Promise<TTaskResult>;
21
31
  };
22
32
  /**
23
33
  * Helper to create a new task
@@ -52,10 +62,18 @@ export type AbstractTask<TTaskResult extends AbstractTaskResult> = {
52
62
  * Type of the task
53
63
  */
54
64
  readonly taskType: string_SCREAMING_CASE;
65
+ /**
66
+ * Version of the promptbook used to run the task
67
+ */
68
+ readonly promptbookVersion: string_promptbook_version;
55
69
  /**
56
70
  * Unique identifier for the task
57
71
  */
58
72
  readonly taskId: task_id;
73
+ /**
74
+ * Human-readable title of the task - used for displaying in the UI
75
+ */
76
+ readonly title: string;
59
77
  /**
60
78
  * Status of the task
61
79
  */
@@ -1,4 +1,4 @@
1
- import { MultipleLlmExecutionTools } from '../../multiple/MultipleLlmExecutionTools';
1
+ import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
2
2
  import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFromConfiguration';
3
3
  /**
4
4
  * Automatically configures LLM tools from environment variables in Node.js
@@ -1,5 +1,5 @@
1
1
  import type { string_user_id } from '../../../types/typeAliases';
2
- import { MultipleLlmExecutionTools } from '../../multiple/MultipleLlmExecutionTools';
2
+ import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
3
3
  import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
4
4
  /**
5
5
  * Options for `$provideLlmToolsFromEnv`
@@ -9,6 +9,10 @@ import type { string_user_id } from '../../types/typeAliases';
9
9
  * @public exported from `@promptbook/azure-openai`
10
10
  */
11
11
  export type AzureOpenAiExecutionToolsOptions = CommonToolsOptions & {
12
+ /**
13
+ * The API key of the Azure OpenAI resource
14
+ */
15
+ readonly apiKey: string_token;
12
16
  /**
13
17
  * The resource name of the Azure OpenAI resource
14
18
  *
@@ -23,10 +27,6 @@ export type AzureOpenAiExecutionToolsOptions = CommonToolsOptions & {
23
27
  * Note: Typically you have one resource and multiple deployments.
24
28
  */
25
29
  readonly deploymentName: string_name;
26
- /**
27
- * The API key of the Azure OpenAI resource
28
- */
29
- readonly apiKey: string_token;
30
30
  /**
31
31
  * A unique identifier representing your end-user, which can help Azure OpenAI to monitor
32
32
  * and detect abuse.
@@ -1,10 +1,11 @@
1
1
  import type { PromptbookStorage } from '../_common/PromptbookStorage';
2
+ import type { IndexedDbStorageOptions } from './utils/IndexedDbStorageOptions';
2
3
  /**
3
4
  * Gets wrapper around IndexedDB which can be used as PromptbookStorage
4
5
  *
5
6
  * @public exported from `@promptbook/browser`
6
7
  */
7
- export declare function getIndexedDbStorage<TItem>(): PromptbookStorage<TItem>;
8
+ export declare function getIndexedDbStorage<TItem>(options: IndexedDbStorageOptions): PromptbookStorage<TItem>;
8
9
  /**
9
10
  * Note: [🔵] Code in this file should never be published outside of `@promptbook/browser`
10
11
  */
@@ -0,0 +1,14 @@
1
+ import type { string_name } from '../../../types/typeAliases';
2
+ /**
3
+ * Options for IndexedDB storage
4
+ */
5
+ export type IndexedDbStorageOptions = {
6
+ /**
7
+ * Name of the database
8
+ */
9
+ databaseName: string_name;
10
+ /**
11
+ * Name of the object store (table) in the database
12
+ */
13
+ storeName: string_name;
14
+ };
@@ -1,7 +1,8 @@
1
1
  import type { PromptbookStorage } from '../../_common/PromptbookStorage';
2
+ import type { IndexedDbStorageOptions } from './IndexedDbStorageOptions';
2
3
  /**
3
4
  * Creates a PromptbookStorage backed by IndexedDB.
4
5
  * Uses a single object store named 'promptbook'.
5
6
  * @private for `getIndexedDbStorage`
6
7
  */
7
- export declare function makePromptbookStorageFromIndexedDb<TValue>(dbName?: string, storeName?: string): PromptbookStorage<TValue>;
8
+ export declare function makePromptbookStorageFromIndexedDb<TValue>(options: IndexedDbStorageOptions): PromptbookStorage<TValue>;
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.92.0-31`).
18
+ * It follows semantic versioning (e.g., `0.92.0-33`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/pdf",
3
- "version": "0.92.0-32",
3
+ "version": "0.92.0-34",
4
4
  "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -51,7 +51,7 @@
51
51
  "module": "./esm/index.es.js",
52
52
  "typings": "./esm/typings/src/_packages/pdf.index.d.ts",
53
53
  "peerDependencies": {
54
- "@promptbook/core": "0.92.0-32"
54
+ "@promptbook/core": "0.92.0-34"
55
55
  },
56
56
  "dependencies": {
57
57
  "crypto": "1.0.1",
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
25
25
  * @generated
26
26
  * @see https://github.com/webgptorg/promptbook
27
27
  */
28
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-32';
28
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-34';
29
29
  /**
30
30
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
31
31
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2513,6 +2513,7 @@
2513
2513
  */
2514
2514
  function createTask(options) {
2515
2515
  const { taskType, taskProcessCallback } = options;
2516
+ let { title } = options;
2516
2517
  // TODO: [🐙] DRY
2517
2518
  const taskId = `${taskType.toLowerCase().substring(0, 4)}-${$randomToken(8 /* <- TODO: To global config + Use Base58 to avoid simmilar char conflicts */)}`;
2518
2519
  let status = 'RUNNING';
@@ -2524,6 +2525,10 @@
2524
2525
  const partialResultSubject = new rxjs.Subject();
2525
2526
  // <- Note: Not using `BehaviorSubject` because on error we can't access the last value
2526
2527
  const finalResultPromise = /* not await */ taskProcessCallback((newOngoingResult) => {
2528
+ if (newOngoingResult.title) {
2529
+ title = newOngoingResult.title;
2530
+ }
2531
+ updatedAt = new Date();
2527
2532
  Object.assign(currentValue, newOngoingResult);
2528
2533
  // <- TODO: assign deep
2529
2534
  partialResultSubject.next(newOngoingResult);
@@ -2569,17 +2574,24 @@
2569
2574
  return {
2570
2575
  taskType,
2571
2576
  taskId,
2577
+ get promptbookVersion() {
2578
+ return PROMPTBOOK_ENGINE_VERSION;
2579
+ },
2580
+ get title() {
2581
+ return title;
2582
+ // <- Note: [1] These must be getters to allow changing the value in the future
2583
+ },
2572
2584
  get status() {
2573
2585
  return status;
2574
- // <- Note: [1] Theese must be getters to allow changing the value in the future
2586
+ // <- Note: [1] --||--
2575
2587
  },
2576
2588
  get createdAt() {
2577
2589
  return createdAt;
2578
- // <- Note: [1]
2590
+ // <- Note: [1] --||--
2579
2591
  },
2580
2592
  get updatedAt() {
2581
2593
  return updatedAt;
2582
- // <- Note: [1]
2594
+ // <- Note: [1] --||--
2583
2595
  },
2584
2596
  asPromise,
2585
2597
  asObservable() {
@@ -2587,15 +2599,15 @@
2587
2599
  },
2588
2600
  get errors() {
2589
2601
  return errors;
2590
- // <- Note: [1]
2602
+ // <- Note: [1] --||--
2591
2603
  },
2592
2604
  get warnings() {
2593
2605
  return warnings;
2594
- // <- Note: [1]
2606
+ // <- Note: [1] --||--
2595
2607
  },
2596
2608
  get currentValue() {
2597
2609
  return currentValue;
2598
- // <- Note: [1]
2610
+ // <- Note: [1] --||--
2599
2611
  },
2600
2612
  };
2601
2613
  }
@@ -2760,12 +2772,14 @@
2760
2772
  const spending = new rxjs.Subject();
2761
2773
  const proxyTools = {
2762
2774
  get title() {
2763
- // TODO: [🧠] Maybe put here some suffix
2764
- return llmTools.title;
2775
+ return `${llmTools.title} (+usage)`;
2776
+ // <- TODO: [🧈] Maybe standardize the suffix when wrapping `LlmExecutionTools` up
2777
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
2765
2778
  },
2766
2779
  get description() {
2767
- // TODO: [🧠] Maybe put here some suffix
2768
- return llmTools.description;
2780
+ return `${llmTools.description} (+usage)`;
2781
+ // <- TODO: [🧈] Maybe standardize the suffix when wrapping `LlmExecutionTools` up
2782
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
2769
2783
  },
2770
2784
  checkConfiguration() {
2771
2785
  return /* not await */ llmTools.checkConfiguration();
@@ -2836,7 +2850,14 @@
2836
2850
  return 'Multiple LLM Providers';
2837
2851
  }
2838
2852
  get description() {
2839
- return this.llmExecutionTools.map(({ title }, index) => `${index + 1}) \`${title}\``).join('\n');
2853
+ const innerModelsTitlesAndDescriptions = this.llmExecutionTools
2854
+ .map(({ title, description }, index) => `${index + 1}) \`${title}\`\n${description}`)
2855
+ .join('\n\n');
2856
+ return spaceTrim__default["default"]((block) => `
2857
+ Multiple LLM Providers:
2858
+
2859
+ ${block(innerModelsTitlesAndDescriptions)}
2860
+ `);
2840
2861
  }
2841
2862
  /**
2842
2863
  * Check the configuration of all execution tools
@@ -5325,6 +5346,7 @@
5325
5346
  */
5326
5347
  async function getKnowledgeForTask(options) {
5327
5348
  const { tools, preparedPipeline, task, parameters } = options;
5349
+ console.log('!!! getKnowledgeForTask', options);
5328
5350
  const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
5329
5351
  const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
5330
5352
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
@@ -5363,7 +5385,7 @@
5363
5385
  });
5364
5386
  const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
5365
5387
  const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
5366
- console.log('!!! Embedding', {
5388
+ console.log('!!! `getKnowledgeForTask` Embedding', {
5367
5389
  task,
5368
5390
  taskEmbeddingPrompt,
5369
5391
  taskEmbeddingResult,
@@ -5399,6 +5421,7 @@
5399
5421
  */
5400
5422
  async function getReservedParametersForTask(options) {
5401
5423
  const { tools, preparedPipeline, task, parameters, pipelineIdentification } = options;
5424
+ console.log('!!! getReservedParametersForTask', options);
5402
5425
  const context = await getContextForTask(); // <- [🏍]
5403
5426
  const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task, parameters });
5404
5427
  const examples = await getExamplesForTask();
@@ -5435,6 +5458,7 @@
5435
5458
  */
5436
5459
  async function executeTask(options) {
5437
5460
  const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSupressed, } = options;
5461
+ console.log('!!! executeTask', options);
5438
5462
  const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
5439
5463
  // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
5440
5464
  const usedParameterNames = extractParameterNamesFromTask(currentTask);
@@ -5458,14 +5482,15 @@
5458
5482
 
5459
5483
  `));
5460
5484
  }
5485
+ const reservedParameters = await getReservedParametersForTask({
5486
+ tools,
5487
+ preparedPipeline,
5488
+ task: currentTask,
5489
+ pipelineIdentification,
5490
+ parameters: parametersToPass,
5491
+ });
5461
5492
  const definedParameters = Object.freeze({
5462
- ...(await getReservedParametersForTask({
5463
- tools,
5464
- preparedPipeline,
5465
- task: currentTask,
5466
- pipelineIdentification,
5467
- parameters: parametersToPass,
5468
- })),
5493
+ ...reservedParameters,
5469
5494
  ...parametersToPass,
5470
5495
  });
5471
5496
  const definedParameterNames = new Set(Object.keys(definedParameters));
@@ -5912,6 +5937,7 @@
5912
5937
  };
5913
5938
  const pipelineExecutor = (inputParameters) => createTask({
5914
5939
  taskType: 'EXECUTION',
5940
+ title: pipeline.title,
5915
5941
  taskProcessCallback(updateOngoingResult) {
5916
5942
  return pipelineExecutorWithCallback(inputParameters, async (newOngoingResult) => {
5917
5943
  updateOngoingResult(newOngoingResult);
@@ -6042,6 +6068,12 @@
6042
6068
  }
6043
6069
  // ---
6044
6070
  if (!llmTools.callEmbeddingModel) {
6071
+ console.log('!!! No callEmbeddingModel function provided', {
6072
+ 'llmTools.title': llmTools.title,
6073
+ 'llmTools.description': llmTools.description,
6074
+ 'llmTools.callEmbeddingModel': llmTools.callEmbeddingModel,
6075
+ llmTools,
6076
+ });
6045
6077
  // TODO: [🟥] Detect browser / node and make it colorfull
6046
6078
  console.error('No callEmbeddingModel function provided');
6047
6079
  }