@promptbook/node 0.89.0 → 0.92.0-10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27) hide show
  1. package/README.md +4 -0
  2. package/esm/index.es.js +175 -32
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
  6. package/esm/typings/src/_packages/google.index.d.ts +2 -0
  7. package/esm/typings/src/_packages/utils.index.d.ts +2 -0
  8. package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +1 -1
  9. package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
  10. package/esm/typings/src/execution/CommonToolsOptions.d.ts +4 -0
  11. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +12 -0
  12. package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +5 -0
  13. package/esm/typings/src/formats/csv/utils/csvParse.d.ts +12 -0
  14. package/esm/typings/src/formats/json/utils/jsonParse.d.ts +11 -0
  15. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +15 -0
  16. package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +43 -0
  17. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
  18. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
  19. package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
  20. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +4 -0
  21. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  22. package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
  23. package/esm/typings/src/remote-server/openapi-types.d.ts +348 -6
  24. package/esm/typings/src/remote-server/openapi.d.ts +397 -3
  25. package/package.json +2 -2
  26. package/umd/index.umd.js +175 -32
  27. package/umd/index.umd.js.map +1 -1
@@ -87,9 +87,12 @@ import { FORMFACTOR_DEFINITIONS } from '../formfactors/index';
87
87
  import { MatcherFormfactorDefinition } from '../formfactors/matcher/MatcherFormfactorDefinition';
88
88
  import { SheetsFormfactorDefinition } from '../formfactors/sheets/SheetsFormfactorDefinition';
89
89
  import { TranslatorFormfactorDefinition } from '../formfactors/translator/TranslatorFormfactorDefinition';
90
+ import { filterModels } from '../llm-providers/_common/filterModels';
90
91
  import { $llmToolsMetadataRegister } from '../llm-providers/_common/register/$llmToolsMetadataRegister';
91
92
  import { $llmToolsRegister } from '../llm-providers/_common/register/$llmToolsRegister';
92
93
  import { createLlmToolsFromConfiguration } from '../llm-providers/_common/register/createLlmToolsFromConfiguration';
94
+ import { MODEL_TRUST_LEVEL } from '../llm-providers/_common/register/LlmToolsMetadata';
95
+ import { MODEL_ORDER } from '../llm-providers/_common/register/LlmToolsMetadata';
93
96
  import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
94
97
  import { countUsage } from '../llm-providers/_common/utils/count-total-usage/countUsage';
95
98
  import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
@@ -225,9 +228,12 @@ export { FORMFACTOR_DEFINITIONS };
225
228
  export { MatcherFormfactorDefinition };
226
229
  export { SheetsFormfactorDefinition };
227
230
  export { TranslatorFormfactorDefinition };
231
+ export { filterModels };
228
232
  export { $llmToolsMetadataRegister };
229
233
  export { $llmToolsRegister };
230
234
  export { createLlmToolsFromConfiguration };
235
+ export { MODEL_TRUST_LEVEL };
236
+ export { MODEL_ORDER };
231
237
  export { cacheLlmTools };
232
238
  export { countUsage };
233
239
  export { limitTotalUsage };
@@ -1,8 +1,10 @@
1
1
  import { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION } from '../version';
2
2
  import { createDeepseekExecutionTools } from '../llm-providers/deepseek/createDeepseekExecutionTools';
3
+ import { DEEPSEEK_MODELS } from '../llm-providers/deepseek/deepseek-models';
3
4
  import type { DeepseekExecutionToolsOptions } from '../llm-providers/deepseek/DeepseekExecutionToolsOptions';
4
5
  import { _DeepseekRegistration } from '../llm-providers/deepseek/register-constructor';
5
6
  export { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION };
6
7
  export { createDeepseekExecutionTools };
8
+ export { DEEPSEEK_MODELS };
7
9
  export type { DeepseekExecutionToolsOptions };
8
10
  export { _DeepseekRegistration };
@@ -1,10 +1,12 @@
1
1
  import { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION } from '../version';
2
2
  import { createGoogleExecutionTools } from '../llm-providers/google/createGoogleExecutionTools';
3
+ import { GOOGLE_MODELS } from '../llm-providers/google/google-models';
3
4
  import type { GoogleExecutionToolsOptions } from '../llm-providers/google/GoogleExecutionToolsOptions';
4
5
  import { _GoogleRegistration } from '../llm-providers/google/register-constructor';
5
6
  import type { VercelExecutionToolsOptions } from '../llm-providers/vercel/VercelExecutionToolsOptions';
6
7
  export { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION };
7
8
  export { createGoogleExecutionTools };
9
+ export { GOOGLE_MODELS };
8
10
  export type { GoogleExecutionToolsOptions };
9
11
  export { _GoogleRegistration };
10
12
  export type { VercelExecutionToolsOptions };
@@ -7,6 +7,7 @@ import { serializeError } from '../errors/utils/serializeError';
7
7
  import { forEachAsync } from '../execution/utils/forEachAsync';
8
8
  import { isValidCsvString } from '../formats/csv/utils/isValidCsvString';
9
9
  import { isValidJsonString } from '../formats/json/utils/isValidJsonString';
10
+ import { jsonParse } from '../formats/json/utils/jsonParse';
10
11
  import { isValidXmlString } from '../formats/xml/utils/isValidXmlString';
11
12
  import { prompt } from '../pipeline/prompt-notation';
12
13
  import { promptTemplate } from '../pipeline/prompt-notation';
@@ -91,6 +92,7 @@ export { serializeError };
91
92
  export { forEachAsync };
92
93
  export { isValidCsvString };
93
94
  export { isValidJsonString };
95
+ export { jsonParse };
94
96
  export { isValidXmlString };
95
97
  export { prompt };
96
98
  export { promptTemplate };
@@ -1,5 +1,5 @@
1
- import type { LlmExecutionToolsWithTotalUsage } from '../../llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
2
1
  import type { CacheLlmToolsOptions } from '../../llm-providers/_common/utils/cache/CacheLlmToolsOptions';
2
+ import type { LlmExecutionToolsWithTotalUsage } from '../../llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
3
3
  import type { string_promptbook_server_url } from '../../types/typeAliases';
4
4
  type ProvideLlmToolsForCliOptions = Pick<CacheLlmToolsOptions, 'isCacheReloaded'> & {
5
5
  /**
@@ -1,6 +1,6 @@
1
+ import type { FilesystemTools } from '../../execution/FilesystemTools';
1
2
  import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
2
3
  import type { string_filename } from '../../types/typeAliases';
3
- import type { FilesystemTools } from '../../execution/FilesystemTools';
4
4
  /**
5
5
  * Loads the books from the archive file with `.bookc` extension
6
6
  *
@@ -10,7 +10,7 @@ import type { FilesystemTools } from '../../execution/FilesystemTools';
10
10
  *
11
11
  * @private utility of Promptbook
12
12
  */
13
- export declare function loadArchive(filePath: string_filename, fs: FilesystemTools): Promise<Array<PipelineJson>>;
13
+ export declare function loadArchive(filePath: string_filename, fs: FilesystemTools): Promise<ReadonlyArray<PipelineJson>>;
14
14
  /**
15
15
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
16
16
  */
@@ -16,6 +16,10 @@ export type CommonToolsOptions = {
16
16
  * If true, the internal executions will be logged
17
17
  */
18
18
  readonly isVerbose?: boolean;
19
+ /**
20
+ * Maximum number of requests per minute
21
+ */
22
+ readonly maxRequestsPerMinute?: number;
19
23
  };
20
24
  /**
21
25
  * TODO: [🧠][🤺] Maybe allow overriding of `userId` for each prompt
@@ -3,12 +3,17 @@ import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
3
3
  import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
4
4
  import type { string_markdown } from '../../types/typeAliases';
5
5
  import type { string_parameter_value } from '../../types/typeAliases';
6
+ import type { ExecutionTools } from '../ExecutionTools';
6
7
  /**
7
8
  * @@@
8
9
  *
9
10
  * @private internal type of `getKnowledgeForTask`
10
11
  */
11
12
  type GetKnowledgeForTaskOptions = {
13
+ /**
14
+ * The execution tools to be used during the execution of the pipeline
15
+ */
16
+ readonly tools: ExecutionTools;
12
17
  /**
13
18
  * @@@
14
19
  */
@@ -21,7 +26,14 @@ type GetKnowledgeForTaskOptions = {
21
26
  /**
22
27
  * @@@
23
28
  *
29
+ * Here is the place where RAG (retrieval-augmented generation) happens
30
+ *
24
31
  * @private internal utility of `createPipelineExecutor`
25
32
  */
26
33
  export declare function getKnowledgeForTask(options: GetKnowledgeForTaskOptions): Promise<string_parameter_value & string_markdown>;
27
34
  export {};
35
+ /**
36
+ * TODO: !!!! Verify if this is working
37
+ * TODO: [♨] Implement Better - use keyword search
38
+ * TODO: [♨] Examples of values
39
+ */
@@ -2,12 +2,17 @@ import type { ReadonlyDeep } from 'type-fest';
2
2
  import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
3
3
  import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
4
4
  import type { ReservedParameters } from '../../types/typeAliases';
5
+ import type { ExecutionTools } from '../ExecutionTools';
5
6
  /**
6
7
  * @@@
7
8
  *
8
9
  * @private internal type of `getReservedParametersForTask`
9
10
  */
10
11
  type GetReservedParametersForTaskOptions = {
12
+ /**
13
+ * The execution tools to be used during the execution of the pipeline
14
+ */
15
+ readonly tools: ExecutionTools;
11
16
  /**
12
17
  * @@@
13
18
  */
@@ -0,0 +1,12 @@
1
+ import type { ParseResult } from 'papaparse';
2
+ import type { TODO_any } from '../../../utils/organization/TODO_any';
3
+ import type { Parameters } from '../../../types/typeAliases';
4
+ import type { CsvSettings } from '../CsvSettings';
5
+ /**
6
+ * Converts a CSV string into an object
7
+ *
8
+ * Note: This is wrapper around `papaparse.parse()` with better autohealing
9
+ *
10
+ * @private - for now until `@promptbook/csv` is released
11
+ */
12
+ export declare function csvParse(value: string, settings?: CsvSettings, schema?: TODO_any): ParseResult<Parameters>;
@@ -0,0 +1,11 @@
1
+ /**
2
+ * Converts a JavaScript Object Notation (JSON) string into an object.
3
+ *
4
+ * Note: This is wrapper around `JSON.parse()` with better error and type handling
5
+ *
6
+ * @public exported from `@promptbook/utils`
7
+ */
8
+ export declare function jsonParse<T>(value: string): T;
9
+ /**
10
+ * TODO: !!!! Use in Promptbook.studio
11
+ */
@@ -0,0 +1,15 @@
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
+ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
+ /**
4
+ * Creates a wrapper around LlmExecutionTools that only exposes models matching the filter function
5
+ *
6
+ * @param llmTools The original LLM execution tools to wrap
7
+ * @param modelFilter Function that determines whether a model should be included
8
+ * @returns A new LlmExecutionTools instance with filtered models
9
+ *
10
+ * @public exported from `@promptbook/core`
11
+ */
12
+ export declare function filterModels<TLlmTools extends LlmExecutionTools>(llmTools: TLlmTools, modelFilter: (model: AvailableModel) => boolean): TLlmTools;
13
+ /**
14
+ * TODO: !!! [models] Test that this is working
15
+ */
@@ -3,6 +3,41 @@ import type { string_title } from '../../../types/typeAliases';
3
3
  import type { Registered } from '../../../utils/$Register';
4
4
  import type { string_SCREAMING_CASE } from '../../../utils/normalization/normalizeTo_SCREAMING_CASE';
5
5
  import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
6
+ /**
7
+ * How is the model provider trusted?
8
+ *
9
+ * @public exported from `@promptbook/core`
10
+ */
11
+ export declare const MODEL_TRUST_LEVEL: {
12
+ readonly FULL: "Model is running on the local machine, training data and model weights are known, data are ethically sourced";
13
+ readonly OPEN: "Model is open source, training data and model weights are known";
14
+ readonly PARTIALLY_OPEN: "Model is open source, but training data and model weights are not (fully) known";
15
+ readonly CLOSED_LOCAL: "Model can be run locally, but it is not open source";
16
+ readonly CLOSED_FREE: "Model is behind API gateway but free to use";
17
+ readonly CLOSED_BUSINESS: "Model is behind API gateway and paid but has good SLA, TOS, privacy policy and in general is a good to use in business applications";
18
+ readonly CLOSED: "Model is behind API gateway and paid";
19
+ readonly UNTRUSTED: "Model has questions about the training data and ethics, but it is not known if it is a problem or not";
20
+ readonly VURNABLE: "Model has some known serious vulnerabilities, leaks, ethical problems, etc.";
21
+ };
22
+ /**
23
+ * How is the model provider important?
24
+ *
25
+ * @public exported from `@promptbook/core`
26
+ */
27
+ export declare const MODEL_ORDER: {
28
+ /**
29
+ * Top-tier models, e.g. OpenAI, Anthropic,...
30
+ */
31
+ readonly TOP_TIER: 333;
32
+ /**
33
+ * Mid-tier models, e.g. Llama, Mistral, etc.
34
+ */
35
+ readonly NORMAL: 100;
36
+ /**
37
+ * Low-tier models, e.g. Phi, Tiny, etc.
38
+ */
39
+ readonly LOW_TIER: 0;
40
+ };
6
41
  /**
7
42
  * @@@
8
43
  *
@@ -13,6 +48,14 @@ export type LlmToolsMetadata = Registered & {
13
48
  * @@@
14
49
  */
15
50
  readonly title: string_title;
51
+ /**
52
+ * How is the model trusted?
53
+ */
54
+ readonly trustLevel: keyof typeof MODEL_TRUST_LEVEL;
55
+ /**
56
+ * How is the model provider important and should be sorted in the list of available providers?
57
+ */
58
+ readonly order: typeof MODEL_ORDER[keyof typeof MODEL_ORDER] | number;
16
59
  /**
17
60
  * List of environment variables that can be used to configure the provider
18
61
  *
@@ -19,6 +19,10 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
19
19
  * OpenAI Azure API client.
20
20
  */
21
21
  private client;
22
+ /**
23
+ * Rate limiter instance
24
+ */
25
+ private limiter;
22
26
  /**
23
27
  * Creates OpenAI Execution Tools.
24
28
  *
@@ -0,0 +1,23 @@
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
+ import type { number_usd } from '../../types/typeAliases';
3
+ /**
4
+ * List of available Deepseek models with descriptions
5
+ *
6
+ * Note: Done at 2025-04-22
7
+ *
8
+ * @see https://www.deepseek.com/models
9
+ * @public exported from `@promptbook/deepseek`
10
+ */
11
+ export declare const DEEPSEEK_MODELS: ReadonlyArray<AvailableModel & {
12
+ modelDescription?: string;
13
+ pricing?: {
14
+ readonly prompt: number_usd;
15
+ readonly output: number_usd;
16
+ };
17
+ }>;
18
+ /**
19
+ * TODO: [🧠] Add information about context window sizes, capabilities, and relative performance characteristics
20
+ * TODO: [🎰] Some mechanism to auto-update available models
21
+ * TODO: [🧠] Verify pricing information is current with Deepseek's official documentation
22
+ * Note: [💞] Ignore a discrepancy between file name and entity name
23
+ */
@@ -0,0 +1,23 @@
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
+ import type { number_usd } from '../../types/typeAliases';
3
+ /**
4
+ * List of available Google models with descriptions
5
+ *
6
+ * Note: Done at 2025-04-22
7
+ *
8
+ * @see https://ai.google.dev/models/gemini
9
+ * @public exported from `@promptbook/google`
10
+ */
11
+ export declare const GOOGLE_MODELS: ReadonlyArray<AvailableModel & {
12
+ modelDescription?: string;
13
+ pricing?: {
14
+ readonly prompt: number_usd;
15
+ readonly output: number_usd;
16
+ };
17
+ }>;
18
+ /**
19
+ * TODO: [🧠] Add information about context window sizes, capabilities, and relative performance characteristics
20
+ * TODO: [🎰] Some mechanism to auto-update available models
21
+ * TODO: [🧠] Verify pricing information is current with Google's official documentation
22
+ * Note: [💞] Ignore a discrepancy between file name and entity name
23
+ */
@@ -20,6 +20,10 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
20
20
  * OpenAI API client.
21
21
  */
22
22
  private client;
23
+ /**
24
+ * Rate limiter instance
25
+ */
26
+ private limiter;
23
27
  /**
24
28
  * Creates OpenAI Execution Tools.
25
29
  *
@@ -8,7 +8,7 @@ import type { string_persona_description } from '../types/typeAliases';
8
8
  * @see https://github.com/webgptorg/promptbook/discussions/22
9
9
  * @public exported from `@promptbook/core`
10
10
  */
11
- export declare function preparePersona(personaDescription: string_persona_description, tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions): Promise<PersonaPreparedJson['modelRequirements']>;
11
+ export declare function preparePersona(personaDescription: string_persona_description, tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions): Promise<Pick<PersonaPreparedJson, 'modelsRequirements'>>;
12
12
  /**
13
13
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
14
14
  * TODO: [🏢] Check validity of `modelName` in pipeline
@@ -30,11 +30,13 @@ export type PersonaJson = {
30
30
  */
31
31
  export type PersonaPreparedJson = PersonaJson & {
32
32
  /**
33
- * Model requirements for the persona
33
+ * Models requirements for the persona
34
+ *
35
+ * Sorted by relevance, best-fitting model is first
34
36
  *
35
37
  * Note: The model must be CHAT variant to be usable through persona
36
38
  */
37
- readonly modelRequirements: ChatModelRequirements;
39
+ readonly modelsRequirements: Array<ChatModelRequirements>;
38
40
  /**
39
41
  * List of preparation ids that were used to prepare this persona
40
42
  */