@promptbook/remote-server 0.70.0-1 → 0.72.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/README.md +22 -57
  2. package/esm/index.es.js +7 -7
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/promptbook-collection/index.d.ts +0 -3
  5. package/esm/typings/src/_packages/core.index.d.ts +4 -2
  6. package/esm/typings/src/_packages/openai.index.d.ts +4 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +2 -0
  8. package/esm/typings/src/cli/cli-commands/make.d.ts +1 -1
  9. package/esm/typings/src/collection/constructors/createCollectionFromUrl.d.ts +1 -1
  10. package/esm/typings/src/commands/FOREACH/ForeachCommand.d.ts +1 -6
  11. package/esm/typings/src/commands/FOREACH/foreachCommandParser.d.ts +1 -2
  12. package/esm/typings/src/commands/_common/types/CommandParser.d.ts +1 -16
  13. package/esm/typings/src/config.d.ts +2 -2
  14. package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -1
  15. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +5 -5
  16. package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -1
  17. package/esm/typings/src/execution/translation/automatic-translate/automatic-translators/LindatAutomaticTranslator.d.ts +1 -1
  18. package/esm/typings/src/execution/utils/addUsage.d.ts +0 -56
  19. package/esm/typings/src/execution/utils/usage-constants.d.ts +127 -0
  20. package/esm/typings/src/knowledge/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
  21. package/esm/typings/src/knowledge/dialogs/simple-prompt/SimplePromptInterfaceTools.d.ts +1 -1
  22. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
  23. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -1
  24. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +1 -1
  25. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -2
  26. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  27. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +2 -2
  28. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +2 -2
  29. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +3 -2
  30. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  31. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  32. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +37 -0
  33. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +14 -0
  34. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -2
  35. package/esm/typings/src/llm-providers/openai/playground/playground.d.ts +1 -1
  36. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  37. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  38. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +1 -1
  39. package/esm/typings/src/prepare/prepareTemplates.d.ts +1 -1
  40. package/esm/typings/src/scripting/javascript/JavascriptEvalExecutionTools.d.ts +1 -1
  41. package/esm/typings/src/scripting/python/PythonExecutionTools.d.ts +1 -1
  42. package/esm/typings/src/scripting/typescript/TypescriptExecutionTools.d.ts +1 -1
  43. package/esm/typings/src/storage/files-storage/FilesStorage.d.ts +1 -1
  44. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -1
  45. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  46. package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +1 -1
  47. package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +1 -1
  48. package/package.json +2 -2
  49. package/umd/index.umd.js +7 -7
  50. package/umd/index.umd.js.map +1 -1
  51. package/esm/typings/src/personas/preparePersona.test.d.ts +0 -1
@@ -1,60 +1,4 @@
1
1
  import type { PromptResultUsage } from '../PromptResultUsage';
2
- /**
3
- * @@@
4
- *
5
- * @public exported from `@promptbook/core`
6
- */
7
- export declare const ZERO_USAGE: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
8
- readonly price: {
9
- readonly value: 0;
10
- };
11
- readonly input: {
12
- readonly tokensCount: {
13
- readonly value: 0;
14
- };
15
- readonly charactersCount: {
16
- readonly value: 0;
17
- };
18
- readonly wordsCount: {
19
- readonly value: 0;
20
- };
21
- readonly sentencesCount: {
22
- readonly value: 0;
23
- };
24
- readonly linesCount: {
25
- readonly value: 0;
26
- };
27
- readonly paragraphsCount: {
28
- readonly value: 0;
29
- };
30
- readonly pagesCount: {
31
- readonly value: 0;
32
- };
33
- };
34
- readonly output: {
35
- readonly tokensCount: {
36
- readonly value: 0;
37
- };
38
- readonly charactersCount: {
39
- readonly value: 0;
40
- };
41
- readonly wordsCount: {
42
- readonly value: 0;
43
- };
44
- readonly sentencesCount: {
45
- readonly value: 0;
46
- };
47
- readonly linesCount: {
48
- readonly value: 0;
49
- };
50
- readonly paragraphsCount: {
51
- readonly value: 0;
52
- };
53
- readonly pagesCount: {
54
- readonly value: 0;
55
- };
56
- };
57
- }>;
58
2
  /**
59
3
  * Function `addUsage` will add multiple usages into one
60
4
  *
@@ -0,0 +1,127 @@
1
+ /**
2
+ * Represents the usage with no resources consumed
3
+ *
4
+ * @public exported from `@promptbook/core`
5
+ */
6
+ export declare const ZERO_USAGE: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
7
+ readonly price: {
8
+ readonly value: 0;
9
+ };
10
+ readonly input: {
11
+ readonly tokensCount: {
12
+ readonly value: 0;
13
+ };
14
+ readonly charactersCount: {
15
+ readonly value: 0;
16
+ };
17
+ readonly wordsCount: {
18
+ readonly value: 0;
19
+ };
20
+ readonly sentencesCount: {
21
+ readonly value: 0;
22
+ };
23
+ readonly linesCount: {
24
+ readonly value: 0;
25
+ };
26
+ readonly paragraphsCount: {
27
+ readonly value: 0;
28
+ };
29
+ readonly pagesCount: {
30
+ readonly value: 0;
31
+ };
32
+ };
33
+ readonly output: {
34
+ readonly tokensCount: {
35
+ readonly value: 0;
36
+ };
37
+ readonly charactersCount: {
38
+ readonly value: 0;
39
+ };
40
+ readonly wordsCount: {
41
+ readonly value: 0;
42
+ };
43
+ readonly sentencesCount: {
44
+ readonly value: 0;
45
+ };
46
+ readonly linesCount: {
47
+ readonly value: 0;
48
+ };
49
+ readonly paragraphsCount: {
50
+ readonly value: 0;
51
+ };
52
+ readonly pagesCount: {
53
+ readonly value: 0;
54
+ };
55
+ };
56
+ }>;
57
+ /**
58
+ * Represents the usage with unknown resources consumed
59
+ *
60
+ * @public exported from `@promptbook/core`
61
+ */
62
+ export declare const UNCERTAIN_USAGE: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
63
+ readonly price: {
64
+ readonly value: 0;
65
+ readonly isUncertain: true;
66
+ };
67
+ readonly input: {
68
+ readonly tokensCount: {
69
+ readonly value: 0;
70
+ readonly isUncertain: true;
71
+ };
72
+ readonly charactersCount: {
73
+ readonly value: 0;
74
+ readonly isUncertain: true;
75
+ };
76
+ readonly wordsCount: {
77
+ readonly value: 0;
78
+ readonly isUncertain: true;
79
+ };
80
+ readonly sentencesCount: {
81
+ readonly value: 0;
82
+ readonly isUncertain: true;
83
+ };
84
+ readonly linesCount: {
85
+ readonly value: 0;
86
+ readonly isUncertain: true;
87
+ };
88
+ readonly paragraphsCount: {
89
+ readonly value: 0;
90
+ readonly isUncertain: true;
91
+ };
92
+ readonly pagesCount: {
93
+ readonly value: 0;
94
+ readonly isUncertain: true;
95
+ };
96
+ };
97
+ readonly output: {
98
+ readonly tokensCount: {
99
+ readonly value: 0;
100
+ readonly isUncertain: true;
101
+ };
102
+ readonly charactersCount: {
103
+ readonly value: 0;
104
+ readonly isUncertain: true;
105
+ };
106
+ readonly wordsCount: {
107
+ readonly value: 0;
108
+ readonly isUncertain: true;
109
+ };
110
+ readonly sentencesCount: {
111
+ readonly value: 0;
112
+ readonly isUncertain: true;
113
+ };
114
+ readonly linesCount: {
115
+ readonly value: 0;
116
+ readonly isUncertain: true;
117
+ };
118
+ readonly paragraphsCount: {
119
+ readonly value: 0;
120
+ readonly isUncertain: true;
121
+ };
122
+ readonly pagesCount: {
123
+ readonly value: 0;
124
+ readonly isUncertain: true;
125
+ };
126
+ };
127
+ }>;
@@ -8,7 +8,7 @@ import type { CallbackInterfaceToolsOptions } from './CallbackInterfaceToolsOpti
8
8
  * @public exported from `@promptbook/core`
9
9
  */
10
10
  export declare class CallbackInterfaceTools implements UserInterfaceTools {
11
- private readonly options;
11
+ protected readonly options: CallbackInterfaceToolsOptions;
12
12
  constructor(options: CallbackInterfaceToolsOptions);
13
13
  /**
14
14
  * Trigger the custom callback function
@@ -10,7 +10,7 @@ import type { UserInterfaceToolsPromptDialogOptions } from '../../../execution/U
10
10
  * @public exported from `@promptbook/browser`
11
11
  */
12
12
  export declare class SimplePromptInterfaceTools implements UserInterfaceTools {
13
- private readonly options;
13
+ protected readonly options: CommonExecutionToolsOptions;
14
14
  constructor(options?: CommonExecutionToolsOptions);
15
15
  /**
16
16
  * Trigger window.DIALOG TEMPLATE
@@ -8,7 +8,7 @@ import type { string_markdown } from '../../../types/typeAliases';
8
8
  */
9
9
  export declare function prepareKnowledgeFromMarkdown(knowledgeContent: string_markdown, options: PrepareOptions): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>>>;
10
10
  /**
11
- * TODO: [🐝][🔼] !!! Export via `@promptbook/markdown`
11
+ * TODO: [🐝][🔼][main] !!! Export via `@promptbook/markdown`
12
12
  * TODO: [🪂] Do it in parallel 11:11
13
13
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
14
14
  */
@@ -8,7 +8,7 @@ import type { string_base64 } from '../../../types/typeAliases';
8
8
  */
9
9
  export declare function prepareKnowledgeFromPdf(content: string_base64, options: PrepareOptions): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>>>;
10
10
  /**
11
- * TODO: [🐝][🔼] !!! Export via `@promptbook/pdf`
11
+ * TODO: [🐝][🔼][main] !!! Export via `@promptbook/pdf`
12
12
  * TODO: [🧺] In future, content can be alse File or Blob BUT for now for wider compatibility its only base64
13
13
  * @see https://stackoverflow.com/questions/14653349/node-js-cant-create-blobs
14
14
  * TODO: [🪂] Do it in parallel
@@ -13,7 +13,7 @@ export type CacheItem = {
13
13
  /**
14
14
  * @@@
15
15
  */
16
- promptbookVersion: string_promptbook_version;
16
+ promptbookVersion?: string_promptbook_version;
17
17
  /**
18
18
  * @@@
19
19
  */
@@ -1,3 +1,4 @@
1
+ import Anthropic from '@anthropic-ai/sdk';
1
2
  import type { AvailableModel } from '../../execution/AvailableModel';
2
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
@@ -13,7 +14,7 @@ import type { AnthropicClaudeExecutionToolsDirectOptions } from './AnthropicClau
13
14
  * @deprecated use `createAnthropicClaudeExecutionTools` instead
14
15
  */
15
16
  export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools {
16
- private readonly options;
17
+ protected readonly options: AnthropicClaudeExecutionToolsDirectOptions;
17
18
  /**
18
19
  * Anthropic Claude API client.
19
20
  */
@@ -26,7 +27,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
26
27
  constructor(options?: AnthropicClaudeExecutionToolsDirectOptions);
27
28
  get title(): string_title & string_markdown_text;
28
29
  get description(): string_markdown;
29
- private getClient;
30
+ getClient(): Promise<Anthropic>;
30
31
  /**
31
32
  * Check the `options` passed to `constructor`
32
33
  */
@@ -16,7 +16,7 @@ export declare const ANTHROPIC_CLAUDE_MODELS: Array<AvailableModel & {
16
16
  }>;
17
17
  /**
18
18
  * Note: [🤖] Add models of new variant
19
- * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
19
+ * TODO: [🧠][main] !!! Add embedding models OR Anthropic has only chat+completion models?
20
20
  * TODO: [🧠] Some mechanism to propagate unsureness
21
21
  * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
22
22
  * TODO: [🎰] Some mechanism to auto-update available models
@@ -11,8 +11,8 @@ export declare const createAnthropicClaudeExecutionTools: ((options: AnthropicCl
11
11
  className: string;
12
12
  };
13
13
  /**
14
- * TODO: [🧠] !!!! Make anonymous this with all LLM providers
15
- * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
14
+ * TODO: [🧠][main] !!!! Make anonymous this with all LLM providers
15
+ * TODO: [🧠][🧱][main] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
16
16
  * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
17
17
  * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
18
18
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
@@ -1,6 +1,6 @@
1
1
  #!/usr/bin/env ts-node
2
2
  export {};
3
3
  /**
4
- * TODO: !!! Playground with WebGPT / Promptbook.studio anonymous server
5
- * TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
4
+ * TODO:[main] !!! Playground with WebGPT / Promptbook.studio anonymous server
5
+ * TODO:[main] !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
6
6
  */
@@ -1,3 +1,4 @@
1
+ import { OpenAIClient } from '@azure/openai';
1
2
  import type { AvailableModel } from '../../execution/AvailableModel';
2
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
@@ -13,7 +14,7 @@ import type { AzureOpenAiExecutionToolsOptions } from './AzureOpenAiExecutionToo
13
14
  * @public exported from `@promptbook/azure-openai`
14
15
  */
15
16
  export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
16
- private readonly options;
17
+ protected readonly options: AzureOpenAiExecutionToolsOptions;
17
18
  /**
18
19
  * OpenAI Azure API client.
19
20
  */
@@ -26,7 +27,7 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
26
27
  constructor(options: AzureOpenAiExecutionToolsOptions);
27
28
  get title(): string_title & string_markdown_text;
28
29
  get description(): string_markdown;
29
- private getClient;
30
+ getClient(): Promise<OpenAIClient>;
30
31
  /**
31
32
  * Check the `options` passed to `constructor`
32
33
  */
@@ -13,7 +13,7 @@ import type { string_title } from '../../types/typeAliases';
13
13
  * @public exported from `@promptbook/fake-llm`
14
14
  */
15
15
  export declare class MockedEchoLlmExecutionTools implements LlmExecutionTools {
16
- private readonly options;
16
+ protected readonly options: CommonExecutionToolsOptions;
17
17
  constructor(options?: CommonExecutionToolsOptions);
18
18
  get title(): string_title & string_markdown_text;
19
19
  get description(): string_markdown;
@@ -14,7 +14,7 @@ import type { string_title } from '../../types/typeAliases';
14
14
  * @public exported from `@promptbook/fake-llm`
15
15
  */
16
16
  export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools {
17
- private readonly options;
17
+ protected readonly options: CommonExecutionToolsOptions;
18
18
  constructor(options?: CommonExecutionToolsOptions);
19
19
  get title(): string_title & string_markdown_text;
20
20
  get description(): string_markdown;
@@ -0,0 +1,37 @@
1
+ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
+ import type { ChatPromptResult } from '../../execution/PromptResult';
3
+ import type { Prompt } from '../../types/Prompt';
4
+ import type { string_markdown } from '../../types/typeAliases';
5
+ import type { string_markdown_text } from '../../types/typeAliases';
6
+ import type { string_title } from '../../types/typeAliases';
7
+ import type { OpenAiAssistantExecutionToolsOptions } from './OpenAiAssistantExecutionToolsOptions';
8
+ import { OpenAiExecutionTools } from './OpenAiExecutionTools';
9
+ /**
10
+ * Execution Tools for calling OpenAI API Assistants
11
+ *
12
+ * This is usefull for calling OpenAI API with a single assistant, for more wide usage use `OpenAiExecutionTools`.
13
+ *
14
+ * @public exported from `@promptbook/openai`
15
+ */
16
+ export declare class OpenAiAssistantExecutionTools extends OpenAiExecutionTools implements LlmExecutionTools {
17
+ private readonly assistantId?;
18
+ /**
19
+ * Creates OpenAI Execution Tools.
20
+ *
21
+ * @param options which are relevant are directly passed to the OpenAI client
22
+ */
23
+ constructor(options?: OpenAiAssistantExecutionToolsOptions);
24
+ get title(): string_title & string_markdown_text;
25
+ get description(): string_markdown;
26
+ /**
27
+ * Calls OpenAI API to use a chat model.
28
+ */
29
+ callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'format'>): Promise<ChatPromptResult>;
30
+ }
31
+ /**
32
+ * TODO: !!!!!! DO not use colors - can be used in browser
33
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
34
+ * TODO: Maybe make custom OpenAiError
35
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
36
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
37
+ */
@@ -0,0 +1,14 @@
1
+ import type { ClientOptions } from 'openai';
2
+ import type { string_token } from '../../types/typeAliases';
3
+ import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
4
+ /**
5
+ * Options for `OpenAiAssistantExecutionTools`
6
+ *
7
+ * @public exported from `@promptbook/openai`
8
+ */
9
+ export type OpenAiAssistantExecutionToolsOptions = OpenAiExecutionToolsOptions & ClientOptions & {
10
+ /**
11
+ * Which assistant to use
12
+ */
13
+ assistantId?: string_token;
14
+ };
@@ -1,3 +1,4 @@
1
+ import OpenAI from 'openai';
1
2
  import type { AvailableModel } from '../../execution/AvailableModel';
2
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
@@ -14,7 +15,7 @@ import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions'
14
15
  * @public exported from `@promptbook/openai`
15
16
  */
16
17
  export declare class OpenAiExecutionTools implements LlmExecutionTools {
17
- private readonly options;
18
+ protected readonly options: OpenAiExecutionToolsOptions;
18
19
  /**
19
20
  * OpenAI API client.
20
21
  */
@@ -27,7 +28,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
27
28
  constructor(options?: OpenAiExecutionToolsOptions);
28
29
  get title(): string_title & string_markdown_text;
29
30
  get description(): string_markdown;
30
- private getClient;
31
+ getClient(): Promise<OpenAI>;
31
32
  /**
32
33
  * Check the `options` passed to `constructor`
33
34
  */
@@ -1,5 +1,5 @@
1
1
  #!/usr/bin/env ts-node
2
2
  export {};
3
3
  /**
4
- * TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
4
+ * TODO:[main] !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
5
5
  */
@@ -20,7 +20,7 @@ import type { RemoteLlmExecutionToolsOptions } from './interfaces/RemoteLlmExecu
20
20
  * @public exported from `@promptbook/remote-client`
21
21
  */
22
22
  export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
23
- private readonly options;
23
+ protected readonly options: RemoteLlmExecutionToolsOptions;
24
24
  constructor(options: RemoteLlmExecutionToolsOptions);
25
25
  get title(): string_title & string_markdown_text;
26
26
  get description(): string_markdown;
@@ -9,7 +9,7 @@ import type { string_persona_description } from '../types/typeAliases';
9
9
  */
10
10
  export declare function preparePersona(personaDescription: string_persona_description, options: PrepareOptions): Promise<PersonaPreparedJson['modelRequirements']>;
11
11
  /**
12
- * TODO: [🔃] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
12
+ * TODO: [🔃][main] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
13
13
  * TODO: [🏢] !! Check validity of `modelName` in pipeline
14
14
  * TODO: [🏢] !! Check validity of `systemMessage` in pipeline
15
15
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
@@ -6,7 +6,7 @@ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
6
6
  */
7
7
  export declare function isPipelinePrepared(pipeline: PipelineJson): boolean;
8
8
  /**
9
- * TODO: [🔃] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
9
+ * TODO: [🔃][main] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
10
10
  * TODO: [🐠] Maybe base this on `makeValidator`
11
11
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
12
12
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
@@ -23,7 +23,7 @@ export {};
23
23
  /**
24
24
  * TODO: [🧠] Add context to each template (if missing)
25
25
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
26
- * TODO: [♨] !!! Prepare index the samples and maybe templates
26
+ * TODO: [♨][main] !!! Prepare index the samples and maybe templates
27
27
  * TODO: Write tests for `preparePipeline`
28
28
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
29
29
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
@@ -9,7 +9,7 @@ import type { JavascriptExecutionToolsOptions } from './JavascriptExecutionTools
9
9
  * @public exported from `@promptbook/execute-javascript`
10
10
  */
11
11
  export declare class JavascriptEvalExecutionTools implements ScriptExecutionTools {
12
- private readonly options;
12
+ protected readonly options: JavascriptExecutionToolsOptions;
13
13
  constructor(options?: JavascriptExecutionToolsOptions);
14
14
  /**
15
15
  * Executes a JavaScript
@@ -9,7 +9,7 @@ import type { ScriptExecutionToolsExecuteOptions } from '../../execution/ScriptE
9
9
  * @private still in development
10
10
  */
11
11
  export declare class PythonExecutionTools implements ScriptExecutionTools {
12
- private readonly options;
12
+ protected readonly options: CommonExecutionToolsOptions;
13
13
  constructor(options?: CommonExecutionToolsOptions);
14
14
  /**
15
15
  * Executes a Python
@@ -9,7 +9,7 @@ import type { ScriptExecutionToolsExecuteOptions } from '../../execution/ScriptE
9
9
  * @private still in development
10
10
  */
11
11
  export declare class TypescriptExecutionTools implements ScriptExecutionTools {
12
- private readonly options;
12
+ protected readonly options: CommonExecutionToolsOptions;
13
13
  constructor(options?: CommonExecutionToolsOptions);
14
14
  /**
15
15
  * Executes a TypeScript
@@ -6,7 +6,7 @@ import type { FilesStorageOptions } from './FilesStorageOptions';
6
6
  * @public exported from `@promptbook/node`
7
7
  */
8
8
  export declare class FilesStorage<TItem> implements PromptbookStorage<TItem> {
9
- private readonly options;
9
+ protected readonly options: FilesStorageOptions;
10
10
  constructor(options: FilesStorageOptions);
11
11
  /**
12
12
  * @@@
@@ -42,7 +42,7 @@ export type PipelineJson = {
42
42
  /**
43
43
  * Version of the .ptbk.json file
44
44
  */
45
- readonly promptbookVersion: string_semantic_version;
45
+ readonly promptbookVersion?: string_semantic_version;
46
46
  /**
47
47
  * Description of the promptbook
48
48
  * It can use multiple paragraphs of simple markdown formatting like **bold**, *italic*, [link](https://example.com), ... BUT not code blocks and structure
@@ -592,7 +592,7 @@ export type number_megabytes = number_positive;
592
592
  export type number_gigabytes = number_positive;
593
593
  export type number_terabytes = number_positive;
594
594
  /**.
595
- * TODO: !!! Change "For example" to @example
595
+ * TODO:[main] !!! Change "For example" to @example
596
596
  * TODO: !! Change to branded types
597
597
  * TODO: Delete type aliases that are not exported or used internally
598
598
  */
@@ -22,6 +22,6 @@ import type { string_name } from '../../types/typeAliases';
22
22
  export declare function checkSerializableAsJson(name: string_name, value: unknown): void;
23
23
  /**
24
24
  * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
25
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
25
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
26
26
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just retun true/false or rich error message
27
27
  */
@@ -19,6 +19,6 @@
19
19
  */
20
20
  export declare function isSerializableAsJson(value: unknown): boolean;
21
21
  /**
22
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
22
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
23
23
  * TODO: [🧠][💺] Can be done this on type-level?
24
24
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/remote-server",
3
- "version": "0.70.0-1",
3
+ "version": "0.72.0-0",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -47,7 +47,7 @@
47
47
  "module": "./esm/index.es.js",
48
48
  "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
49
49
  "peerDependencies": {
50
- "@promptbook/core": "0.70.0-1"
50
+ "@promptbook/core": "0.72.0-0"
51
51
  },
52
52
  "dependencies": {
53
53
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -14,8 +14,8 @@
14
14
  /**
15
15
  * The version of the Promptbook library
16
16
  */
17
- var PROMPTBOOK_VERSION = '0.70.0-0';
18
- // TODO: !!!! List here all the versions and annotate + put into script
17
+ var PROMPTBOOK_VERSION = '0.68.5';
18
+ // TODO:[main] !!!! List here all the versions and annotate + put into script
19
19
 
20
20
  /*! *****************************************************************************
21
21
  Copyright (c) Microsoft Corporation.
@@ -325,7 +325,7 @@
325
325
  }
326
326
  /**
327
327
  * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
328
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
328
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
329
329
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just retun true/false or rich error message
330
330
  */
331
331
 
@@ -1155,10 +1155,10 @@
1155
1155
  case 1:
1156
1156
  _c.trys.push([1, 14, 15, 16]);
1157
1157
  if (isAnonymous === true && !isAnonymousModeAllowed) {
1158
- throw new PipelineExecutionError("Anonymous mode is not allowed"); // <- TODO: !!! Test
1158
+ throw new PipelineExecutionError("Anonymous mode is not allowed"); // <- TODO:[main] !!! Test
1159
1159
  }
1160
1160
  if (isAnonymous === false && !isCollectionModeAllowed) {
1161
- throw new PipelineExecutionError("Collection mode is not allowed"); // <- TODO: !!! Test
1161
+ throw new PipelineExecutionError("Collection mode is not allowed"); // <- TODO:[main] !!! Test
1162
1162
  }
1163
1163
  llmExecutionTools = void 0;
1164
1164
  if (!(isAnonymous === true && llmToolsConfiguration !== null)) return [3 /*break*/, 2];
@@ -1249,10 +1249,10 @@
1249
1249
  case 1:
1250
1250
  _b.trys.push([1, 3, 4, 5]);
1251
1251
  if (isAnonymous === true && !isAnonymousModeAllowed) {
1252
- throw new PipelineExecutionError("Anonymous mode is not allowed"); // <- TODO: !!! Test
1252
+ throw new PipelineExecutionError("Anonymous mode is not allowed"); // <- TODO:[main] !!! Test
1253
1253
  }
1254
1254
  if (isAnonymous === false && !isCollectionModeAllowed) {
1255
- throw new PipelineExecutionError("Collection mode is not allowed"); // <- TODO: !!! Test
1255
+ throw new PipelineExecutionError("Collection mode is not allowed"); // <- TODO:[main] !!! Test
1256
1256
  }
1257
1257
  llmExecutionTools = void 0;
1258
1258
  if (isAnonymous === true) {