@promptbook/remote-server 0.63.3 → 0.64.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24)
  1. package/esm/index.es.js +1 -1
  2. package/esm/typings/promptbook-collection/index.d.ts +9 -171
  3. package/esm/typings/src/_packages/node.index.d.ts +6 -0
  4. package/esm/typings/src/_packages/types.index.d.ts +5 -2
  5. package/esm/typings/src/commands/KNOWLEDGE/KnowledgeCommand.d.ts +2 -2
  6. package/esm/typings/src/llm-providers/_common/LlmConfiguration.d.ts +28 -0
  7. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +22 -0
  8. package/esm/typings/src/llm-providers/_common/config.d.ts +15 -0
  9. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfiguration.d.ts +32 -0
  10. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfigurationFromEnv.d.ts +23 -0
  11. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +7 -22
  12. package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +1 -0
  13. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +3 -2
  14. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  15. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +1 -0
  16. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +3 -1
  17. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +1 -0
  18. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
  19. package/esm/typings/src/prepare/preparePipeline.d.ts +1 -0
  20. package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +8 -2
  21. package/esm/typings/src/types/typeAliases.d.ts +2 -2
  22. package/esm/typings/src/utils/organization/TODO_string.d.ts +6 -0
  23. package/package.json +2 -2
  24. package/umd/index.umd.js +1 -1
package/esm/index.es.js CHANGED
@@ -7,7 +7,7 @@ import { spaceTrim } from 'spacetrim';
7
7
  /**
8
8
  * The version of the Promptbook library
9
9
  */
10
- var PROMPTBOOK_VERSION = '0.63.2';
10
+ var PROMPTBOOK_VERSION = '0.63.4';
11
11
  // TODO: !!!! List here all the versions and annotate + put into script
12
12
 
13
13
  /*! *****************************************************************************
@@ -1,7 +1,6 @@
1
1
  declare const _default: ({
2
- pipelineUrl: string;
3
- sourceFile: string;
4
2
  title: string;
3
+ pipelineUrl: string;
5
4
  promptbookVersion: string;
6
5
  parameters: {
7
6
  name: string;
@@ -24,65 +23,11 @@ declare const _default: ({
24
23
  knowledgeSources: never[];
25
24
  knowledgePieces: never[];
26
25
  personas: never[];
27
- preparations: {
28
- id: number;
29
- promptbookVersion: string;
30
- usage: {
31
- price: {
32
- value: number;
33
- };
34
- input: {
35
- tokensCount: {
36
- value: number;
37
- };
38
- charactersCount: {
39
- value: number;
40
- };
41
- wordsCount: {
42
- value: number;
43
- };
44
- sentencesCount: {
45
- value: number;
46
- };
47
- linesCount: {
48
- value: number;
49
- };
50
- paragraphsCount: {
51
- value: number;
52
- };
53
- pagesCount: {
54
- value: number;
55
- };
56
- };
57
- output: {
58
- tokensCount: {
59
- value: number;
60
- };
61
- charactersCount: {
62
- value: number;
63
- };
64
- wordsCount: {
65
- value: number;
66
- };
67
- sentencesCount: {
68
- value: number;
69
- };
70
- linesCount: {
71
- value: number;
72
- };
73
- paragraphsCount: {
74
- value: number;
75
- };
76
- pagesCount: {
77
- value: number;
78
- };
79
- };
80
- };
81
- }[];
82
- } | {
83
- pipelineUrl: string;
26
+ preparations: never[];
84
27
  sourceFile: string;
28
+ } | {
85
29
  title: string;
30
+ pipelineUrl: string;
86
31
  promptbookVersion: string;
87
32
  parameters: {
88
33
  name: string;
@@ -111,65 +56,11 @@ declare const _default: ({
111
56
  knowledgeSources: never[];
112
57
  knowledgePieces: never[];
113
58
  personas: never[];
114
- preparations: {
115
- id: number;
116
- promptbookVersion: string;
117
- usage: {
118
- price: {
119
- value: number;
120
- };
121
- input: {
122
- tokensCount: {
123
- value: number;
124
- };
125
- charactersCount: {
126
- value: number;
127
- };
128
- wordsCount: {
129
- value: number;
130
- };
131
- sentencesCount: {
132
- value: number;
133
- };
134
- linesCount: {
135
- value: number;
136
- };
137
- paragraphsCount: {
138
- value: number;
139
- };
140
- pagesCount: {
141
- value: number;
142
- };
143
- };
144
- output: {
145
- tokensCount: {
146
- value: number;
147
- };
148
- charactersCount: {
149
- value: number;
150
- };
151
- wordsCount: {
152
- value: number;
153
- };
154
- sentencesCount: {
155
- value: number;
156
- };
157
- linesCount: {
158
- value: number;
159
- };
160
- paragraphsCount: {
161
- value: number;
162
- };
163
- pagesCount: {
164
- value: number;
165
- };
166
- };
167
- };
168
- }[];
169
- } | {
170
- pipelineUrl: string;
59
+ preparations: never[];
171
60
  sourceFile: string;
61
+ } | {
172
62
  title: string;
63
+ pipelineUrl: string;
173
64
  promptbookVersion: string;
174
65
  parameters: {
175
66
  name: string;
@@ -193,60 +84,7 @@ declare const _default: ({
193
84
  knowledgeSources: never[];
194
85
  knowledgePieces: never[];
195
86
  personas: never[];
196
- preparations: {
197
- id: number;
198
- promptbookVersion: string;
199
- usage: {
200
- price: {
201
- value: number;
202
- };
203
- input: {
204
- tokensCount: {
205
- value: number;
206
- };
207
- charactersCount: {
208
- value: number;
209
- };
210
- wordsCount: {
211
- value: number;
212
- };
213
- sentencesCount: {
214
- value: number;
215
- };
216
- linesCount: {
217
- value: number;
218
- };
219
- paragraphsCount: {
220
- value: number;
221
- };
222
- pagesCount: {
223
- value: number;
224
- };
225
- };
226
- output: {
227
- tokensCount: {
228
- value: number;
229
- };
230
- charactersCount: {
231
- value: number;
232
- };
233
- wordsCount: {
234
- value: number;
235
- };
236
- sentencesCount: {
237
- value: number;
238
- };
239
- linesCount: {
240
- value: number;
241
- };
242
- paragraphsCount: {
243
- value: number;
244
- };
245
- pagesCount: {
246
- value: number;
247
- };
248
- };
249
- };
250
- }[];
87
+ preparations: never[];
88
+ sourceFile: string;
251
89
  })[];
252
90
  export default _default;
@@ -1,8 +1,14 @@
1
1
  import { PROMPTBOOK_VERSION } from '../version';
2
2
  import { createCollectionFromDirectory } from '../collection/constructors/createCollectionFromDirectory';
3
+ import { LLM_CONFIGURATION_BOILERPLATES } from '../llm-providers/_common/config';
4
+ import { createLlmToolsFromConfiguration } from '../llm-providers/_common/createLlmToolsFromConfiguration';
5
+ import { createLlmToolsFromConfigurationFromEnv } from '../llm-providers/_common/createLlmToolsFromConfigurationFromEnv';
3
6
  import { createLlmToolsFromEnv } from '../llm-providers/_common/createLlmToolsFromEnv';
4
7
  import { FilesStorage } from '../storage/files-storage/FilesStorage';
5
8
  export { PROMPTBOOK_VERSION };
6
9
  export { createCollectionFromDirectory };
10
+ export { LLM_CONFIGURATION_BOILERPLATES };
11
+ export { createLlmToolsFromConfiguration };
12
+ export { createLlmToolsFromConfigurationFromEnv };
7
13
  export { createLlmToolsFromEnv };
8
14
  export { FilesStorage };
@@ -30,6 +30,7 @@ import type { UncertainNumber } from '../execution/UncertainNumber';
30
30
  import type { UserInterfaceTools } from '../execution/UserInterfaceTools';
31
31
  import type { UserInterfaceToolsPromptDialogOptions } from '../execution/UserInterfaceTools';
32
32
  import type { CallbackInterfaceToolsOptions } from '../knowledge/dialogs/callback/CallbackInterfaceToolsOptions';
33
+ import type { LlmToolsConfiguration } from '../llm-providers/_common/LlmConfiguration';
33
34
  import type { CacheItem } from '../llm-providers/_common/utils/cache/CacheItem';
34
35
  import type { CacheLlmToolsOptions } from '../llm-providers/_common/utils/cache/CacheLlmToolsOptions';
35
36
  import type { LlmExecutionToolsWithTotalUsage } from '../llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
@@ -104,7 +105,7 @@ import type { string_reserved_parameter_name } from '../types/typeAliases';
104
105
  import type { ReservedParameters } from '../types/typeAliases';
105
106
  import type { string_title } from '../types/typeAliases';
106
107
  import type { string_persona_description } from '../types/typeAliases';
107
- import type { string_knowledge_source } from '../types/typeAliases';
108
+ import type { string_knowledge_source_content } from '../types/typeAliases';
108
109
  import type { string_knowledge_source_link } from '../types/typeAliases';
109
110
  import type { string_html } from '../types/typeAliases';
110
111
  import type { string_xml } from '../types/typeAliases';
@@ -237,6 +238,8 @@ export type { UncertainNumber };
237
238
  export type { UserInterfaceTools };
238
239
  export type { UserInterfaceToolsPromptDialogOptions };
239
240
  export type { CallbackInterfaceToolsOptions };
241
+ export type { LlmToolsConfiguration };
242
+ export type { LlmToolsConfiguration };
240
243
  export type { CacheItem };
241
244
  export type { CacheLlmToolsOptions };
242
245
  export type { LlmExecutionToolsWithTotalUsage };
@@ -311,7 +314,7 @@ export type { string_reserved_parameter_name };
311
314
  export type { ReservedParameters };
312
315
  export type { string_title };
313
316
  export type { string_persona_description };
314
- export type { string_knowledge_source };
317
+ export type { string_knowledge_source_content };
315
318
  export type { string_knowledge_source_link };
316
319
  export type { string_html };
317
320
  export type { string_xml };
@@ -1,4 +1,4 @@
1
- import type { string_knowledge_source } from '../../types/typeAliases';
1
+ import type { string_knowledge_source_content } from '../../types/typeAliases';
2
2
  /**
3
3
  * Parsed KNOWLEDGE command
4
4
  *
@@ -7,5 +7,5 @@ import type { string_knowledge_source } from '../../types/typeAliases';
7
7
  */
8
8
  export type KnowledgeCommand = {
9
9
  readonly type: 'KNOWLEDGE';
10
- readonly source: string_knowledge_source;
10
+ readonly sourceContent: string_knowledge_source_content;
11
11
  };
@@ -0,0 +1,28 @@
1
+ import type { string_title } from '../../types/typeAliases';
2
+ import type { TODO_object } from '../../utils/organization/TODO_object';
3
+ import type { TODO_string } from '../../utils/organization/TODO_string';
4
+ /**
5
+ * @@@
6
+ */
7
+ export type LlmToolsConfiguration = Array<{
8
+ /**
9
+ * @@@
10
+ */
11
+ title: string_title;
12
+ /**
13
+ * @@@
14
+ */
15
+ packageName: TODO_string;
16
+ /**
17
+ * @@@
18
+ */
19
+ className: TODO_string;
20
+ /**
21
+ * @@@
22
+ */
23
+ options: TODO_object;
24
+ }>;
25
+ /**
26
+ * TODO: [🧠][🌰] `title` is redundant BUT maybe allow each provider pass it's own title for tracking purposes
27
+ * TODO: [🧠] Maybe add option for `constructorName` instead of `className`
28
+ */
@@ -0,0 +1,22 @@
1
+ import type { TODO_object } from '../../utils/organization/TODO_object';
2
+ import type { TODO_string } from '../../utils/organization/TODO_string';
3
+ /**
4
+ * @@@
5
+ */
6
+ export type LlmToolsConfiguration = Array<{
7
+ /**
8
+ * @@@
9
+ */
10
+ packageName: TODO_string;
11
+ /**
12
+ * @@@
13
+ */
14
+ className: TODO_string;
15
+ /**
16
+ * @@@
17
+ */
18
+ options: TODO_object;
19
+ }>;
20
+ /**
21
+ * TODO: [🧠] Maybe add option for `constructorName` instead of `className`
22
+ */
@@ -0,0 +1,15 @@
1
+ import type { TODO_any } from '../../utils/organization/TODO_any';
2
+ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
+ import type { LlmToolsConfiguration } from './LlmConfiguration';
4
+ /**
5
+ * @public exported from `@promptbook/node`
6
+ */
7
+ export declare const LLM_CONFIGURATION_BOILERPLATES: LlmToolsConfiguration;
8
+ /**
9
+ * @private internal type for `createLlmToolsFromConfiguration`
10
+ */
11
+ export declare const EXECUTION_TOOLS_CLASSES: Record<`get${string}`, (options: TODO_any) => LlmExecutionTools>;
12
+ /**
13
+ * TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
14
+ * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
15
+ */
@@ -0,0 +1,32 @@
1
+ import { MultipleLlmExecutionTools } from '../multiple/MultipleLlmExecutionTools';
2
+ import type { LlmToolsConfiguration } from './LlmConfiguration';
3
+ /**
4
+ * Options for `createLlmToolsFromEnv`
5
+ *
6
+ * @private internal type for `createLlmToolsFromEnv` and `getLlmToolsForTestingAndScriptsAndPlayground`
7
+ */
8
+ export type CreateLlmToolsFromConfigurationOptions = {
9
+ /**
10
+ * This will will be passed to the created `LlmExecutionTools`
11
+ *
12
+ * @default false
13
+ */
14
+ isVerbose?: boolean;
15
+ };
16
+ /**
17
+ * @@@
18
+ *
19
+ * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
20
+ *
21
+ * @returns @@@
22
+ * @public exported from `@promptbook/node`
23
+ */
24
+ export declare function createLlmToolsFromConfiguration(configuration: LlmToolsConfiguration, options?: CreateLlmToolsFromConfigurationOptions): MultipleLlmExecutionTools;
25
+ /**
26
+ * TODO: [🧠][🎌] Dynamically install required providers
27
+ * TODO: @@@ write discussion about this - wizzard
28
+ * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
29
+ * TODO: [🧠] Is there some meaningfull way how to test this util
30
+ * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
31
+ * TODO: This should be maybe not under `_common` but under `utils`
32
+ */
@@ -0,0 +1,23 @@
1
+ import type { LlmToolsConfiguration } from './LlmConfiguration';
2
+ /**
3
+ * @@@
4
+ *
5
+ * @@@ .env
6
+ *
7
+ * It looks for environment variables:
8
+ * - `process.env.OPENAI_API_KEY`
9
+ * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
10
+ *
11
+ * @returns @@@
12
+ * @public exported from `@promptbook/node`
13
+ */
14
+ export declare function createLlmToolsFromConfigurationFromEnv(): LlmToolsConfiguration;
15
+ /**
16
+ * TODO: Add Azure OpenAI
17
+ * TODO: [🧠][🍛]
18
+ * TODO: [🧠] Is there some meaningfull way how to test this util
19
+ * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
20
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
21
+ * TODO: This should be maybe not under `_common` but under `utils`
22
+ * TODO: [🧠] Maybe pass env as argument
23
+ */
@@ -1,21 +1,9 @@
1
- import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
- /**
3
- * Options for `createLlmToolsFromEnv`
4
- *
5
- * @private internal type for `createLlmToolsFromEnv` and `getLlmToolsForTestingAndScriptsAndPlayground`
6
- */
7
- export type CreateLlmToolsFromEnvOptions = {
8
- /**
9
- * This will will be passed to the created `LlmExecutionTools`
10
- *
11
- * @default false
12
- */
13
- isVerbose?: boolean;
14
- };
1
+ import { MultipleLlmExecutionTools } from '../multiple/MultipleLlmExecutionTools';
2
+ import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFromConfiguration';
15
3
  /**
16
4
  * @@@
17
5
  *
18
- * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
6
+ * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
19
7
  *
20
8
  * @@@ .env
21
9
  *
@@ -26,15 +14,12 @@ export type CreateLlmToolsFromEnvOptions = {
26
14
  * @returns @@@
27
15
  * @public exported from `@promptbook/node`
28
16
  */
29
- export declare function createLlmToolsFromEnv(options?: CreateLlmToolsFromEnvOptions): LlmExecutionTools;
17
+ export declare function createLlmToolsFromEnv(options?: CreateLlmToolsFromConfigurationOptions): MultipleLlmExecutionTools;
30
18
  /**
31
- * TODO: [🍜] Use `createLlmToolsFromConfiguration`
32
- * TODO: @@@ write discussion about this - wizzard
33
- * TODO: Add Azure
34
- * TODO: [🧠] Which name is better `createLlmToolsFromEnv` or `createLlmToolsFromEnvironment`?
19
+ * TODO: @@@ write `createLlmToolsFromEnv` vs `createLlmToolsFromConfigurationFromEnv` vs `createLlmToolsFromConfiguration`
20
+ * TODO: [🧠][🍛] Which name is better `createLlmToolsFromEnv` or `createLlmToolsFromEnvironment`?
35
21
  * TODO: [🧠] Is there some meaningfull way how to test this util
36
- * TODO: [🧠] Maybe pass env as argument
37
22
  * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
38
- * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
39
23
  * TODO: [🥃] Allow `ptbk make` without llm tools
24
+ * TODO: This should be maybe not under `_common` but under `utils`
40
25
  */
@@ -18,4 +18,5 @@ export {};
18
18
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
19
19
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
20
20
  * TODO: [🥃] Allow `ptbk make` without llm tools
21
+ * TODO: This should be maybe not under `_common` but under `utils-internal` / `utils/internal`
21
22
  */
@@ -1,6 +1,6 @@
1
- import type { CreateLlmToolsFromEnvOptions } from './createLlmToolsFromEnv';
1
+ import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFromConfiguration';
2
2
  import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
3
- type GetLlmToolsForTestingAndScriptsAndPlaygroundOptions = CreateLlmToolsFromEnvOptions & {
3
+ type GetLlmToolsForTestingAndScriptsAndPlaygroundOptions = CreateLlmToolsFromConfigurationOptions & {
4
4
  /**
5
5
  * @@@
6
6
  *
@@ -18,4 +18,5 @@ export {};
18
18
  /**
19
19
  * Note: [⚪] This should never be in any released package
20
20
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
21
+ * TODO: This should be maybe not under `_common` but under `utils-internal` / `utils/internal`
21
22
  */
@@ -49,4 +49,5 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
49
49
  * TODO: Maybe make custom OpenaiError
50
50
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
51
51
  * TODO: [🍜] Auto use anonymous server in browser
52
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
52
53
  */
@@ -47,4 +47,5 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
47
47
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
48
48
  * TODO: Maybe make custom AzureOpenaiError
49
49
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
50
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
50
51
  */
@@ -42,8 +42,10 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
42
42
  callEmbeddingModel(prompt: EmbeddingPrompt): Promise<EmbeddingPromptResult>;
43
43
  /**
44
44
  * Calls the best available model
45
+ *
46
+ * Note: This should be private or protected but is public to be usable with duck typing
45
47
  */
46
- protected callCommonModel(prompt: Prompt): Promise<PromptResult>;
48
+ callCommonModel(prompt: Prompt): Promise<PromptResult>;
47
49
  /**
48
50
  * List all available models that can be used
49
51
  * This lists is a combination of all available models from all execution tools
@@ -65,4 +65,5 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
65
65
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
66
66
  * TODO: Maybe make custom OpenaiError
67
67
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
68
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
68
69
  */
@@ -53,4 +53,5 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
53
53
  * TODO: [🍓] Allow to list compatible models with each variant
54
54
  * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
55
55
  * TODO: [🍜] Add anonymous option
56
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
56
57
  */
@@ -5,6 +5,7 @@ import type { PrepareOptions } from './PrepareOptions';
5
5
  *
6
6
  * Note: This function does not validate logic of the pipeline
7
7
  * Note: This function acts as part of compilation process
8
+ * Note: When the pipeline is already prepared, it returns the same pipeline
8
9
  * @public exported from `@promptbook/core`
9
10
  */
10
11
  export declare function preparePipeline(pipeline: PipelineJson, options: PrepareOptions): Promise<PipelineJson>;
@@ -1,5 +1,5 @@
1
1
  import type { number_id } from '../typeAliases';
2
- import type { string_knowledge_source } from '../typeAliases';
2
+ import type { string_knowledge_source_content } from '../typeAliases';
3
3
  import type { string_name } from '../typeAliases';
4
4
  /**
5
5
  * Defines one source of knowledge in the pipeline
@@ -8,8 +8,14 @@ import type { string_name } from '../typeAliases';
8
8
  * @see https://github.com/webgptorg/promptbook/discussions/41
9
9
  */
10
10
  export type KnowledgeSourceJson = {
11
+ /**
12
+ * @@@
13
+ */
11
14
  readonly name: string_name;
12
- readonly source: string_knowledge_source;
15
+ /**
16
+ * @@@
17
+ */
18
+ readonly sourceContent: string_knowledge_source_content;
13
19
  };
14
20
  /**
15
21
  * Defines one source of knowledge in the pipeline after it has been prepared
@@ -85,7 +85,7 @@ export type string_name = string;
85
85
  * Semantic helper
86
86
  * Unique identifier of anything
87
87
  *
88
- * For example `"eventName"`
88
+ * For example `"eventTitle"`
89
89
  */
90
90
  export type string_parameter_name = string_name;
91
91
  /**
@@ -142,7 +142,7 @@ export type string_persona_description = string;
142
142
  *
143
143
  * @@@ string_knowledge_source vs string_knowledge_source_link
144
144
  */
145
- export type string_knowledge_source = string_knowledge_source_link | string_markdown;
145
+ export type string_knowledge_source_content = string_knowledge_source_link | string_markdown;
146
146
  /**
147
147
  * One link to knowledge source
148
148
  *
@@ -0,0 +1,6 @@
1
+ /**
2
+ * Organizational helper to better mark the place where the more specific string type is missing
3
+ *
4
+ * @private within the repository
5
+ */
6
+ export type TODO_string = string;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/remote-server",
3
- "version": "0.63.3",
3
+ "version": "0.64.0-0",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -47,7 +47,7 @@
47
47
  "module": "./esm/index.es.js",
48
48
  "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
49
49
  "peerDependencies": {
50
- "@promptbook/core": "0.63.3"
50
+ "@promptbook/core": "0.64.0-0"
51
51
  },
52
52
  "dependencies": {
53
53
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -13,7 +13,7 @@
13
13
  /**
14
14
  * The version of the Promptbook library
15
15
  */
16
- var PROMPTBOOK_VERSION = '0.63.2';
16
+ var PROMPTBOOK_VERSION = '0.63.4';
17
17
  // TODO: !!!! List here all the versions and annotate + put into script
18
18
 
19
19
  /*! *****************************************************************************