@promptbook/cli 0.65.0 → 0.66.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33) hide show
  1. package/README.md +4 -1
  2. package/esm/index.es.js +137 -61
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +6 -2
  6. package/esm/typings/src/_packages/utils.index.d.ts +10 -8
  7. package/esm/typings/src/config.d.ts +22 -0
  8. package/esm/typings/src/execution/LlmExecutionTools.d.ts +11 -5
  9. package/esm/typings/src/llm-providers/_common/config.d.ts +1 -6
  10. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +2 -2
  11. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +18 -0
  12. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.test.d.ts +4 -0
  13. package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +4 -0
  14. package/esm/typings/src/llm-providers/mocked/fakeTextToExpectations.d.ts +1 -0
  15. package/esm/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.d.ts +5 -1
  17. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +3 -0
  18. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +2 -1
  19. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +4 -1
  20. package/esm/typings/src/utils/currentDate.d.ts +2 -0
  21. package/esm/typings/src/utils/deepFreeze.d.ts +2 -1
  22. package/esm/typings/src/utils/environment/getGlobalScope.d.ts +12 -0
  23. package/esm/typings/src/utils/environment/isRunningInBrowser.d.ts +8 -0
  24. package/esm/typings/src/utils/environment/isRunningInNode.d.ts +8 -0
  25. package/esm/typings/src/utils/environment/isRunningInWebWorker.d.ts +8 -0
  26. package/esm/typings/src/utils/files/isDirectoryExisting.d.ts +3 -1
  27. package/esm/typings/src/utils/files/isFileExisting.d.ts +3 -1
  28. package/esm/typings/src/utils/files/listAllFiles.d.ts +3 -1
  29. package/esm/typings/src/utils/random/randomSeed.d.ts +1 -0
  30. package/package.json +3 -3
  31. package/umd/index.umd.js +137 -61
  32. package/umd/index.umd.js.map +1 -1
  33. package/esm/typings/src/utils/isRunningInWhatever.d.ts +0 -18
@@ -5,6 +5,7 @@ import type { AnthropicClaudeExecutionToolsOptions } from '../llm-providers/anth
5
5
  import type { AnthropicClaudeExecutionToolsDirectOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
6
6
  import type { AnthropicClaudeExecutionToolsProxiedOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
7
7
  import { createAnthropicClaudeExecutionTools } from '../llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools';
8
+ import { _ } from '../llm-providers/anthropic-claude/register1';
8
9
  export { PROMPTBOOK_VERSION };
9
10
  export { ANTHROPIC_CLAUDE_MODELS };
10
11
  export { AnthropicClaudeExecutionTools };
@@ -12,3 +13,4 @@ export type { AnthropicClaudeExecutionToolsOptions };
12
13
  export type { AnthropicClaudeExecutionToolsDirectOptions };
13
14
  export type { AnthropicClaudeExecutionToolsProxiedOptions };
14
15
  export { createAnthropicClaudeExecutionTools };
16
+ export { _ };
@@ -15,6 +15,9 @@ import { MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL } from '../config';
15
15
  import { EXECUTIONS_CACHE_DIRNAME } from '../config';
16
16
  import { PIPELINE_COLLECTION_BASE_FILENAME } from '../config';
17
17
  import { RESERVED_PARAMETER_NAMES } from '../config';
18
+ import { DEFAULT_REMOTE_URL } from '../config';
19
+ import { DEFAULT_REMOTE_URL_PATH } from '../config';
20
+ import { BOILERPLATE_LLM_TOOLS_CONFIGURATION_ } from '../config';
18
21
  import { pipelineJsonToString } from '../conversion/pipelineJsonToString';
19
22
  import type { PipelineStringToJsonOptions } from '../conversion/pipelineStringToJson';
20
23
  import { pipelineStringToJson } from '../conversion/pipelineStringToJson';
@@ -45,7 +48,6 @@ import { CallbackInterfaceTools } from '../knowledge/dialogs/callback/CallbackIn
45
48
  import type { CallbackInterfaceToolsOptions } from '../knowledge/dialogs/callback/CallbackInterfaceToolsOptions';
46
49
  import { prepareKnowledgePieces } from '../knowledge/prepare-knowledge/_common/prepareKnowledgePieces';
47
50
  import { prepareKnowledgeFromMarkdown } from '../knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown';
48
- import { LLM_CONFIGURATION_BOILERPLATES } from '../llm-providers/_common/config';
49
51
  import { createLlmToolsFromConfiguration } from '../llm-providers/_common/createLlmToolsFromConfiguration';
50
52
  import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
51
53
  import { countTotalUsage } from '../llm-providers/_common/utils/count-total-usage/countTotalUsage';
@@ -80,6 +82,9 @@ export { MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL };
80
82
  export { EXECUTIONS_CACHE_DIRNAME };
81
83
  export { PIPELINE_COLLECTION_BASE_FILENAME };
82
84
  export { RESERVED_PARAMETER_NAMES };
85
+ export { DEFAULT_REMOTE_URL };
86
+ export { DEFAULT_REMOTE_URL_PATH };
87
+ export { BOILERPLATE_LLM_TOOLS_CONFIGURATION_ };
83
88
  export { pipelineJsonToString };
84
89
  export type { PipelineStringToJsonOptions };
85
90
  export { pipelineStringToJson };
@@ -110,7 +115,6 @@ export { CallbackInterfaceTools };
110
115
  export type { CallbackInterfaceToolsOptions };
111
116
  export { prepareKnowledgePieces };
112
117
  export { prepareKnowledgeFromMarkdown };
113
- export { LLM_CONFIGURATION_BOILERPLATES };
114
118
  export { createLlmToolsFromConfiguration };
115
119
  export { cacheLlmTools };
116
120
  export { countTotalUsage };
@@ -10,7 +10,11 @@ import { extractBlock } from '../postprocessing/utils/extractBlock';
10
10
  import { clonePipeline } from '../utils/clonePipeline';
11
11
  import { $currentDate } from '../utils/currentDate';
12
12
  import { deepClone } from '../utils/deepClone';
13
- import { deepFreeze } from '../utils/deepFreeze';
13
+ import { $deepFreeze } from '../utils/deepFreeze';
14
+ import { $getGlobalScope } from '../utils/environment/getGlobalScope';
15
+ import { $isRunningInBrowser } from '../utils/environment/isRunningInBrowser';
16
+ import { $isRunningInNode } from '../utils/environment/isRunningInNode';
17
+ import { $isRunningInWebWorker } from '../utils/environment/isRunningInWebWorker';
14
18
  import { countCharacters } from '../utils/expectation-counters/countCharacters';
15
19
  import { countLines } from '../utils/expectation-counters/countLines';
16
20
  import { countPages } from '../utils/expectation-counters/countPages';
@@ -20,9 +24,6 @@ import { countSentences } from '../utils/expectation-counters/countSentences';
20
24
  import { countWords } from '../utils/expectation-counters/countWords';
21
25
  import { CountUtils } from '../utils/expectation-counters/index';
22
26
  import { extractParameterNames } from '../utils/extractParameterNames';
23
- import { isRunningInBrowser } from '../utils/isRunningInWhatever';
24
- import { isRunningInNode } from '../utils/isRunningInWhatever';
25
- import { isRunningInWebWorker } from '../utils/isRunningInWhatever';
26
27
  import { capitalize } from '../utils/normalization/capitalize';
27
28
  import { decapitalize } from '../utils/normalization/decapitalize';
28
29
  import { DIACRITIC_VARIANTS_LETTERS } from '../utils/normalization/DIACRITIC_VARIANTS_LETTERS';
@@ -78,7 +79,11 @@ export { extractBlock };
78
79
  export { clonePipeline };
79
80
  export { $currentDate };
80
81
  export { deepClone };
81
- export { deepFreeze };
82
+ export { $deepFreeze };
83
+ export { $getGlobalScope };
84
+ export { $isRunningInBrowser };
85
+ export { $isRunningInNode };
86
+ export { $isRunningInWebWorker };
82
87
  export { countCharacters };
83
88
  export { countLines };
84
89
  export { countPages };
@@ -88,9 +93,6 @@ export { countSentences };
88
93
  export { countWords };
89
94
  export { CountUtils };
90
95
  export { extractParameterNames };
91
- export { isRunningInBrowser };
92
- export { isRunningInNode };
93
- export { isRunningInWebWorker };
94
96
  export { capitalize };
95
97
  export { decapitalize };
96
98
  export { DIACRITIC_VARIANTS_LETTERS };
@@ -1,3 +1,4 @@
1
+ import type { LlmToolsConfiguration } from './llm-providers/_common/LlmToolsConfiguration';
1
2
  /**
2
3
  * Warning message for the generated sections and files
3
4
  *
@@ -107,9 +108,30 @@ export declare const RESERVED_PARAMETER_RESTRICTED: string;
107
108
  export declare const MOMENT_ARG_THRESHOLDS: {
108
109
  readonly ss: 3;
109
110
  };
111
+ /**
112
+ * @@@
113
+ *
114
+ * @public exported from `@promptbook/core`
115
+ */
116
+ export declare const DEFAULT_REMOTE_URL = "https://api.pavolhejny.com/";
117
+ /**
118
+ * @@@
119
+ *
120
+ * @public exported from `@promptbook/core`
121
+ */
122
+ export declare const DEFAULT_REMOTE_URL_PATH = "/promptbook/socket.io";
123
+ /**
124
+ * @@@
125
+ *
126
+ * @public exported from `@promptbook/core`
127
+ */
128
+ export declare const BOILERPLATE_LLM_TOOLS_CONFIGURATION_: LlmToolsConfiguration;
110
129
  /**
111
130
  * @@@
112
131
  *
113
132
  * @private within the repository
114
133
  */
115
134
  export declare const DEBUG_ALLOW_PAYED_TESTING: boolean;
135
+ /**
136
+ * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
137
+ */
@@ -28,6 +28,16 @@ export type LlmExecutionTools = {
28
28
  * @example "Use all models from OpenAI"
29
29
  */
30
30
  readonly description: string_markdown;
31
+ /**
32
+ * Check configuration
33
+ *
34
+ * @returns nothing if configuration is correct
35
+ * @throws {Error} if configuration is incorrect
36
+ */
37
+ /**
38
+ * List all available models that can be used
39
+ */
40
+ listModels(): Promisable<Array<AvailableModel>>;
31
41
  /**
32
42
  * Calls a chat model
33
43
  */
@@ -40,10 +50,6 @@ export type LlmExecutionTools = {
40
50
  * Calls an embedding model
41
51
  */
42
52
  callEmbeddingModel?(prompt: Prompt): Promise<EmbeddingPromptResult>;
43
- /**
44
- * List all available models that can be used
45
- */
46
- listModels(): Promisable<Array<AvailableModel>>;
47
53
  };
48
54
  /**
49
55
  * Represents a model that can be used for prompt execution
@@ -63,8 +69,8 @@ export type AvailableModel = {
63
69
  readonly modelVariant: ModelVariant;
64
70
  };
65
71
  /**
72
+ * TODO: Implement destroyable pattern to free resources
66
73
  * TODO: [🏳] Add `callTranslationModel`
67
- * TODO: Maybe reorder `listModels` and put it befor `callChatModel`, `callCompletionModel`, `callEmbeddingModel`
68
74
  * TODO: [🧠] Emulation of one type of model with another one - emulate chat with completion; emulate translation with chat
69
75
  * TODO: [🍓][♐] Some heuristic to pick the best model in listed models
70
76
  * TODO: [🧠] Should or should not there be a word "GPT" in both callCompletionModel and callChatModel
@@ -1,15 +1,10 @@
1
1
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
2
  import type { TODO_any } from '../../utils/organization/TODO_any';
3
- import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
4
- /**
5
- * @public exported from `@promptbook/core`
6
- */
7
- export declare const LLM_CONFIGURATION_BOILERPLATES: LlmToolsConfiguration;
8
3
  /**
9
4
  * @private internal type for `createLlmToolsFromConfiguration`
10
5
  */
11
6
  export declare const EXECUTION_TOOLS_CLASSES: Record<`create${string}`, (options: TODO_any) => LlmExecutionTools>;
12
7
  /**
13
- * TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
8
+ * TODO: !!!!!!! Make global register for this
14
9
  * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
15
10
  */
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
3
3
  /**
4
4
  * List of available Anthropic Claude models with pricing
5
5
  *
6
- * Note: Done at 2024-05-25
6
+ * Note: Done at 2024-08-16
7
7
  *
8
8
  * @see https://docs.anthropic.com/en/docs/models-overview
9
9
  * @public exported from `@promptbook/anthropic-claude`
@@ -19,5 +19,5 @@ export declare const ANTHROPIC_CLAUDE_MODELS: Array<AvailableModel & {
19
19
  * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
20
20
  * TODO: [🧠] Some mechanism to propagate unsureness
21
21
  * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
22
- * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
22
+ * TODO: [🎰] Some mechanism to auto-update available models
23
23
  */
@@ -0,0 +1,18 @@
1
+ import type Anthropic from '@anthropic-ai/sdk';
2
+ import type { PartialDeep } from 'type-fest';
3
+ import type { PromptResultUsage } from '../../execution/PromptResultUsage';
4
+ import type { Prompt } from '../../types/Prompt';
5
+ /**
6
+ * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
7
+ *
8
+ * @param promptContent The content of the prompt
9
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
10
+ * @param rawResponse The raw response from Anthropic Claude API
11
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
12
+ * @private internal utility of `AnthropicClaudeExecutionTools`
13
+ */
14
+ export declare function computeAnthropicClaudeUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
15
+ resultContent: string, rawResponse: PartialDeep<Pick<Anthropic.Messages.Message, 'model' | 'usage'>>): PromptResultUsage;
16
+ /**
17
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
18
+ */
@@ -0,0 +1,4 @@
1
+ export {};
2
+ /**
3
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
4
+ */
@@ -0,0 +1,4 @@
1
+ /**
2
+ * @public exported from `@promptbook/anthropic-claude`
3
+ */
4
+ export declare const _: undefined;
@@ -3,6 +3,7 @@ import type { Expectations } from '../../types/PipelineJson/Expectations';
3
3
  /**
4
4
  * Gets the expectations and creates a fake text that meets the expectations
5
5
  *
6
+ * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic
6
7
  * Note: You can provide postprocessing functions to modify the text before checking the expectations
7
8
  * The result will be the text BEFORE the postprocessing
8
9
  *
@@ -18,4 +18,4 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
18
18
  export declare function joinLlmExecutionTools(...llmExecutionTools: Array<LlmExecutionTools>): MultipleLlmExecutionTools;
19
19
  /**
20
20
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
21
- */
21
+ */
@@ -1,4 +1,5 @@
1
1
  import type OpenAI from 'openai';
2
+ import type { PartialDeep } from 'type-fest';
2
3
  import type { PromptResultUsage } from '../../execution/PromptResultUsage';
3
4
  import type { Prompt } from '../../types/Prompt';
4
5
  /**
@@ -11,4 +12,7 @@ import type { Prompt } from '../../types/Prompt';
11
12
  * @private internal utility of `OpenAiExecutionTools`
12
13
  */
13
14
  export declare function computeOpenaiUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
14
- resultContent: string, rawResponse: Pick<OpenAI.Chat.Completions.ChatCompletion | OpenAI.Completions.Completion | OpenAI.Embeddings.CreateEmbeddingResponse, 'model' | 'usage'>): PromptResultUsage;
15
+ resultContent: string, rawResponse: PartialDeep<Pick<OpenAI.Chat.Completions.ChatCompletion | OpenAI.Completions.Completion | OpenAI.Embeddings.CreateEmbeddingResponse, 'model' | 'usage'>>): PromptResultUsage;
16
+ /**
17
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
18
+ */
@@ -1 +1,4 @@
1
1
  export {};
2
+ /**
3
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
4
+ */
@@ -18,7 +18,8 @@ export declare const OPENAI_MODELS: Array<AvailableModel & {
18
18
  /**
19
19
  * Note: [🤖] Add models of new variant
20
20
  * TODO: [🧠] Some mechanism to propagate unsureness
21
- * TODO: [🕚][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
21
+ * TODO: [🎰] Some mechanism to auto-update available models
22
+ * TODO: [🎰][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
22
23
  * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
23
24
  * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
24
25
  * @see https://openai.com/api/pricing/
@@ -25,7 +25,7 @@ export type RemoteLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
25
25
  /**
26
26
  * If set, only these models will be listed as available
27
27
  *
28
- * TODO: [🧠] !!!! Figure out better solution
28
+ * TODO: [🧠] !!!!!! Figure out better solution
29
29
  */
30
30
  readonly models?: Array<AvailableModel>;
31
31
  /**
@@ -51,3 +51,6 @@ export type RemoteLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
51
51
  */
52
52
  readonly clientId: client_id;
53
53
  });
54
+ /**
55
+ * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
56
+ */
@@ -2,6 +2,8 @@ import type { string_date_iso8601 } from '../types/typeAliases';
2
2
  /**
3
3
  * Simple wrapper `new Date().toISOString()`
4
4
  *
5
+ * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
6
+ *
5
7
  * @returns string_date branded type
6
8
  * @public exported from `@promptbook/utils`
7
9
  */
@@ -2,12 +2,13 @@ import type { ReadonlyDeep } from 'type-fest';
2
2
  /**
3
3
  * @@@
4
4
  *
5
+ * Note: `$` is used to indicate that this function is not a pure function - it mutates given object
5
6
  * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
6
7
  *
7
8
  * @returns The same object as the input, but deeply frozen
8
9
  * @public exported from `@promptbook/utils`
9
10
  */
10
- export declare function deepFreeze<TObject>(objectValue: TObject): ReadonlyDeep<TObject>;
11
+ export declare function $deepFreeze<TObject>(objectValue: TObject): ReadonlyDeep<TObject>;
11
12
  /**
12
13
  * @@@
13
14
  * @@@
@@ -0,0 +1,12 @@
1
+ import type { really_any } from '../organization/really_any';
2
+ /**
3
+ * @@@
4
+ *
5
+ * Note: `$` is used to indicate that this function is not a pure function - it accesses the global scope
6
+ *
7
+ * @public exported from `@promptbook/utils`
8
+ */
9
+ export declare function $getGlobalScope(): really_any;
10
+ /***
11
+ * TODO: !!!!! Make private and promptbook registry from this
12
+ */
@@ -0,0 +1,8 @@
1
+ /**
2
+ * Detects if the code is running in a browser environment in the main thread (not in a web worker)
3
+ *
4
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
5
+ *
6
+ * @public exported from `@promptbook/utils`
7
+ */
8
+ export declare const $isRunningInBrowser: Function;
@@ -0,0 +1,8 @@
1
+ /**
2
+ * Detects if the code is running in a Node.js environment
3
+ *
4
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
5
+ *
6
+ * @public exported from `@promptbook/utils`
7
+ */
8
+ export declare const $isRunningInNode: Function;
@@ -0,0 +1,8 @@
1
+ /**
2
+ * Detects if the code is running in a web worker
3
+ *
4
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
5
+ *
6
+ * @public exported from `@promptbook/utils`
7
+ */
8
+ export declare const $isRunningInWebWorker: Function;
@@ -2,9 +2,11 @@ import type { string_folder_path } from '../../types/typeAliases';
2
2
  /**
3
3
  * Checks if the directory exists
4
4
  *
5
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the filesystem
6
+ *
5
7
  * @private within the repository
6
8
  */
7
- export declare function isDirectoryExisting(directoryPath: string_folder_path): Promise<boolean>;
9
+ export declare function $isDirectoryExisting(directoryPath: string_folder_path): Promise<boolean>;
8
10
  /**
9
11
  * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli`
10
12
  * TODO: [🐠] This can be a validator - with variants that return true/false and variants that throw errors with meaningless messages
@@ -2,9 +2,11 @@ import type { string_file_path } from '../../types/typeAliases';
2
2
  /**
3
3
  * Checks if the file exists
4
4
  *
5
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the filesystem
6
+ *
5
7
  * @private within the repository
6
8
  */
7
- export declare function isFileExisting(filePath: string_file_path): Promise<boolean>;
9
+ export declare function $isFileExisting(filePath: string_file_path): Promise<boolean>;
8
10
  /**
9
11
  * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli`
10
12
  * TODO: [🐠] This can be a validator - with variants that return true/false and variants that throw errors with meaningless messages
@@ -3,12 +3,14 @@ import type { string_folder_path } from '../../types/typeAliases';
3
3
  /**
4
4
  * Reads all files in the directory
5
5
  *
6
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the filesystem
7
+ *
6
8
  * @param path
7
9
  * @param isRecursive
8
10
  * @returns List of all files in the directory
9
11
  * @private internal function of `createCollectionFromDirectory`
10
12
  */
11
- export declare function listAllFiles(path: string_folder_path, isRecursive: boolean): Promise<Array<string_file_path>>;
13
+ export declare function $listAllFiles(path: string_folder_path, isRecursive: boolean): Promise<Array<string_file_path>>;
12
14
  /**
13
15
  * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli`
14
16
  * TODO: [🖇] What about symlinks?
@@ -2,6 +2,7 @@ import type { number_seed } from '../../types/typeAliases';
2
2
  /**
3
3
  * Generates random seed
4
4
  *
5
+ * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic
5
6
  * Warning: This function is not cryptographically secure (it uses Math.random internally)
6
7
  * @public exported from `@promptbook/utils`
7
8
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/cli",
3
- "version": "0.65.0",
3
+ "version": "0.66.0-1",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -44,12 +44,12 @@
44
44
  }
45
45
  ],
46
46
  "dependencies": {
47
- "@anthropic-ai/sdk": "0.21.1",
47
+ "@anthropic-ai/sdk": "0.26.1",
48
48
  "@azure/openai": "1.0.0-beta.12",
49
49
  "colors": "1.4.0",
50
50
  "commander": "12.0.0",
51
51
  "glob-promise": "6.0.5",
52
- "openai": "4.46.1",
52
+ "openai": "4.55.9",
53
53
  "prettier": "2.8.1",
54
54
  "socket.io-client": "4.7.2",
55
55
  "spacetrim": "0.11.39",