@promptbook/openai 0.71.0-0 → 0.72.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/README.md +5 -0
  2. package/esm/index.es.js +306 -28
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/cli.index.d.ts +4 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +6 -2
  6. package/esm/typings/src/_packages/openai.index.d.ts +8 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +2 -0
  8. package/esm/typings/src/execution/createPipelineExecutor/10-executePipeline.d.ts +1 -1
  9. package/esm/typings/src/execution/translation/automatic-translate/automatic-translators/LindatAutomaticTranslator.d.ts +1 -1
  10. package/esm/typings/src/execution/utils/addUsage.d.ts +0 -56
  11. package/esm/typings/src/execution/utils/usage-constants.d.ts +127 -0
  12. package/esm/typings/src/knowledge/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
  13. package/esm/typings/src/knowledge/dialogs/simple-prompt/SimplePromptInterfaceTools.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -2
  15. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +3 -2
  16. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  18. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +37 -0
  19. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +14 -0
  20. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +12 -2
  21. package/esm/typings/src/llm-providers/openai/createOpenAiAssistantExecutionTools.d.ts +15 -0
  22. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +9 -0
  23. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +9 -0
  24. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  25. package/esm/typings/src/scripting/javascript/JavascriptEvalExecutionTools.d.ts +1 -1
  26. package/esm/typings/src/scripting/python/PythonExecutionTools.d.ts +1 -1
  27. package/esm/typings/src/scripting/typescript/TypescriptExecutionTools.d.ts +1 -1
  28. package/esm/typings/src/storage/files-storage/FilesStorage.d.ts +1 -1
  29. package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +2 -9
  30. package/package.json +2 -2
  31. package/umd/index.umd.js +312 -31
  32. package/umd/index.umd.js.map +1 -1
@@ -13,7 +13,7 @@ import type { string_title } from '../../types/typeAliases';
  * @public exported from `@promptbook/fake-llm`
  */
  export declare class MockedEchoLlmExecutionTools implements LlmExecutionTools {
- private readonly options;
+ protected readonly options: CommonExecutionToolsOptions;
  constructor(options?: CommonExecutionToolsOptions);
  get title(): string_title & string_markdown_text;
  get description(): string_markdown;
@@ -14,7 +14,7 @@ import type { string_title } from '../../types/typeAliases';
  * @public exported from `@promptbook/fake-llm`
  */
  export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools {
- private readonly options;
+ protected readonly options: CommonExecutionToolsOptions;
  constructor(options?: CommonExecutionToolsOptions);
  get title(): string_title & string_markdown_text;
  get description(): string_markdown;
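Many hunks in this diff make the same change: the `options` field moves from an untyped `private readonly options;` to a typed `protected readonly options: ...;`, so subclasses can read the options their parent was constructed with. A minimal sketch of what that enables (the subclass name is hypothetical, not part of the package):

```ts
import { MockedEchoLlmExecutionTools } from '@promptbook/fake-llm';

// Hypothetical subclass, for illustration only:
class VerboseEchoTools extends MockedEchoLlmExecutionTools {
    public describeOptions() {
        // `this.options` is visible here now that it is `protected`,
        // and it is explicitly typed as `CommonExecutionToolsOptions`.
        return JSON.stringify(this.options ?? {});
    }
}

const tools = new VerboseEchoTools(); // the constructor's options stay optional
console.info(tools.describeOptions());
```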
@@ -0,0 +1,37 @@
+ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+ import type { ChatPromptResult } from '../../execution/PromptResult';
+ import type { Prompt } from '../../types/Prompt';
+ import type { string_markdown } from '../../types/typeAliases';
+ import type { string_markdown_text } from '../../types/typeAliases';
+ import type { string_title } from '../../types/typeAliases';
+ import type { OpenAiAssistantExecutionToolsOptions } from './OpenAiAssistantExecutionToolsOptions';
+ import { OpenAiExecutionTools } from './OpenAiExecutionTools';
+ /**
+ * Execution Tools for calling OpenAI API Assistants
+ *
+ * This is usefull for calling OpenAI API with a single assistant, for more wide usage use `OpenAiExecutionTools`.
+ *
+ * @public exported from `@promptbook/openai`
+ */
+ export declare class OpenAiAssistantExecutionTools extends OpenAiExecutionTools implements LlmExecutionTools {
+ private readonly assistantId?;
+ /**
+ * Creates OpenAI Execution Tools.
+ *
+ * @param options which are relevant are directly passed to the OpenAI client
+ */
+ constructor(options: OpenAiAssistantExecutionToolsOptions);
+ get title(): string_title & string_markdown_text;
+ get description(): string_markdown;
+ /**
+ * Calls OpenAI API to use a chat model.
+ */
+ callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'format'>): Promise<ChatPromptResult>;
+ }
+ /**
+ * TODO: !!!!!! DO not use colors - can be used in browser
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
+ * TODO: Maybe make custom OpenAiError
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ */
@@ -0,0 +1,14 @@
+ import type { ClientOptions } from 'openai';
+ import type { string_token } from '../../types/typeAliases';
+ import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
+ /**
+ * Options for `OpenAiAssistantExecutionTools`
+ *
+ * @public exported from `@promptbook/openai`
+ */
+ export type OpenAiAssistantExecutionToolsOptions = OpenAiExecutionToolsOptions & ClientOptions & {
+ /**
+ * Which assistant to use
+ */
+ assistantId: string_token;
+ };
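Taken together, the two new files above define a small assistant-scoped API. A minimal usage sketch, assuming credentials are passed via `apiKey` (inherited from the OpenAI `ClientOptions`), that the assistant id is a placeholder, and that the `Prompt` fields shown are illustrative since their exact types are not part of this diff:

```ts
import { OpenAiAssistantExecutionTools } from '@promptbook/openai';

const assistantTools = new OpenAiAssistantExecutionTools({
    apiKey: process.env.OPENAI_API_KEY!, // standard OpenAI ClientOptions field
    assistantId: 'asst_...', // placeholder; supply your own assistant id
});

// `callChatModel` takes a Pick of Prompt ('content' | 'parameters' | 'modelRequirements' | 'format'):
const result = await assistantTools.callChatModel({
    content: 'Summarize the Promptbook project in one sentence.',
    parameters: {},
    modelRequirements: { modelVariant: 'CHAT' }, // assumed minimal ModelRequirements
} as any); // cast hedges over the exact Prompt field types, which are not visible in this diff

console.info(result.content); // assumed ChatPromptResult field
```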
@@ -1,3 +1,4 @@
+ import OpenAI from 'openai';
  import type { AvailableModel } from '../../execution/AvailableModel';
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
  import type { ChatPromptResult } from '../../execution/PromptResult';
@@ -7,6 +8,8 @@ import type { Prompt } from '../../types/Prompt';
  import type { string_markdown } from '../../types/typeAliases';
  import type { string_markdown_text } from '../../types/typeAliases';
  import type { string_title } from '../../types/typeAliases';
+ import type { string_token } from '../../types/typeAliases';
+ import { OpenAiAssistantExecutionTools } from './OpenAiAssistantExecutionTools';
  import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
  /**
  * Execution Tools for calling OpenAI API
@@ -14,7 +17,7 @@ import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions'
  * @public exported from `@promptbook/openai`
  */
  export declare class OpenAiExecutionTools implements LlmExecutionTools {
- private readonly options;
+ protected readonly options: OpenAiExecutionToolsOptions;
  /**
  * OpenAI API client.
  */
@@ -27,7 +30,14 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
  constructor(options?: OpenAiExecutionToolsOptions);
  get title(): string_title & string_markdown_text;
  get description(): string_markdown;
- private getClient;
+ getClient(): Promise<OpenAI>;
+ /**
+ * Create (sub)tools for calling OpenAI API Assistants
+ *
+ * @param assistantId Which assistant to use
+ * @returns Tools for calling OpenAI API Assistants with same token
+ */
+ createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools;
  /**
  * Check the `options` passed to `constructor`
  */
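`OpenAiExecutionTools` itself now exposes the previously private `getClient` as a public method resolving to the raw `OpenAI` client and gains `createAssistantSubtools`. A sketch of the new surface, assuming `OpenAiExecutionToolsOptions` accepts the usual OpenAI `ClientOptions` fields such as `apiKey` (as the assistant options above do) and using a placeholder assistant id:

```ts
import { OpenAiExecutionTools } from '@promptbook/openai';

const openAiTools = new OpenAiExecutionTools({
    apiKey: process.env.OPENAI_API_KEY!, // assumed to be accepted by OpenAiExecutionToolsOptions
});

// `getClient()` is now part of the public typings and resolves to the underlying OpenAI client:
const client = await openAiTools.getClient();

// Derive assistant-scoped tools that reuse the same credentials:
const assistantTools = openAiTools.createAssistantSubtools('asst_...'); // placeholder id
console.info(assistantTools.title);
```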
@@ -0,0 +1,15 @@
+ import { OpenAiAssistantExecutionTools } from './OpenAiAssistantExecutionTools';
+ import type { OpenAiAssistantExecutionToolsOptions } from './OpenAiAssistantExecutionToolsOptions';
+ /**
+ * Execution Tools for calling OpenAI API
+ *
+ * @public exported from `@promptbook/openai`
+ */
+ export declare const createOpenAiAssistantExecutionTools: ((options: OpenAiAssistantExecutionToolsOptions) => OpenAiAssistantExecutionTools) & {
+ packageName: string;
+ className: string;
+ };
+ /**
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
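The factory is callable like the constructor but also carries `packageName` and `className` metadata, which the registration declarations in the next hunks appear to rely on. A sketch with placeholder option values:

```ts
import { createOpenAiAssistantExecutionTools } from '@promptbook/openai';

const tools = createOpenAiAssistantExecutionTools({
    apiKey: process.env.OPENAI_API_KEY!, // placeholder credentials
    assistantId: 'asst_...', // placeholder assistant id
});

// Metadata attached to the factory function itself:
console.info(createOpenAiAssistantExecutionTools.packageName); // presumably '@promptbook/openai'
console.info(createOpenAiAssistantExecutionTools.className); // presumably 'OpenAiAssistantExecutionTools'
```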
@@ -7,3 +7,12 @@
  * @public exported from `@promptbook/cli`
  */
  export declare const _OpenAiMetadataRegistration: void;
+ /**
+ * @@@ registration1 of default configuration for Open AI
+ *
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
+ *
+ * @public exported from `@promptbook/core`
+ * @public exported from `@promptbook/cli`
+ */
+ export declare const _OpenAiAssistantMetadataRegistration: void;
@@ -7,6 +7,15 @@
  * @public exported from `@promptbook/cli`
  */
  export declare const _OpenAiRegistration: void;
+ /**
+ * @@@ registration2
+ *
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
+ *
+ * @public exported from `@promptbook/openai`
+ * @public exported from `@promptbook/cli`
+ */
+ export declare const _OpenAiAssistantRegistration: void;
  /**
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
  */
@@ -20,7 +20,7 @@ import type { RemoteLlmExecutionToolsOptions } from './interfaces/RemoteLlmExecu
  * @public exported from `@promptbook/remote-client`
  */
  export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
- private readonly options;
+ protected readonly options: RemoteLlmExecutionToolsOptions;
  constructor(options: RemoteLlmExecutionToolsOptions);
  get title(): string_title & string_markdown_text;
  get description(): string_markdown;
@@ -9,7 +9,7 @@ import type { JavascriptExecutionToolsOptions } from './JavascriptExecutionTools
  * @public exported from `@promptbook/execute-javascript`
  */
  export declare class JavascriptEvalExecutionTools implements ScriptExecutionTools {
- private readonly options;
+ protected readonly options: JavascriptExecutionToolsOptions;
  constructor(options?: JavascriptExecutionToolsOptions);
  /**
  * Executes a JavaScript
@@ -9,7 +9,7 @@ import type { ScriptExecutionToolsExecuteOptions } from '../../execution/ScriptE
  * @private still in development
  */
  export declare class PythonExecutionTools implements ScriptExecutionTools {
- private readonly options;
+ protected readonly options: CommonExecutionToolsOptions;
  constructor(options?: CommonExecutionToolsOptions);
  /**
  * Executes a Python
@@ -9,7 +9,7 @@ import type { ScriptExecutionToolsExecuteOptions } from '../../execution/ScriptE
  * @private still in development
  */
  export declare class TypescriptExecutionTools implements ScriptExecutionTools {
- private readonly options;
+ protected readonly options: CommonExecutionToolsOptions;
  constructor(options?: CommonExecutionToolsOptions);
  /**
  * Executes a TypeScript
@@ -6,7 +6,7 @@ import type { FilesStorageOptions } from './FilesStorageOptions';
  * @public exported from `@promptbook/node`
  */
  export declare class FilesStorage<TItem> implements PromptbookStorage<TItem> {
- private readonly options;
+ protected readonly options: FilesStorageOptions;
  constructor(options: FilesStorageOptions);
  /**
  * @@@
@@ -11,18 +11,11 @@ import type { string_name } from '../typeAliases';
  */
  export type KnowledgeSourceJson = {
  /**
- * Unique identifier of the knowledge source
+ * @@@
  */
  readonly name: string_name;
  /**
- * Source of one knowledge
- *
- * It can be a link, a relative path to file or direct text or combination of those
- *
- * For example `"https://pavolhejny.com/"`
- * For example `"./pavol-hejny-cv.pdf"`
- * For example `"Pavol Hejný has web https://pavolhejny.com/"`
- * For example `"Pavol Hejný is web developer and creator of Promptbook and Collboard"`
+ * @@@
  */
  readonly sourceContent: string_knowledge_source_content;
  };
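The removed doc comments carried the only examples of what `sourceContent` may hold (a URL, a relative file path, or direct text). For reference, a value of this type built from one of those removed examples, assuming `KnowledgeSourceJson` is importable from `@promptbook/types`:

```ts
import type { KnowledgeSourceJson } from '@promptbook/types';

const knowledgeSource: KnowledgeSourceJson = {
    name: 'pavol-hejny-cv', // hypothetical identifier
    sourceContent: './pavol-hejny-cv.pdf', // one of the examples from the removed comment
};

console.info(knowledgeSource);
```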
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/openai",
- "version": "0.71.0-0",
+ "version": "0.72.0-1",
  "description": "Supercharge your use of large language models",
  "private": false,
  "sideEffects": false,
@@ -51,7 +51,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/openai.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.71.0-0"
+ "@promptbook/core": "0.72.0-1"
  },
  "dependencies": {
  "colors": "1.4.0",