@promptbook/openai 0.52.0-29 → 0.52.0-30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -30,11 +30,9 @@ npm i ptbk
  npm i @promptbook/openai
  ```

- Wrapper around [OpenAI's SDK](https://www.npmjs.com/package/openai) to make it easier to use inside Promptbooks.
+ `@promptbook/openai` integrates [OpenAI's API](https://openai.com/) with [Promptbook](https://github.com/webgptorg/promptbook). It allows to execute Promptbooks with OpenAI GPT models.

-
-
- ## Usage
+ ## 🧡 Usage

  ```typescript
  import {
@@ -45,13 +43,13 @@ import {
  import { JavascriptExecutionTools } from '@promptbook/execute-javascript';
  import { OpenAiExecutionTools } from '@promptbook/openai';

- // Create whole Promptbook library
+ // ▶ Create whole Promptbook library
  const library = await createPromptbookLibraryFromDirectory('./promptbook-library');

- // Get one Promptbook
+ // ▶ Get one Promptbook
  const promptbook = await library.getPromptbookByUrl(`https://promptbook.studio/my-library/write-article.ptbk.md`);

- // Prepare tools
+ // ▶ Prepare tools
  const tools = {
      llm: new OpenAiExecutionTools({
          apiKey: process.env.OPENAI_API_KEY,
@@ -59,26 +57,79 @@ const tools = {
      script: [new JavascriptExecutionTools()],
  };

- // Create executor - the function that will execute the Promptbook
+ // ▶ Create executor - the function that will execute the Promptbook
  const promptbookExecutor = createPromptbookExecutor({ promptbook, tools });

- // Prepare input parameters
+ // ▶ Prepare input parameters
  const inputParameters = { word: 'cat' };

- // 🚀 Execute the Promptbook
+ // 🚀▶ Execute the Promptbook
  const result = await promptbookExecutor(inputParameters);

- // Fail if the execution was not successful
+ // ▶ Fail if the execution was not successful
  assertsExecutionSuccessful(result);

- // Handle the result
+ // ▶ Handle the result
  const { isSuccessful, errors, outputParameters, executionReport } = result;
  console.info(outputParameters);
  ```

+ ## 💕 Usage of multiple LLM providers
+
+ You can use multiple LLM providers in one Promptbook execution. The best model will be chosen automatically according to the prompt and the model's capabilities.
+
+ ```typescript
+ import {
+     createPromptbookExecutor,
+     createPromptbookLibraryFromDirectory,
+     assertsExecutionSuccessful,
+ } from '@promptbook/core';
+ import { JavascriptExecutionTools } from '@promptbook/execute-javascript';
+ import { OpenAiExecutionTools } from '@promptbook/openai';
+
+ // ▶ Create whole Promptbook library
+ const library = await createPromptbookLibraryFromDirectory('./promptbook-library');
+
+ // ▶ Get one Promptbook
+ const promptbook = await library.getPromptbookByUrl(`https://promptbook.studio/my-library/write-article.ptbk.md`);
+
+ // ▶ Prepare tools
+ const tools = new MultipleLlmExecutionTools(
+     // Note: You can use multiple LLM providers in one Promptbook execution. The best model will be chosen automatically according to the prompt and the model's capabilities.
+     new OpenAiExecutionTools({
+         isVerbose: true,
+         apiKey: process.env.OPENAI_API_KEY,
+     }),
+     new AnthropicClaudeExecutionTools({
+         isVerbose: true,
+         apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
+     }),
+     new AzureOpenAiExecutionTools({
+         isVerbose: true,
+         resourceName: process.env.AZUREOPENAI_RESOURCE_NAME,
+         deploymentName: process.env.AZUREOPENAI_DEPLOYMENT_NAME,
+         apiKey: process.env.AZUREOPENAI_API_KEY,
+     }),
+ );

+ // ▶ Create executor - the function that will execute the Promptbook
+ const promptbookExecutor = createPromptbookExecutor({ promptbook, tools });

- ## Other models
+ // ▶ Prepare input parameters
+ const inputParameters = { word: 'cat' };
+
+ // 🚀▶ Execute the Promptbook
+ const result = await promptbookExecutor(inputParameters);
+
+ // ▶ Fail if the execution was not successful
+ assertsExecutionSuccessful(result);
+
+ // ▶ Handle the result
+ const { isSuccessful, errors, outputParameters, executionReport } = result;
+ console.info(outputParameters);
+ ```
+
+ ## 💙 Integration with other models

  See the other models available in the Promptbook package:

@@ -86,6 +137,8 @@ See the other models available in the Promptbook package:
  - [Anthropic Claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)


+
+
  ---

  Rest of the documentation is common for **entire promptbook ecosystem**:
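The multi-provider example added to the README above constructs `MultipleLlmExecutionTools`, `AnthropicClaudeExecutionTools` and `AzureOpenAiExecutionTools` but only imports `OpenAiExecutionTools` and the `@promptbook/core` helpers. A minimal sketch of the import block it presumably needs follows; the paths `@promptbook/anthropic-claude` and `@promptbook/azure-openai`, and the assumption that `MultipleLlmExecutionTools` is exported from `@promptbook/core`, are not confirmed by this diff.

```typescript
// Sketch only: import lines the multi-provider README example appears to need.
// Assumptions (not confirmed by this diff): AnthropicClaudeExecutionTools comes from
// @promptbook/anthropic-claude, AzureOpenAiExecutionTools from @promptbook/azure-openai,
// and MultipleLlmExecutionTools is exported from @promptbook/core.
import {
    assertsExecutionSuccessful,
    createPromptbookExecutor,
    createPromptbookLibraryFromDirectory,
    MultipleLlmExecutionTools, // assumed export
} from '@promptbook/core';
import { AnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude'; // assumed path
import { AzureOpenAiExecutionTools } from '@promptbook/azure-openai'; // assumed path
import { JavascriptExecutionTools } from '@promptbook/execute-javascript';
import { OpenAiExecutionTools } from '@promptbook/openai';
```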
package/esm/index.es.js CHANGED
@@ -675,7 +675,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  return OpenAiExecutionTools;
  }());
  /**
- * TODO: [๐Ÿ“][โ™] Allow to list compatible models with each variant
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
  * TODO: Maybe Create some common util for gptChat and gptComplete
  * TODO: Maybe make custom OpenaiError
  */
@@ -36,7 +36,6 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
  }
  /**
  * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
- * TODO: [๐Ÿ“][โ™] Allow to list compatible models with each variant
  * TODO: Maybe Create some common util for gptChat and gptComplete
  * TODO: Maybe make custom OpenaiError
  */
@@ -35,7 +35,6 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
  listModels(): Promise<Array<AvailableModel>>;
  }
  /**
- * TODO: [๐Ÿ“][โ™] Allow to list compatible models with each variant
  * TODO: Maybe Create some common util for gptChat and gptComplete
  * TODO: Maybe make custom AzureOpenaiError
  */
@@ -5,6 +5,3 @@ import { OpenAiExecutionTools } from '../openai/OpenAiExecutionTools';
  */
  export declare class LangtailExecutionTools extends OpenAiExecutionTools implements LlmExecutionTools {
  }
- /**
-  * TODO: [๐Ÿ“][โ™] Allow to list the available prompts in Langtail
-  */
@@ -39,7 +39,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
  listModels(): Array<AvailableModel>;
  }
  /**
- * TODO: [๐Ÿ“][โ™] Allow to list compatible models with each variant
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
  * TODO: Maybe Create some common util for gptChat and gptComplete
  * TODO: Maybe make custom OpenaiError
  */
@@ -35,6 +35,6 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
  listModels(): Promise<Array<AvailableModel>>;
  }
  /**
- * TODO: [๐Ÿ“][โ™] Allow to list compatible models with each variant
+ * TODO: [๐Ÿ“] Allow to list compatible models with each variant
  * TODO: [🤹‍♂️] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/openai",
- "version": "0.52.0-29",
+ "version": "0.52.0-30",
  "description": "Library to supercharge your use of large language models",
  "private": false,
  "sideEffects": false,
@@ -48,7 +48,7 @@
  }
  ],
  "peerDependencies": {
-     "@promptbook/core": "0.52.0-29"
+     "@promptbook/core": "0.52.0-30"
  },
  "main": "./umd/index.umd.js",
  "module": "./esm/index.es.js",
package/umd/index.umd.js CHANGED
@@ -683,7 +683,7 @@
  return OpenAiExecutionTools;
  }());
  /**
- * TODO: [๐Ÿ“][โ™] Allow to list compatible models with each variant
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
  * TODO: Maybe Create some common util for gptChat and gptComplete
  * TODO: Maybe make custom OpenaiError
  */
@@ -36,7 +36,6 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
  }
  /**
  * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
- * TODO: [๐Ÿ“][โ™] Allow to list compatible models with each variant
  * TODO: Maybe Create some common util for gptChat and gptComplete
  * TODO: Maybe make custom OpenaiError
  */
@@ -35,7 +35,6 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
  listModels(): Promise<Array<AvailableModel>>;
  }
  /**
- * TODO: [๐Ÿ“][โ™] Allow to list compatible models with each variant
  * TODO: Maybe Create some common util for gptChat and gptComplete
  * TODO: Maybe make custom AzureOpenaiError
  */
@@ -5,6 +5,3 @@ import { OpenAiExecutionTools } from '../openai/OpenAiExecutionTools';
  */
  export declare class LangtailExecutionTools extends OpenAiExecutionTools implements LlmExecutionTools {
  }
- /**
-  * TODO: [๐Ÿ“][โ™] Allow to list the available prompts in Langtail
-  */
@@ -39,7 +39,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
  listModels(): Array<AvailableModel>;
  }
  /**
- * TODO: [๐Ÿ“][โ™] Allow to list compatible models with each variant
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
  * TODO: Maybe Create some common util for gptChat and gptComplete
  * TODO: Maybe make custom OpenaiError
  */
@@ -35,6 +35,6 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
  listModels(): Promise<Array<AvailableModel>>;
  }
  /**
- * TODO: [๐Ÿ“][โ™] Allow to list compatible models with each variant
+ * TODO: [๐Ÿ“] Allow to list compatible models with each variant
  * TODO: [🤹‍♂️] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
  */