@pipedream/openai 1.0.0 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/actions/analyze-image-content/analyze-image-content.mjs +7 -1
- package/actions/chat/chat.mjs +1 -1
- package/actions/chat-using-file-search/chat-using-file-search.mjs +1 -1
- package/actions/chat-using-functions/chat-using-functions.mjs +1 -1
- package/actions/chat-using-web-search/chat-using-web-search.mjs +1 -1
- package/actions/classify-items-into-categories/classify-items-into-categories.mjs +1 -1
- package/actions/common/common.mjs +7 -2
- package/actions/convert-text-to-speech/convert-text-to-speech.mjs +8 -3
- package/actions/create-batch/create-batch.mjs +7 -1
- package/actions/create-embeddings/create-embeddings.mjs +1 -1
- package/actions/create-image/create-image.mjs +7 -2
- package/actions/create-transcription/create-transcription.mjs +7 -1
- package/actions/retrieve-file-content/retrieve-file-content.mjs +6 -1
- package/actions/send-prompt/send-prompt.mjs +1 -1
- package/actions/summarize/summarize.mjs +1 -1
- package/actions/translate-text/translate-text.mjs +1 -1
- package/actions/upload-file/upload-file.mjs +7 -1
- package/package.json +1 -1
|
@@ -8,7 +8,7 @@ export default {
|
|
|
8
8
|
key: "openai-analyze-image-content",
|
|
9
9
|
name: "Analyze Image Content",
|
|
10
10
|
description: "Send a message or question about an image and receive a response. [See the documentation](https://platform.openai.com/docs/api-reference/runs/createThreadAndRun)",
|
|
11
|
-
version: "1.0.0",
|
|
11
|
+
version: "1.0.1",
|
|
12
12
|
type: "action",
|
|
13
13
|
props: {
|
|
14
14
|
openai,
|
|
@@ -33,6 +33,12 @@ export default {
|
|
|
33
33
|
description: "The image to process. Provide either a file URL or a path to a file in the `/tmp` directory (for example, `/tmp/myFile.jpg`). Supported image types: jpeg, jpg, png, gif, webp",
|
|
34
34
|
optional: true,
|
|
35
35
|
},
|
|
36
|
+
syncDir: {
|
|
37
|
+
type: "dir",
|
|
38
|
+
accessMode: "read",
|
|
39
|
+
sync: true,
|
|
40
|
+
optional: true,
|
|
41
|
+
},
|
|
36
42
|
},
|
|
37
43
|
async run({ $ }) {
|
|
38
44
|
const { id: assistantId } = await this.openai.createAssistant({
|
package/actions/chat/chat.mjs
CHANGED
|
@@ -5,7 +5,7 @@ import constants from "../../common/constants.mjs";
|
|
|
5
5
|
export default {
|
|
6
6
|
...common,
|
|
7
7
|
name: "Chat",
|
|
8
|
-
version: "0.3.0",
|
|
8
|
+
version: "0.3.1",
|
|
9
9
|
key: "openai-chat",
|
|
10
10
|
description: "The Chat API, using the `gpt-3.5-turbo` or `gpt-4` model. [See the documentation](https://platform.openai.com/docs/api-reference/chat)",
|
|
11
11
|
type: "action",
|
|
@@ -5,7 +5,7 @@ import constants from "../../common/constants.mjs";
|
|
|
5
5
|
export default {
|
|
6
6
|
...common,
|
|
7
7
|
name: "Chat using File Search",
|
|
8
|
-
version: "0.0.4",
|
|
8
|
+
version: "0.0.5",
|
|
9
9
|
key: "openai-chat-using-file-search",
|
|
10
10
|
description: "Chat with your files knowledge base (vector stores). [See the documentation](https://platform.openai.com/docs/guides/tools-file-search)",
|
|
11
11
|
type: "action",
|
|
@@ -5,7 +5,7 @@ import constants from "../../common/constants.mjs";
|
|
|
5
5
|
export default {
|
|
6
6
|
...common,
|
|
7
7
|
name: "Chat using Functions",
|
|
8
|
-
version: "0.0.5",
|
|
8
|
+
version: "0.0.6",
|
|
9
9
|
key: "openai-chat-using-functions",
|
|
10
10
|
description: "Chat with your models and allow them to invoke functions. Optionally, you can build and invoke workflows as functions. [See the documentation](https://platform.openai.com/docs/guides/function-calling)",
|
|
11
11
|
type: "action",
|
|
@@ -5,7 +5,7 @@ import constants from "../../common/constants.mjs";
|
|
|
5
5
|
export default {
|
|
6
6
|
...common,
|
|
7
7
|
name: "Chat using Web Search",
|
|
8
|
-
version: "0.0.4",
|
|
8
|
+
version: "0.0.5",
|
|
9
9
|
key: "openai-chat-using-web-search",
|
|
10
10
|
description: "Chat using the web search tool. [See the documentation](https://platform.openai.com/docs/guides/tools-web-search)",
|
|
11
11
|
type: "action",
|
|
@@ -3,7 +3,7 @@ import common from "../common/common-helper.mjs";
|
|
|
3
3
|
export default {
|
|
4
4
|
...common,
|
|
5
5
|
name: "Classify Items into Categories",
|
|
6
|
-
version: "0.1.6",
|
|
6
|
+
version: "0.1.7",
|
|
7
7
|
key: "openai-classify-items-into-categories",
|
|
8
8
|
description: "Classify items into specific categories using the Chat API. [See the documentation](https://platform.openai.com/docs/api-reference/chat)",
|
|
9
9
|
type: "action",
|
|
@@ -59,10 +59,9 @@ export default {
|
|
|
59
59
|
},
|
|
60
60
|
methods: {
|
|
61
61
|
_getCommonArgs() {
|
|
62
|
-
|
|
62
|
+
const args = {
|
|
63
63
|
model: this.modelId,
|
|
64
64
|
prompt: this.prompt,
|
|
65
|
-
max_tokens: this.maxTokens,
|
|
66
65
|
temperature: this.temperature
|
|
67
66
|
? +this.temperature
|
|
68
67
|
: this.temperature,
|
|
@@ -80,6 +79,12 @@ export default {
|
|
|
80
79
|
best_of: this.bestOf,
|
|
81
80
|
user: this.user,
|
|
82
81
|
};
|
|
82
|
+
if (this.modelId.startsWith("o1") || this.modelId.startsWith("o3") || this.modelId.startsWith("o4")) {
|
|
83
|
+
args.max_completion_tokens = this.maxTokens;
|
|
84
|
+
} else {
|
|
85
|
+
args.max_tokens = this.maxTokens;
|
|
86
|
+
}
|
|
87
|
+
return args;
|
|
83
88
|
},
|
|
84
89
|
async _getUserMessageContent() {
|
|
85
90
|
let content = [];
|
|
@@ -5,7 +5,7 @@ export default {
|
|
|
5
5
|
key: "openai-convert-text-to-speech",
|
|
6
6
|
name: "Convert Text to Speech (TTS)",
|
|
7
7
|
description: "Generates audio from the input text. [See the documentation](https://platform.openai.com/docs/api-reference/audio/createSpeech)",
|
|
8
|
-
version: "0.0.14",
|
|
8
|
+
version: "0.0.15",
|
|
9
9
|
type: "action",
|
|
10
10
|
props: {
|
|
11
11
|
openai,
|
|
@@ -42,7 +42,12 @@ export default {
|
|
|
42
42
|
outputFile: {
|
|
43
43
|
type: "string",
|
|
44
44
|
label: "Output Filename",
|
|
45
|
-
description: "The filename of the output audio file that will be written to the `/tmp` folder, e.g.
|
|
45
|
+
description: "The filename of the output audio file that will be written to the `/tmp` folder, e.g. `myFile.mp3`",
|
|
46
|
+
},
|
|
47
|
+
syncDir: {
|
|
48
|
+
type: "dir",
|
|
49
|
+
accessMode: "write",
|
|
50
|
+
sync: true,
|
|
46
51
|
},
|
|
47
52
|
},
|
|
48
53
|
async run({ $ }) {
|
|
@@ -60,7 +65,7 @@ export default {
|
|
|
60
65
|
|
|
61
66
|
const outputFilePath = this.outputFile.includes("tmp/")
|
|
62
67
|
? this.outputFile
|
|
63
|
-
: `/tmp/${this.outputFile}`;
|
|
68
|
+
: `${process.env.STASH_DIR || "/tmp"}/${this.outputFile}`;
|
|
64
69
|
|
|
65
70
|
await fs.promises.writeFile(outputFilePath, Buffer.from(response));
|
|
66
71
|
|
|
@@ -9,7 +9,7 @@ export default {
|
|
|
9
9
|
key: "openai-create-batch",
|
|
10
10
|
name: "Create Batch",
|
|
11
11
|
description: "Creates and executes a batch from an uploaded file of requests. [See the documentation](https://platform.openai.com/docs/api-reference/batch/create)",
|
|
12
|
-
version: "0.1.0",
|
|
12
|
+
version: "0.1.1",
|
|
13
13
|
type: "action",
|
|
14
14
|
props: {
|
|
15
15
|
openai,
|
|
@@ -41,6 +41,12 @@ export default {
|
|
|
41
41
|
"metadata",
|
|
42
42
|
],
|
|
43
43
|
},
|
|
44
|
+
syncDir: {
|
|
45
|
+
type: "dir",
|
|
46
|
+
accessMode: "read",
|
|
47
|
+
sync: true,
|
|
48
|
+
optional: true,
|
|
49
|
+
},
|
|
44
50
|
},
|
|
45
51
|
async run({ $ }) {
|
|
46
52
|
if (!this.fileId && !this.filePath) {
|
|
@@ -4,7 +4,7 @@ import common from "../common/common.mjs";
|
|
|
4
4
|
|
|
5
5
|
export default {
|
|
6
6
|
name: "Create Embeddings",
|
|
7
|
-
version: "0.0.18",
|
|
7
|
+
version: "0.0.19",
|
|
8
8
|
key: "openai-create-embeddings",
|
|
9
9
|
description: "Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. [See the documentation](https://platform.openai.com/docs/api-reference/embeddings)",
|
|
10
10
|
type: "action",
|
|
@@ -4,7 +4,7 @@ import fs from "fs";
|
|
|
4
4
|
|
|
5
5
|
export default {
|
|
6
6
|
name: "Create Image (Dall-E)",
|
|
7
|
-
version: "0.1.22",
|
|
7
|
+
version: "0.1.23",
|
|
8
8
|
key: "openai-create-image",
|
|
9
9
|
description: "Creates an image given a prompt returning a URL to the image. [See the documentation](https://platform.openai.com/docs/api-reference/images)",
|
|
10
10
|
type: "action",
|
|
@@ -39,6 +39,11 @@ export default {
|
|
|
39
39
|
options: constants.IMAGE_SIZES,
|
|
40
40
|
default: "1024x1024",
|
|
41
41
|
},
|
|
42
|
+
syncDir: {
|
|
43
|
+
type: "dir",
|
|
44
|
+
accessMode: "write",
|
|
45
|
+
sync: true,
|
|
46
|
+
},
|
|
42
47
|
},
|
|
43
48
|
async additionalProps() {
|
|
44
49
|
const props = {};
|
|
@@ -106,7 +111,7 @@ export default {
|
|
|
106
111
|
: this.filename.replace(/(\.[^/.]+)$/, `_${i}$1`);
|
|
107
112
|
const outputFilePath = filename.includes("tmp/")
|
|
108
113
|
? filename
|
|
109
|
-
: `/tmp/${filename}`;
|
|
114
|
+
: `${process.env.STASH_DIR || "/tmp"}/${filename}`;
|
|
110
115
|
await fs.writeFileSync(outputFilePath, Buffer.from(response.data[0].b64_json.toString(), "base64"));
|
|
111
116
|
fileData.push({
|
|
112
117
|
tmp: [
|
|
@@ -6,7 +6,7 @@ export default {
|
|
|
6
6
|
key: "openai-create-transcription",
|
|
7
7
|
name: "Create Transcription",
|
|
8
8
|
description: "Transcribes audio into the input language. [See the documentation](https://platform.openai.com/docs/api-reference/audio/createTranscription)",
|
|
9
|
-
version: "0.3.0",
|
|
9
|
+
version: "0.3.1",
|
|
10
10
|
type: "action",
|
|
11
11
|
props: {
|
|
12
12
|
openai,
|
|
@@ -70,6 +70,12 @@ export default {
|
|
|
70
70
|
description: "The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.",
|
|
71
71
|
optional: true,
|
|
72
72
|
},
|
|
73
|
+
syncDir: {
|
|
74
|
+
type: "dir",
|
|
75
|
+
accessMode: "read",
|
|
76
|
+
sync: true,
|
|
77
|
+
optional: true,
|
|
78
|
+
},
|
|
73
79
|
},
|
|
74
80
|
methods: {
|
|
75
81
|
createTranscription(opts = {}) {
|
|
@@ -5,7 +5,7 @@ export default {
|
|
|
5
5
|
key: "openai-retrieve-file-content",
|
|
6
6
|
name: "Retrieve File Content",
|
|
7
7
|
description: "Retrieves the contents of the specified file. [See the documentation](https://platform.openai.com/docs/api-reference/files/retrieve-content)",
|
|
8
|
-
version: "0.0.15",
|
|
8
|
+
version: "0.0.16",
|
|
9
9
|
type: "action",
|
|
10
10
|
props: {
|
|
11
11
|
openai,
|
|
@@ -21,6 +21,11 @@ export default {
|
|
|
21
21
|
description: "Optionally, download the file to the `/tmp` directory using the given filename",
|
|
22
22
|
optional: true,
|
|
23
23
|
},
|
|
24
|
+
syncDir: {
|
|
25
|
+
type: "dir",
|
|
26
|
+
accessMode: "write",
|
|
27
|
+
sync: true,
|
|
28
|
+
},
|
|
24
29
|
},
|
|
25
30
|
async run({ $ }) {
|
|
26
31
|
const response = await this.openai.retrieveFileContent({
|
|
@@ -4,7 +4,7 @@ import common from "../common/common.mjs";
|
|
|
4
4
|
export default {
|
|
5
5
|
...common,
|
|
6
6
|
name: "Create Completion (Send Prompt)",
|
|
7
|
-
version: "0.1.17",
|
|
7
|
+
version: "0.1.18",
|
|
8
8
|
key: "openai-send-prompt",
|
|
9
9
|
description: "OpenAI recommends using the **Chat** action for the latest `gpt-3.5-turbo` API, since it's faster and 10x cheaper. This action creates a completion for the provided prompt and parameters using the older `/completions` API. [See the documentation](https://beta.openai.com/docs/api-reference/completions/create)",
|
|
10
10
|
type: "action",
|
|
@@ -4,7 +4,7 @@ import constants from "../../common/constants.mjs";
|
|
|
4
4
|
export default {
|
|
5
5
|
...common,
|
|
6
6
|
name: "Summarize Text",
|
|
7
|
-
version: "0.1.6",
|
|
7
|
+
version: "0.1.7",
|
|
8
8
|
key: "openai-summarize",
|
|
9
9
|
description: "Summarizes text using the Chat API. [See the documentation](https://platform.openai.com/docs/api-reference/chat)",
|
|
10
10
|
type: "action",
|
|
@@ -9,7 +9,7 @@ const langOptions = lang.LANGUAGES.map((l) => ({
|
|
|
9
9
|
export default {
|
|
10
10
|
...common,
|
|
11
11
|
name: "Translate Text (Whisper)",
|
|
12
|
-
version: "0.1.6",
|
|
12
|
+
version: "0.1.7",
|
|
13
13
|
key: "openai-translate-text",
|
|
14
14
|
description: "Translate text from one language to another using the Chat API. [See the documentation](https://platform.openai.com/docs/api-reference/chat)",
|
|
15
15
|
type: "action",
|
|
@@ -6,7 +6,7 @@ export default {
|
|
|
6
6
|
key: "openai-upload-file",
|
|
7
7
|
name: "Upload File",
|
|
8
8
|
description: "Upload a file that can be used across various endpoints/features. The size of individual files can be a maximum of 512mb. [See the documentation](https://platform.openai.com/docs/api-reference/files/create)",
|
|
9
|
-
version: "0.1.0",
|
|
9
|
+
version: "0.1.1",
|
|
10
10
|
type: "action",
|
|
11
11
|
props: {
|
|
12
12
|
openai,
|
|
@@ -22,6 +22,12 @@ export default {
|
|
|
22
22
|
"purpose",
|
|
23
23
|
],
|
|
24
24
|
},
|
|
25
|
+
syncDir: {
|
|
26
|
+
type: "dir",
|
|
27
|
+
accessMode: "read",
|
|
28
|
+
sync: true,
|
|
29
|
+
optional: true,
|
|
30
|
+
},
|
|
25
31
|
},
|
|
26
32
|
async run({ $ }) {
|
|
27
33
|
const {
|