@pipedream/openai 0.0.1 → 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +41 -7
- package/README.md +22 -0
- package/actions/chat/chat.mjs +56 -0
- package/actions/classify-items-into-categories/classify-items-into-categories.mjs +56 -0
- package/actions/common/common-helper.mjs +55 -0
- package/actions/common/common.mjs +141 -0
- package/actions/common/constants.mjs +6 -3
- package/actions/common/lang.mjs +736 -0
- package/actions/create-embeddings/create-embeddings.mjs +51 -0
- package/actions/create-image/create-image.mjs +6 -13
- package/actions/create-transcription/create-transcription.mjs +133 -0
- package/actions/send-prompt/send-prompt.mjs +14 -75
- package/actions/summarize/summarize.mjs +55 -0
- package/actions/translate-text/translate-text.mjs +62 -0
- package/app/openai.app.mjs +179 -0
- package/package.json +8 -4
- package/openai.app.mjs +0 -57
|
import openai from "../../app/openai.app.mjs";
import common from "../common/common.mjs";
import { ConfigurationError } from "@pipedream/platform";

// Action: create vector embeddings for one or more input strings via the
// OpenAI /embeddings endpoint (delegated to the shared app module).
const action = {
  name: "Create Embeddings",
  version: "0.0.1",
  key: "openai-create-embeddings",
  description: "Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. [See the docs here](https://platform.openai.com/docs/api-reference/embeddings)",
  type: "action",
  props: {
    openai,
    modelId: {
      propDefinition: [
        openai,
        "embeddingsModelId",
      ],
    },
    input: {
      label: "Input",
      description: "Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length.",
      type: "string[]",
    },
    user: common.props.user,
  },
  async run({ $ }) {
    // Reject any element longer than 8192 before calling the API.
    // NOTE(review): `.length` counts characters (or array entries), not model
    // tokens — confirm this approximation of the 8192-token limit is intended.
    let index = 0;
    for (const element of this.input) {
      if (element.length > 8192) {
        throw new ConfigurationError(`Element #${index} is more than 8192 tokens in length. Each input must not exceed 8192 tokens in length.`);
      }
      index += 1;
    }

    // Delegate the POST /embeddings call to the app module.
    const response = await this.openai.createEmbeddings({
      $,
      args: {
        model: this.modelId,
        input: this.input,
      },
    });

    if (response) {
      $.export("$summary", "Successfully created embeddings");
    }

    return response;
  },
};

export default action;
|
@@ -1,11 +1,11 @@
|
|
|
1
|
-
import openai from "../../openai.app.mjs";
|
|
1
|
+
import openai from "../../app/openai.app.mjs";
|
|
2
2
|
import constants from "../common/constants.mjs";
|
|
3
3
|
|
|
4
4
|
export default {
|
|
5
5
|
name: "Create Image",
|
|
6
|
-
version: "0.0
|
|
6
|
+
version: "0.1.0",
|
|
7
7
|
key: "openai-create-image",
|
|
8
|
-
description: "Creates an image given a prompt. [See docs here](https://
|
|
8
|
+
description: "Creates an image given a prompt. returns a URL to the image. [See docs here](https://platform.openai.com/docs/api-reference/images)",
|
|
9
9
|
type: "action",
|
|
10
10
|
props: {
|
|
11
11
|
openai,
|
|
@@ -27,18 +27,11 @@ export default {
|
|
|
27
27
|
optional: true,
|
|
28
28
|
options: constants.IMAGE_SIZES,
|
|
29
29
|
},
|
|
30
|
-
responseFormat: {
|
|
31
|
-
label: "Response Format",
|
|
32
|
-
description: "The format in which the generated images are returned.",
|
|
33
|
-
type: "string",
|
|
34
|
-
optional: true,
|
|
35
|
-
options: constants.RESPONSE_FORMATS,
|
|
36
|
-
},
|
|
37
30
|
},
|
|
38
31
|
async run({ $ }) {
|
|
39
32
|
const response = await this.openai.createImage({
|
|
40
33
|
$,
|
|
41
|
-
|
|
34
|
+
args: {
|
|
42
35
|
prompt: this.prompt,
|
|
43
36
|
n: this.n,
|
|
44
37
|
size: this.size,
|
|
@@ -46,8 +39,8 @@ export default {
|
|
|
46
39
|
},
|
|
47
40
|
});
|
|
48
41
|
|
|
49
|
-
if (response) {
|
|
50
|
-
$.export("$summary", `Successfully created
|
|
42
|
+
if (response.data.length) {
|
|
43
|
+
$.export("$summary", `Successfully created ${response.data.length} images`);
|
|
51
44
|
}
|
|
52
45
|
|
|
53
46
|
return response;
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
import fs from "fs";
|
|
2
|
+
import got from "got";
|
|
3
|
+
import { extname } from "path";
|
|
4
|
+
import FormData from "form-data";
|
|
5
|
+
import { ConfigurationError } from "@pipedream/platform";
|
|
6
|
+
import common from "../common/common.mjs";
|
|
7
|
+
import constants from "../common/constants.mjs";
|
|
8
|
+
import lang from "../common/lang.mjs";
|
|
9
|
+
import openai from "../../app/openai.app.mjs";
|
|
10
|
+
|
|
11
|
+
const COMMON_AUDIO_FORMATS_TEXT = "Your audio file must be in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.";
|
|
12
|
+
|
|
13
|
+
export default {
|
|
14
|
+
name: "Create Transcription",
|
|
15
|
+
version: "0.0.1",
|
|
16
|
+
key: "openai-create-transcription",
|
|
17
|
+
description: "Transcribes audio into the input language. [See docs here](https://platform.openai.com/docs/api-reference/audio/create).",
|
|
18
|
+
type: "action",
|
|
19
|
+
props: {
|
|
20
|
+
openai,
|
|
21
|
+
uploadType: {
|
|
22
|
+
label: "Audio Upload Type",
|
|
23
|
+
description: "Are you uploading an audio file from [your workflow's `/tmp` directory](https://pipedream.com/docs/code/nodejs/working-with-files/#the-tmp-directory), or providing a URL to the file?",
|
|
24
|
+
type: "string",
|
|
25
|
+
options: [
|
|
26
|
+
"File",
|
|
27
|
+
"URL",
|
|
28
|
+
],
|
|
29
|
+
reloadProps: true,
|
|
30
|
+
},
|
|
31
|
+
language: {
|
|
32
|
+
label: "Language",
|
|
33
|
+
description: "**Optional**. The language of the input audio. Supplying the input language will improve accuracy and latency.",
|
|
34
|
+
type: "string",
|
|
35
|
+
optional: true,
|
|
36
|
+
default: "en",
|
|
37
|
+
options: lang.LANGUAGES.map((l) => ({
|
|
38
|
+
label: l.label,
|
|
39
|
+
value: l.value,
|
|
40
|
+
})),
|
|
41
|
+
},
|
|
42
|
+
},
|
|
43
|
+
async additionalProps() {
|
|
44
|
+
const props = {};
|
|
45
|
+
switch (this.uploadType) {
|
|
46
|
+
case "File":
|
|
47
|
+
props.path = {
|
|
48
|
+
type: "string",
|
|
49
|
+
label: "File Path",
|
|
50
|
+
description: `A path to your audio file to transcribe, e.g. \`/tmp/audio.mp3\`. ${COMMON_AUDIO_FORMATS_TEXT} Add the appropriate extension (mp3, mp4, etc.) on your filename — OpenAI uses the extension to determine the file type. [See the Pipedream docs on saving files to \`/tmp\`](https://pipedream.com/docs/code/nodejs/working-with-files/#writing-a-file-to-tmp)`,
|
|
51
|
+
};
|
|
52
|
+
break;
|
|
53
|
+
case "URL":
|
|
54
|
+
props.url = {
|
|
55
|
+
type: "string",
|
|
56
|
+
label: "URL",
|
|
57
|
+
description: `A public URL to the audio file to transcribe. This URL must point directly to the audio file, not a webpage that links to the audio file. ${COMMON_AUDIO_FORMATS_TEXT}`,
|
|
58
|
+
};
|
|
59
|
+
break;
|
|
60
|
+
default:
|
|
61
|
+
throw new ConfigurationError("Invalid upload type specified. Please provide 'File' or 'URL'.");
|
|
62
|
+
}
|
|
63
|
+
// Because we need to display the file or URL above, and not below, these optional props
|
|
64
|
+
// TODO: Will be fixed when we render optional props correctly when used with additionalProps
|
|
65
|
+
props.prompt = {
|
|
66
|
+
label: "Prompt",
|
|
67
|
+
description: "**Optional** text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should match the audio language.",
|
|
68
|
+
type: "string",
|
|
69
|
+
optional: true,
|
|
70
|
+
};
|
|
71
|
+
props.responseFormat = {
|
|
72
|
+
label: "Response Format",
|
|
73
|
+
description: "**Optional**. The format of the response. The default is `json`.",
|
|
74
|
+
type: "string",
|
|
75
|
+
default: "json",
|
|
76
|
+
optional: true,
|
|
77
|
+
options: constants.TRANSCRIPTION_FORMATS,
|
|
78
|
+
};
|
|
79
|
+
props.temperature = common.props.temperature;
|
|
80
|
+
|
|
81
|
+
return props;
|
|
82
|
+
},
|
|
83
|
+
async run({ $ }) {
|
|
84
|
+
const {
|
|
85
|
+
url,
|
|
86
|
+
path,
|
|
87
|
+
} = this;
|
|
88
|
+
|
|
89
|
+
if (!url && !path) {
|
|
90
|
+
throw new Error("Must specify either File URL or File Path");
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
const form = new FormData();
|
|
94
|
+
form.append("model", "whisper-1");
|
|
95
|
+
if (this.prompt) form.append("prompt", this.prompt);
|
|
96
|
+
if (this.temperature) form.append("temperature", this.temperature);
|
|
97
|
+
if (this.language) form.append("language", this.language);
|
|
98
|
+
if (this.responseFormat) form.append("response_format", this.responseFormat);
|
|
99
|
+
|
|
100
|
+
if (path) {
|
|
101
|
+
if (!fs.existsSync(path)) {
|
|
102
|
+
throw new Error(`${path} does not exist`);
|
|
103
|
+
}
|
|
104
|
+
const readStream = fs.createReadStream(path);
|
|
105
|
+
form.append("file", readStream);
|
|
106
|
+
} else if (url) {
|
|
107
|
+
const ext = extname(url);
|
|
108
|
+
// OpenAI only supports a few audio formats and uses the extension to determine the format
|
|
109
|
+
const tempFilePath = `/tmp/audioFile${ext}`;
|
|
110
|
+
|
|
111
|
+
const writeStream = fs.createWriteStream(tempFilePath);
|
|
112
|
+
const responseStream = got.stream(url);
|
|
113
|
+
responseStream.pipe(writeStream);
|
|
114
|
+
await new Promise((resolve, reject) => {
|
|
115
|
+
writeStream.on("finish", resolve);
|
|
116
|
+
writeStream.on("error", reject);
|
|
117
|
+
responseStream.on("error", reject);
|
|
118
|
+
});
|
|
119
|
+
const readStream = fs.createReadStream(tempFilePath);
|
|
120
|
+
form.append("file", readStream);
|
|
121
|
+
}
|
|
122
|
+
const response = await this.openai.createTranscription({
|
|
123
|
+
$,
|
|
124
|
+
form,
|
|
125
|
+
});
|
|
126
|
+
|
|
127
|
+
if (response) {
|
|
128
|
+
$.export("$summary", "Successfully created transcription");
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
return response;
|
|
132
|
+
},
|
|
133
|
+
};
|
|
@@ -1,105 +1,44 @@
|
|
|
1
|
-
import openai from "../../openai.app.mjs";
|
|
1
|
+
import openai from "../../app/openai.app.mjs";
|
|
2
|
+
import common from "../common/common.mjs";
|
|
2
3
|
|
|
3
4
|
export default {
|
|
4
|
-
|
|
5
|
-
|
|
5
|
+
...common,
|
|
6
|
+
name: "Create Completion (Send Prompt)",
|
|
7
|
+
version: "0.1.0",
|
|
6
8
|
key: "openai-send-prompt",
|
|
7
|
-
description: "
|
|
9
|
+
description: "OpenAI recommends using the **Chat** action for the latest `gpt-3.5-turbo` API, since it's faster and 10x cheaper. This action creates a completion for the provided prompt and parameters using the older `/completions` API. [See docs here](https://beta.openai.com/docs/api-reference/completions/create)",
|
|
8
10
|
type: "action",
|
|
9
11
|
props: {
|
|
10
12
|
openai,
|
|
11
13
|
modelId: {
|
|
12
14
|
propDefinition: [
|
|
13
15
|
openai,
|
|
14
|
-
"
|
|
16
|
+
"completionModelId",
|
|
15
17
|
],
|
|
16
18
|
},
|
|
17
19
|
prompt: {
|
|
18
20
|
label: "Prompt",
|
|
19
|
-
description: "The prompt
|
|
20
|
-
type: "string
|
|
21
|
-
optional: true,
|
|
21
|
+
description: "The prompt to generate completions for",
|
|
22
|
+
type: "string",
|
|
22
23
|
},
|
|
23
24
|
suffix: {
|
|
24
25
|
label: "Suffix",
|
|
25
|
-
description: "The suffix that comes after a completion of inserted text
|
|
26
|
-
type: "string",
|
|
27
|
-
optional: true,
|
|
28
|
-
},
|
|
29
|
-
maxTokens: {
|
|
30
|
-
label: "Max Tokens",
|
|
31
|
-
description: "The maximum number of [tokens](https://beta.openai.com/tokenizer) to generate in the completion.",
|
|
32
|
-
type: "integer",
|
|
33
|
-
optional: true,
|
|
34
|
-
},
|
|
35
|
-
temperature: {
|
|
36
|
-
label: "Temperature",
|
|
37
|
-
description: "What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.",
|
|
38
|
-
type: "string",
|
|
39
|
-
optional: true,
|
|
40
|
-
},
|
|
41
|
-
topP: {
|
|
42
|
-
label: "Top P",
|
|
43
|
-
description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.",
|
|
44
|
-
type: "string",
|
|
45
|
-
optional: true,
|
|
46
|
-
},
|
|
47
|
-
n: {
|
|
48
|
-
label: "N",
|
|
49
|
-
description: "How many completions to generate for each prompt.",
|
|
50
|
-
type: "string",
|
|
51
|
-
optional: true,
|
|
52
|
-
},
|
|
53
|
-
stop: {
|
|
54
|
-
label: "Stop",
|
|
55
|
-
description: "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.",
|
|
56
|
-
type: "string[]",
|
|
57
|
-
optional: true,
|
|
58
|
-
},
|
|
59
|
-
presencePenalty: {
|
|
60
|
-
label: "Presence Penalty",
|
|
61
|
-
description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
|
|
62
|
-
type: "string",
|
|
63
|
-
optional: true,
|
|
64
|
-
},
|
|
65
|
-
frequencyPenalty: {
|
|
66
|
-
label: "Frequency Penalty",
|
|
67
|
-
description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
|
|
26
|
+
description: "The suffix that comes after a completion of inserted text",
|
|
68
27
|
type: "string",
|
|
69
28
|
optional: true,
|
|
70
29
|
},
|
|
30
|
+
...common.props,
|
|
71
31
|
bestOf: {
|
|
72
32
|
label: "Best Of",
|
|
73
|
-
description: "Generates best_of completions server-side and returns the \"best\" (the one with the highest log probability per token).
|
|
33
|
+
description: "Generates best_of completions server-side and returns the \"best\" (the one with the highest log probability per token). If set, results cannot be streamed.",
|
|
74
34
|
type: "integer",
|
|
75
35
|
optional: true,
|
|
76
36
|
},
|
|
77
37
|
},
|
|
78
38
|
async run({ $ }) {
|
|
79
|
-
const response = await this.openai.
|
|
39
|
+
const response = await this.openai.createCompletion({
|
|
80
40
|
$,
|
|
81
|
-
|
|
82
|
-
model: this.modelId,
|
|
83
|
-
prompt: this.prompt,
|
|
84
|
-
max_tokens: this.maxTokens,
|
|
85
|
-
temperature: this.temperature
|
|
86
|
-
? +this.temperature
|
|
87
|
-
: this.temperature,
|
|
88
|
-
top_p: this.topP
|
|
89
|
-
? +this.topP
|
|
90
|
-
: this.topP,
|
|
91
|
-
n: this.n
|
|
92
|
-
? +this.n
|
|
93
|
-
: this.n,
|
|
94
|
-
stop: this.stop,
|
|
95
|
-
presence_penalty: this.presencePenalty
|
|
96
|
-
? +this.presencePenalty
|
|
97
|
-
: this.presencePenalty,
|
|
98
|
-
frequency_penalty: this.frequencyPenalty
|
|
99
|
-
? +this.frequencyPenalty
|
|
100
|
-
: this.frequencyPenalty,
|
|
101
|
-
best_of: this.bestOf,
|
|
102
|
-
},
|
|
41
|
+
args: this._getCommonArgs(),
|
|
103
42
|
});
|
|
104
43
|
|
|
105
44
|
if (response) {
|
|
import common from "../common/common-helper.mjs";

// Action: summarize text via the Chat API, built on the shared chat helper.
// The helper calls back into systemInstructions/userMessage/formatOutput.
const action = {
  ...common,
  name: "Summarize Text",
  version: "0.0.1",
  key: "openai-summarize",
  description: "Summarizes text using the Chat API",
  type: "action",
  props: {
    text: {
      label: "Text",
      description: "The text to summarize",
      type: "string",
    },
    length: {
      label: "Summary Length",
      description: "The length of the summary",
      type: "string",
      optional: true,
      options: [
        "word",
        "sentence",
        "paragraph",
        "page",
      ],
    },
    ...common.props,
  },
  methods: {
    ...common.methods,
    // System message steering the chat model toward summarization.
    systemInstructions() {
      return "Your goal is to summarize the text the user provides. Please follow the length guidelines presented in the prompt.";
    },
    // Length hint appended to the user prompt; empty when no length chosen.
    maxLength() {
      if (!this.length) {
        return "";
      }
      return ` in one ${this.length}`;
    },
    // The full user prompt sent to the Chat API.
    userMessage() {
      return `Summarize the following text${this.maxLength()}: ${this.text}`;
    },
    // Shapes the helper's raw output into { summary, messages }.
    formatOutput({
      messages, response,
    }) {
      if (!(messages && response)) {
        throw new Error("Invalid API output, please reach out to https://pipedream.com/support");
      }
      return {
        summary: response.choices?.[0]?.message?.content,
        messages,
      };
    },
  },
};

export default action;
|
import common from "../common/common-helper.mjs";
import lang from "../common/lang.mjs";

// Shared dropdown options: one { label, value } entry per supported language.
const langOptions = lang.LANGUAGES.map(({ label, value }) => ({
  label,
  value,
}));

// Action: translate text between two languages via the Chat API, built on the
// shared chat helper (which calls back into the methods defined below).
const action = {
  ...common,
  name: "Translate Text",
  version: "0.0.1",
  key: "openai-translate-text",
  description: "Translate text from one language to another using the Chat API",
  type: "action",
  props: {
    ...common.props,
    text: {
      label: "Text",
      description: "Text to translate",
      type: "string",
    },
    sourceLang: {
      label: "Source language",
      description: "The language of your provided text",
      type: "string",
      options: langOptions,
    },
    targetLang: {
      label: "Target language",
      description: "The language you want to translate your text to",
      type: "string",
      options: langOptions,
    },
  },
  methods: {
    ...common.methods,
    // System message steering the chat model toward translation.
    systemInstructions() {
      return "Your goal is to translate the text the user provides. Please follow the language guidelines presented in the prompt.";
    },
    // The full user prompt; languages are ISO 639-1 codes from langOptions.
    userMessage() {
      return `Translate the following text from ISO 639-1 ${this.sourceLang} to ISO 639-1 ${this.targetLang}:\n\n${this.text}`;
    },
    // One-line summary exported after a successful run.
    summarize() {
      return `Translated text from ${this.sourceLang} to ${this.targetLang}`;
    },
    // Shapes the helper's raw output into the exported result object.
    formatOutput({
      messages, response,
    }) {
      if (!(messages && response)) {
        throw new Error("Invalid API output, please reach out to https://pipedream.com/support");
      }

      return {
        translation: response.choices?.[0]?.message?.content,
        source_lang: this.sourceLang,
        target_lang: this.targetLang,
        messages,
      };
    },
  },
};

export default action;
|
import { axios } from "@pipedream/platform";

/**
 * OpenAI app module: shared prop definitions plus thin HTTP helpers around
 * the OpenAI REST API (base URL https://api.openai.com/v1).
 */
const openaiApp = {
  type: "app",
  app: "openai",
  propDefinitions: {
    completionModelId: {
      label: "Model",
      description: "The ID of the model to use for completions. **This action doesn't support the ChatGPT `turbo` models**. Use the **Chat** action for those, instead.",
      type: "string",
      async options() {
        return (await this.getCompletionModels({})).map((model) => model.id);
      },
      default: "text-davinci-003",
    },
    chatCompletionModelId: {
      label: "Model",
      description: "The ID of the model to use for chat completions",
      type: "string",
      async options() {
        return (await this.getChatCompletionModels({})).map((model) => model.id);
      },
      default: "gpt-3.5-turbo",
    },
    embeddingsModelId: {
      label: "Model",
      description: "The ID of the embeddings model to use. OpenAI recommends using `text-embedding-ada-002` for nearly all use cases: \"It's better, cheaper, and simpler to use. [Read the blog post announcement](https://openai.com/blog/new-and-improved-embedding-model)\".",
      type: "string",
      async options() {
        return (await this.getEmbeddingsModels({})).map((model) => model.id);
      },
      default: "text-embedding-ada-002",
    },
  },
  methods: {
    // API key from the connected Pipedream account.
    _apiKey() {
      return this.$auth.api_key;
    },
    _baseApiUrl() {
      return "https://api.openai.com/v1";
    },
    // Headers sent on every request (auth, accept, identifying UA).
    _commonHeaders() {
      return {
        "Authorization": `Bearer ${this._apiKey()}`,
        "Accept": "application/json",
        "User-Agent": "@PipedreamHQ/pipedream v1.0",
      };
    },
    /**
     * Perform an authenticated request against the OpenAI API.
     * `path` is appended to the base URL; all other keys are forwarded to
     * axios (method, data, headers, ...). Returns the parsed response body.
     */
    async _makeRequest({
      $ = this,
      path,
      ...args
    } = {}) {
      return axios($, {
        url: `${this._baseApiUrl()}${path}`,
        headers: {
          ...this._commonHeaders(),
        },
        ...args,
      });
    },
    // List all models available to the account, sorted by id.
    async models({ $ }) {
      const { data: models } = await this._makeRequest({
        $,
        path: "/models",
      });
      return models.sort((a, b) => a?.id.localeCompare(b?.id));
    },
    // Models usable with /chat/completions (the `turbo` family).
    async getChatCompletionModels({ $ }) {
      const models = await this.models({
        $,
      });
      return models.filter((model) => model.id.match(/turbo/gi));
    },
    // Base GPT-3 models usable with the legacy /completions endpoint:
    // must name a base family, and must not be a specialized variant.
    async getCompletionModels({ $ }) {
      const models = await this.models({
        $,
      });
      return models.filter((model) => {
        const { id } = model;
        return (
          id.match(/^(?=.*\b(babbage|davinci|ada|curie)\b)(?!.*\b(whisper|turbo|edit|insert|search|embedding|similarity|001)\b).*$/gm)
        );
      });
    },
    // Models usable with /embeddings (ada-002 or first-gen -001 variants).
    async getEmbeddingsModels({ $ }) {
      const models = await this.models({
        $,
      });
      return models.filter((model) => {
        const { id } = model;
        return (
          id.match(/^(text-embedding-ada-002|.*-(davinci|curie|babbage|ada)-.*-001)$/gm)
        );
      });
    },
    /**
     * POST to a completion-style endpoint and surface the first choice at the
     * top level of the result: `generated_text` for /completions,
     * `generated_message` for /chat/completions. The raw response body is
     * spread into the result as well.
     */
    async _makeCompletion({
      $, path, args,
    }) {
      const data = await this._makeRequest({
        $,
        path,
        method: "POST",
        data: args,
      });

      // For completions, return the text of the first choice at the top-level
      let generated_text;
      if (path === "/completions") {
        const { choices } = data;
        generated_text = choices?.[0]?.text;
      }
      // For chat completions, return the assistant message at the top-level
      let generated_message;
      if (path === "/chat/completions") {
        const { choices } = data;
        generated_message = choices?.[0]?.message;
      }

      return {
        generated_text,
        generated_message,
        ...data,
      };
    },
    async createCompletion({
      $, args,
    }) {
      return this._makeCompletion({
        $,
        path: "/completions",
        args,
      });
    },
    async createChatCompletion({
      $, args,
    }) {
      return this._makeCompletion({
        $,
        path: "/chat/completions",
        args,
      });
    },
    async createImage({
      $, args,
    }) {
      return this._makeRequest({
        $,
        path: "/images/generations",
        data: args,
        method: "POST",
      });
    },
    async createEmbeddings({
      $, args,
    }) {
      return this._makeRequest({
        $,
        path: "/embeddings",
        data: args,
        method: "POST",
      });
    },
    // Multipart upload of an audio file for transcription. `form` is a
    // form-data instance already populated by the caller.
    async createTranscription({
      $, form,
    }) {
      return this._makeRequest({
        $,
        path: "/audio/transcriptions",
        method: "POST",
        headers: {
          ...this._commonHeaders(),
          // FIX: use form-data's public getHeaders() to supply the multipart
          // Content-Type (with the correct boundary) instead of reading the
          // library-private `_boundary` field.
          ...form.getHeaders(),
        },
        data: form,
      });
    },
  },
};

export default openaiApp;
|
package/package.json
CHANGED
|
@@ -1,20 +1,24 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@pipedream/openai",
|
|
3
|
-
"version": "0.0
|
|
3
|
+
"version": "0.1.0",
|
|
4
4
|
"description": "Pipedream OpenAI Components",
|
|
5
|
-
"main": "openai.app.mjs",
|
|
5
|
+
"main": "app/openai.app.mjs",
|
|
6
6
|
"keywords": [
|
|
7
7
|
"pipedream",
|
|
8
8
|
"openai"
|
|
9
9
|
],
|
|
10
10
|
"homepage": "https://pipedream.com/apps/openai",
|
|
11
11
|
"author": "Pipedream <support@pipedream.com> (https://pipedream.com/)",
|
|
12
|
-
"license": "MIT",
|
|
13
12
|
"gitHead": "e12480b94cc03bed4808ebc6b13e7fdb3a1ba535",
|
|
14
13
|
"publishConfig": {
|
|
15
14
|
"access": "public"
|
|
16
15
|
},
|
|
17
16
|
"dependencies": {
|
|
18
|
-
"@pipedream/platform": "^1.2.1"
|
|
17
|
+
"@pipedream/platform": "^1.2.1",
|
|
18
|
+
"@pipedream/types": "^0.1.4",
|
|
19
|
+
"openai": "^3.2.1"
|
|
20
|
+
},
|
|
21
|
+
"devDependencies": {
|
|
22
|
+
"@types/node": "^17.0.45"
|
|
19
23
|
}
|
|
20
24
|
}
|