@pipedream/openai 0.0.2 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/actions/chat/chat.mjs +56 -0
- package/actions/classify-items-into-categories/classify-items-into-categories.mjs +56 -0
- package/actions/common/common-helper.mjs +55 -0
- package/actions/common/common.mjs +141 -0
- package/actions/common/constants.mjs +6 -3
- package/actions/common/lang.mjs +736 -0
- package/actions/create-embeddings/create-embeddings.mjs +51 -0
- package/actions/create-image/create-image.mjs +6 -13
- package/actions/create-transcription/create-transcription.mjs +133 -0
- package/actions/send-prompt/send-prompt.mjs +14 -75
- package/actions/summarize/summarize.mjs +55 -0
- package/actions/translate-text/translate-text.mjs +62 -0
- package/app/openai.app.mjs +179 -0
- package/package.json +8 -3
- package/openai.app.mjs +0 -57
|
import openai from "../../app/openai.app.mjs";
import common from "../common/common.mjs";

// "Chat" action: sends a single chat-completion request (optionally seeded
// with prior message history) and returns the API response plus the message
// arrays needed to continue the conversation on a later run.
export default {
  ...common,
  name: "Chat",
  version: "0.1.0",
  key: "openai-chat",
  description: "The Chat API, using the `gpt-3.5-turbo` model. [See docs here](https://platform.openai.com/docs/api-reference/chat)",
  type: "action",
  props: {
    openai,
    modelId: {
      propDefinition: [
        openai,
        "chatCompletionModelId",
      ],
    },
    userMessage: {
      label: "User Message",
      type: "string",
      description: "The user messages provide instructions to the assistant. They can be generated by the end users of an application, or set by a developer as an instruction.",
    },
    systemInstructions: {
      label: "System Instructions",
      type: "string",
      description: "The system message helps set the behavior of the assistant. For example: \"You are a helpful assistant.\" [See these docs](https://platform.openai.com/docs/guides/chat/instructing-chat-models) for tips on writing good instructions.",
      optional: true,
    },
    messages: {
      label: "Prior Message History",
      type: "string[]",
      description: "_Advanced_. Because [the models have no memory of past chat requests](https://platform.openai.com/docs/guides/chat/introduction), all relevant information must be supplied via the conversation. You can provide [an array of messages](https://platform.openai.com/docs/guides/chat/introduction) from prior conversations here. If this param is set, the action ignores the values passed to **System Instructions** and **Assistant Response**, appends the new **User Message** to the end of this array, and sends it to the API.",
      optional: true,
    },
    ...common.props,
  },
  async run({ $ }) {
    // _getChatArgs() (from common.mjs) validates any prior message history and
    // appends the new user message; it throws a ConfigurationError on bad input.
    const args = this._getChatArgs();
    const response = await this.openai.createChatCompletion({
      $,
      args,
    });

    const { messages } = args;

    // Bug fix: the original guarded `$summary` behind `if (response)` but then
    // dereferenced `response.choices` unconditionally, so a falsy response
    // produced a TypeError instead of a usable result.
    if (!response) {
      return {
        original_messages: messages,
      };
    }

    $.export("$summary", `Successfully sent chat with id ${response.id}`);

    const assistantMessage = response.choices?.[0]?.message;
    return {
      original_messages: messages,
      // The messages plus the assistant's reply, ready to be passed back in as
      // "Prior Message History" on a follow-up run. If the API returned no
      // choices, avoid appending `undefined` to the array.
      original_messages_with_assistant_response: assistantMessage
        ? messages.concat(assistantMessage)
        : messages,
      ...response,
    };
  },
};
import common from "../common/common-helper.mjs";

// "Classify Items into Categories" action: asks the Chat API to bucket each
// user-supplied item into one of the user-supplied categories, requesting
// JSON-only output so the response can be parsed programmatically.
export default {
  ...common,
  name: "Classify Items into Categories",
  version: "0.0.2",
  key: "openai-classify-items-into-categories",
  description: "Classify items into specific categories using the Chat API",
  type: "action",
  props: {
    ...common.props,
    items: {
      label: "Items",
      description: "Items to categorize",
      type: "string[]",
    },
    categories: {
      label: "Categories",
      description: "Categories to classify items into",
      type: "string[]",
    },
  },
  methods: {
    ...common.methods,
    // System prompt steering the model toward JSON-only output.
    systemInstructions() {
      return "Your goal is to categorize items into specific categories and produce ONLY JSON. The user will provide both the items and categories. Please only categorize items into the specific categories, and no others, and output ONLY JSON.";
    },
    // Output-format instructions appended to the end of the user message.
    outputFormat() {
      return "Please only categorize items into the specific categories, and no others. Output a valid JSON string — an array of objects, where each object has the following properties: item, category. Do not return any English text other than the JSON, either before or after the JSON. I need to parse the response as JSON, and parsing will fail if you return any English before or after the JSON";
    },
    userMessage() {
      // Bug fix: the original template ended with `${this.outputFormat()}}` —
      // the stray closing brace was sent to the model as literal prompt text.
      return `Categorize each of the following items:\n\n${this.items.join("\n")}\n\ninto one of the following categories:\n\n${this.categories.join("\n")}\n\n${this.outputFormat()}`;
    },
    summary() {
      return `Categorized ${this.items.length} items into ${this.categories.length} categories`;
    },
    // Parses the assistant's JSON reply. Falls back to the raw string (rather
    // than failing the step) when the model returns malformed JSON.
    formatOutput({
      messages, response,
    }) {
      if (!messages || !response) {
        throw new Error("Invalid API output, please reach out to https://pipedream.com/support");
      }
      const assistantResponse = response.choices?.[0]?.message?.content;
      let categorizations = assistantResponse;
      try {
        categorizations = JSON.parse(assistantResponse);
      } catch (err) {
        // Deliberate best-effort: keep the unparsed string so the user still
        // sees the model's output.
        console.log("Failed to parse output, assistant returned malformed JSON");
      }
      return {
        categorizations,
        messages,
      };
    },
  },
};
import openai from "../../app/openai.app.mjs";
import common from "./common.mjs";

// Scaffold for chat-based helper actions (summarize, translate, classify, …).
// Subclasses spread this object and override the methods below; run() builds
// the system/user message pair, calls the Chat API, and formats the output.
export default {
  ...common,
  props: {
    openai,
    ...common.props,
  },
  methods: {
    ...common.methods,
    // Required override: the system prompt for this action.
    systemInstructions() {
      throw new Error("systemInstructions() must be implemented by the component");
    },
    // Required override: the user prompt for this action.
    userMessage() {
      throw new Error("userMessage() must be implemented by the component");
    },
    // Optional override: a $summary string; returning nothing skips the export.
    summary() {
      return;
    },
    // Required override: shapes { messages, response } into the step's return value.
    formatOutput() {
      throw new Error("formatOutput() must be implemented by the component");
    },
  },
  async run({ $ }) {
    const systemMessage = {
      role: "system",
      content: this.systemInstructions(),
    };
    const userMessage = {
      role: "user",
      content: this.userMessage(),
    };
    const messages = [
      systemMessage,
      userMessage,
    ];

    const args = {
      ...this._getCommonArgs(),
      model: "gpt-3.5-turbo",
      messages,
    };
    const response = await this.openai.createChatCompletion({
      $,
      args,
    });

    // Export a summary only when the subclass provides one and the call succeeded.
    const summaryText = this.summary();
    if (summaryText && response) {
      $.export("$summary", summaryText);
    }

    return this.formatOutput({
      response,
      messages,
    });
  },
};
import { ConfigurationError } from "@pipedream/platform";

// Docs page describing the { role, content } chat message format; linked from
// the user-facing error messages below.
const CHAT_DOCS_MESSAGE_FORMAT_URL = "https://platform.openai.com/docs/guides/chat/introduction";

// Shared props and request-building helpers for the OpenAI actions. Components
// spread `...common.props` / `...common.methods` and call _getCommonArgs() or
// _getChatArgs() to build the API request body.
export default {
  props: {
    maxTokens: {
      label: "Max Tokens",
      description: "The maximum number of [tokens](https://beta.openai.com/tokenizer) to generate in the completion.",
      type: "integer",
      optional: true,
    },
    // NOTE: numeric-looking props below are declared as "string" props and are
    // converted to numbers in _getCommonArgs().
    temperature: {
      label: "Temperature",
      description: "**Optional**. What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.",
      type: "string",
      optional: true,
    },
    topP: {
      label: "Top P",
      description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.",
      type: "string",
      optional: true,
    },
    n: {
      label: "N",
      description: "How many completions to generate for each prompt",
      type: "integer",
      optional: true,
    },
    stop: {
      label: "Stop",
      description: "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.",
      type: "string[]",
      optional: true,
    },
    presencePenalty: {
      label: "Presence Penalty",
      description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
      type: "string",
      optional: true,
    },
    frequencyPenalty: {
      label: "Frequency Penalty",
      description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
      type: "string",
      optional: true,
    },
    user: {
      label: "User",
      description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more here](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).",
      type: "string",
      optional: true,
    },
  },
  methods: {
    // Maps the camelCase props onto the snake_case argument names the OpenAI
    // API expects. Props the user leaves unset stay `undefined` here and are
    // presumably dropped when the request body is serialized — TODO confirm
    // against the app's createChatCompletion implementation.
    _getCommonArgs() {
      return {
        model: this.modelId,
        // NOTE(review): `prompt` and `bestOf` are not declared in this file's
        // props — presumably set by completion-style subclasses (e.g.
        // send-prompt); verify against those components.
        prompt: this.prompt,
        max_tokens: this.maxTokens,
        // `+x` converts the string props to numbers; the ternary leaves
        // undefined/empty values untouched instead of coercing them to NaN/0.
        temperature: this.temperature
          ? +this.temperature
          : this.temperature,
        top_p: this.topP
          ? +this.topP
          : this.topP,
        n: this.n,
        stop: this.stop,
        presence_penalty: this.presencePenalty
          ? +this.presencePenalty
          : this.presencePenalty,
        frequency_penalty: this.frequencyPenalty
          ? +this.frequencyPenalty
          : this.frequencyPenalty,
        best_of: this.bestOf,
        user: this.user,
      };
    },
    // Builds the `messages` array for a chat request. If the user supplied
    // prior message history, each entry is validated (parsed from JSON when
    // given as a string, and checked for `role`/`content`) and the new user
    // message is appended; otherwise the array is built from the optional
    // system instructions plus the user message.
    _getChatArgs() {
      // Prior history without a new user message is a configuration error:
      // there would be nothing for the assistant to respond to.
      if (this.messages && this.messages.length && !this.userMessage) {
        throw new ConfigurationError(
          `When you provide previous messages, you must provide the next User Message for the assistant to answer. See the OpenAI Chat format docs here: ${CHAT_DOCS_MESSAGE_FORMAT_URL}`,
        );
      }
      let messages = [];
      if (this.messages) {
        for (const message of this.messages) {
          console.log(`Message: ${JSON.stringify(message)}`);
          let parsed;
          try {
            // History entries may arrive as JSON strings (string[] prop) or as
            // already-parsed objects; normalize to objects.
            if (typeof message === "string") {
              parsed = JSON.parse(message);
            } else {
              parsed = message;
            }
          } catch (err) {
            throw new ConfigurationError(
              `Please provide a valid array of chat messages. See the docs here: ${CHAT_DOCS_MESSAGE_FORMAT_URL}`,
            );
          }
          if (!parsed.role) {
            throw new ConfigurationError(
              `The following message doesn't have a "role" property:\n\n${JSON.stringify(message, null, 2)}\n\nSee the docs here: ${CHAT_DOCS_MESSAGE_FORMAT_URL}`,
            );
          }
          if (!parsed.content) {
            throw new ConfigurationError(
              `The following message doesn't have a "content" property:\n\n${JSON.stringify(message, null, 2)}\n\nSee the docs here: ${CHAT_DOCS_MESSAGE_FORMAT_URL}`,
            );
          }
          messages.push(parsed);
        }
        // Finally, we want to append the user message to the end of the array
        if (this.userMessage) {
          messages.push({
            "role": "user",
            "content": this.userMessage,
          });
        }
      } else {
        // No history: optional system instructions first, then the user message.
        if (this.systemInstructions) {
          messages.push({
            "role": "system",
            "content": this.systemInstructions,
          });
        }
        messages.push({
          "role": "user",
          "content": this.userMessage,
        });
      }

      return {
        ...this._getCommonArgs(),
        messages,
      };
    },
  },
};