@pipedream/openai 0.6.0 → 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,11 +1,12 @@
1
1
  import openai from "../../openai.app.mjs";
2
2
  import common from "../common/common.mjs";
3
3
  import constants from "../../common/constants.mjs";
4
+ import { ConfigurationError } from "@pipedream/platform";
4
5
 
5
6
  export default {
6
7
  ...common,
7
8
  name: "Chat",
8
- version: "0.2.0",
9
+ version: "0.2.1",
9
10
  key: "openai-chat",
10
11
  description: "The Chat API, using the `gpt-3.5-turbo` or `gpt-4` model. [See the documentation](https://platform.openai.com/docs/api-reference/chat)",
11
12
  type: "action",
@@ -38,7 +39,13 @@ export default {
38
39
  images: {
39
40
  label: "Images",
40
41
  type: "string[]",
41
- description: "Provide one or more images to [OpenAI's vision model](https://platform.openai.com/docs/guides/vision). Accepts URLs or base64 encoded strings. Compatible with the `gpt4-vision-preview model`",
42
+ description: "Provide one or more images to [OpenAI's vision model](https://platform.openai.com/docs/guides/vision). Accepts URLs or base64 encoded strings. Compatible with the `gpt-4-vision-preview` model",
43
+ optional: true,
44
+ },
45
+ audio: {
46
+ type: "string",
47
+ label: "Audio",
48
+ description: "Provide the file path to an audio file in the `/tmp` directory. For use with the `gpt-4o-audio-preview` model. Currently supports `wav` and `mp3` files.",
42
49
  optional: true,
43
50
  },
44
51
  responseFormat: {
@@ -65,6 +72,10 @@ export default {
65
72
  };
66
73
  },
67
74
  async run({ $ }) {
75
+ if (this.audio && !this.modelId.includes("gpt-4o-audio-preview")) {
76
+ throw new ConfigurationError("Use of audio files requires using the `gpt-4o-audio-preview` model.");
77
+ }
78
+
68
79
  const args = this._getChatArgs();
69
80
 
70
81
  const response = await this.openai.createChatCompletion({
@@ -3,7 +3,7 @@ import common from "../common/common-helper.mjs";
3
3
  export default {
4
4
  ...common,
5
5
  name: "Classify Items into Categories",
6
- version: "0.1.0",
6
+ version: "0.1.1",
7
7
  key: "openai-classify-items-into-categories",
8
8
  description: "Classify items into specific categories using the Chat API. [See the documentation](https://platform.openai.com/docs/api-reference/chat)",
9
9
  type: "action",
@@ -1,6 +1,7 @@
1
1
  import { ConfigurationError } from "@pipedream/platform";
2
2
  import constants from "../../common/constants.mjs";
3
3
  import { parse } from "../../common/helpers.mjs";
4
+ import fs from "fs";
4
5
 
5
6
  const CHAT_DOCS_MESSAGE_FORMAT_URL = "https://platform.openai.com/docs/guides/chat/introduction";
6
7
 
@@ -92,6 +93,20 @@ export default {
92
93
  }
93
94
  }
94
95
 
96
+ if (this.audio) {
97
+ const fileContent = fs.readFileSync(this.audio.includes("tmp/")
98
+ ? this.audio
99
+ : `/tmp/${this.audio}`).toString("base64");
100
+ const extension = this.audio.match(/\.(\w+)$/)?.[1];
101
+ content.push({
102
+ type: "input_audio",
103
+ input_audio: {
104
+ data: fileContent,
105
+ format: extension,
106
+ },
107
+ });
108
+ }
109
+
95
110
  content.push({
96
111
  "type": "text",
97
112
  "text": this.userMessage,
@@ -4,7 +4,7 @@ import common from "../common/common.mjs";
4
4
 
5
5
  export default {
6
6
  name: "Create Embeddings",
7
- version: "0.0.12",
7
+ version: "0.0.13",
8
8
  key: "openai-create-embeddings",
9
9
  description: "Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. [See the documentation](https://platform.openai.com/docs/api-reference/embeddings)",
10
10
  type: "action",
@@ -24,7 +24,7 @@ const pipelineAsync = promisify(stream.pipeline);
24
24
 
25
25
  export default {
26
26
  name: "Create Transcription (Whisper)",
27
- version: "0.1.12",
27
+ version: "0.1.13",
28
28
  key: "openai-create-transcription",
29
29
  description: "Transcribes audio into the input language. [See the documentation](https://platform.openai.com/docs/api-reference/audio/create).",
30
30
  type: "action",
@@ -4,7 +4,7 @@ import common from "../common/common.mjs";
4
4
  export default {
5
5
  ...common,
6
6
  name: "Create Completion (Send Prompt)",
7
- version: "0.1.11",
7
+ version: "0.1.12",
8
8
  key: "openai-send-prompt",
9
9
  description: "OpenAI recommends using the **Chat** action for the latest `gpt-3.5-turbo` API, since it's faster and 10x cheaper. This action creates a completion for the provided prompt and parameters using the older `/completions` API. [See the documentation](https://beta.openai.com/docs/api-reference/completions/create)",
10
10
  type: "action",
@@ -4,7 +4,7 @@ import constants from "../../common/constants.mjs";
4
4
  export default {
5
5
  ...common,
6
6
  name: "Summarize Text",
7
- version: "0.1.0",
7
+ version: "0.1.1",
8
8
  key: "openai-summarize",
9
9
  description: "Summarizes text using the Chat API. [See the documentation](https://platform.openai.com/docs/api-reference/chat)",
10
10
  type: "action",
@@ -9,7 +9,7 @@ const langOptions = lang.LANGUAGES.map((l) => ({
9
9
  export default {
10
10
  ...common,
11
11
  name: "Translate Text (Whisper)",
12
- version: "0.1.0",
12
+ version: "0.1.1",
13
13
  key: "openai-translate-text",
14
14
  description: "Translate text from one language to another using the Chat API. [See the documentation](https://platform.openai.com/docs/api-reference/chat)",
15
15
  type: "action",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@pipedream/openai",
3
- "version": "0.6.0",
3
+ "version": "0.6.1",
4
4
  "description": "Pipedream OpenAI Components",
5
5
  "main": "openai.app.mjs",
6
6
  "keywords": [