@pipedream/openai 0.10.1 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,14 +1,14 @@
1
1
  import openai from "../../openai.app.mjs";
2
2
  import common from "../common/common-assistants.mjs";
3
3
  import FormData from "form-data";
4
- import fs from "fs";
4
+ import { getFileStreamAndMetadata } from "@pipedream/platform";
5
5
 
6
6
  export default {
7
7
  ...common,
8
8
  key: "openai-analyze-image-content",
9
9
  name: "Analyze Image Content",
10
10
  description: "Send a message or question about an image and receive a response. [See the documentation](https://platform.openai.com/docs/api-reference/runs/createThreadAndRun)",
11
- version: "0.1.6",
11
+ version: "1.0.0",
12
12
  type: "action",
13
13
  props: {
14
14
  openai,
@@ -17,12 +17,6 @@ export default {
17
17
  label: "Message",
18
18
  description: "The message or question to send",
19
19
  },
20
- imageUrl: {
21
- type: "string",
22
- label: "Image URL",
23
- description: "The URL of the image to analyze. Must be a supported image types: jpeg, jpg, png, gif, webp",
24
- optional: true,
25
- },
26
20
  imageFileId: {
27
21
  propDefinition: [
28
22
  openai,
@@ -35,8 +29,8 @@ export default {
35
29
  },
36
30
  filePath: {
37
31
  type: "string",
38
- label: "File Path",
39
- description: "The path to a file in the `/tmp` directory. [See the documentation on working with files](https://pipedream.com/docs/code/nodejs/working-with-files/#writing-a-file-to-tmp)",
32
+ label: "File Path or URL",
33
+ description: "The image to process. Provide either a file URL or a path to a file in the `/tmp` directory (for example, `/tmp/myFile.jpg`). Supported image types: jpeg, jpg, png, gif, webp",
40
34
  optional: true,
41
35
  },
42
36
  },
@@ -83,11 +77,15 @@ export default {
83
77
  }
84
78
  if (this.filePath) {
85
79
  const fileData = new FormData();
86
- const content = fs.createReadStream(this.filePath.includes("tmp/")
87
- ? this.filePath
88
- : `/tmp/${this.filePath}`);
80
+ const {
81
+ stream, metadata,
82
+ } = await getFileStreamAndMetadata(this.filePath);
89
83
  fileData.append("purpose", "vision");
90
- fileData.append("file", content);
84
+ fileData.append("file", stream, {
85
+ contentType: metadata.contentType,
86
+ knownLength: metadata.size,
87
+ filename: metadata.name,
88
+ });
91
89
 
92
90
  const { id } = await this.openai.uploadFile({
93
91
  $,
@@ -1,12 +1,11 @@
1
1
  import openai from "../../openai.app.mjs";
2
2
  import common from "../common/common.mjs";
3
3
  import constants from "../../common/constants.mjs";
4
- import { ConfigurationError } from "@pipedream/platform";
5
4
 
6
5
  export default {
7
6
  ...common,
8
7
  name: "Chat",
9
- version: "0.2.9",
8
+ version: "0.3.1",
10
9
  key: "openai-chat",
11
10
  description: "The Chat API, using the `gpt-3.5-turbo` or `gpt-4` model. [See the documentation](https://platform.openai.com/docs/api-reference/chat)",
12
11
  type: "action",
@@ -44,13 +43,13 @@ export default {
44
43
  images: {
45
44
  label: "Images",
46
45
  type: "string[]",
47
- description: "Provide one or more images to [OpenAI's vision model](https://platform.openai.com/docs/guides/vision). Accepts URLs or base64 encoded strings. Compatible with the `gpt4-vision-preview` model",
46
+ description: "Provide one or more images to [OpenAI's vision model](https://platform.openai.com/docs/guides/vision). Each entry should be either a file URL or a path to a file in the `/tmp` directory (for example, `/tmp/myFile.jpg`), or raw base64-encoded image data. Compatible with the `gpt4-vision-preview` model",
48
47
  optional: true,
49
48
  },
50
49
  audio: {
51
50
  type: "string",
52
51
  label: "Audio",
53
- description: "Provide the file path to an audio file in the `/tmp` directory. For use with the `gpt-4o-audio-preview` model. Currently supports `wav` and `mp3` files.",
52
+ description: "The audio file to upload. Provide either a file URL or a path to a file in the `/tmp` directory (for example, `/tmp/myFile.mp3`). For use with the `gpt-4o-audio-preview` model. Currently supports `wav` and `mp3` files.",
54
53
  optional: true,
55
54
  },
56
55
  responseFormat: {
@@ -145,11 +144,7 @@ export default {
145
144
  },
146
145
  },
147
146
  async run({ $ }) {
148
- if (this.audio && !this.modelId.includes("gpt-4o-audio-preview")) {
149
- throw new ConfigurationError("Use of audio files requires using the `gpt-4o-audio-preview` model.");
150
- }
151
-
152
- const args = this._getChatArgs();
147
+ const args = await this._getChatArgs();
153
148
 
154
149
  const response = await this.openai.createChatCompletion({
155
150
  $,
@@ -5,7 +5,7 @@ import constants from "../../common/constants.mjs";
5
5
  export default {
6
6
  ...common,
7
7
  name: "Chat using File Search",
8
- version: "0.0.4",
8
+ version: "0.0.5",
9
9
  key: "openai-chat-using-file-search",
10
10
  description: "Chat with your files knowledge base (vector stores). [See the documentation](https://platform.openai.com/docs/guides/tools-file-search)",
11
11
  type: "action",
@@ -5,7 +5,7 @@ import constants from "../../common/constants.mjs";
5
5
  export default {
6
6
  ...common,
7
7
  name: "Chat using Functions",
8
- version: "0.0.5",
8
+ version: "0.0.6",
9
9
  key: "openai-chat-using-functions",
10
10
  description: "Chat with your models and allow them to invoke functions. Optionally, you can build and invoke workflows as functions. [See the documentation](https://platform.openai.com/docs/guides/function-calling)",
11
11
  type: "action",
@@ -5,7 +5,7 @@ import constants from "../../common/constants.mjs";
5
5
  export default {
6
6
  ...common,
7
7
  name: "Chat using Web Search",
8
- version: "0.0.4",
8
+ version: "0.0.5",
9
9
  key: "openai-chat-using-web-search",
10
10
  description: "Chat using the web search tool. [See the documentation](https://platform.openai.com/docs/guides/tools-web-search)",
11
11
  type: "action",
@@ -3,7 +3,7 @@ import common from "../common/common-helper.mjs";
3
3
  export default {
4
4
  ...common,
5
5
  name: "Classify Items into Categories",
6
- version: "0.1.6",
6
+ version: "0.1.7",
7
7
  key: "openai-classify-items-into-categories",
8
8
  description: "Classify items into specific categories using the Chat API. [See the documentation](https://platform.openai.com/docs/api-reference/chat)",
9
9
  type: "action",
@@ -1,7 +1,8 @@
1
- import { ConfigurationError } from "@pipedream/platform";
1
+ import {
2
+ ConfigurationError, getFileStreamAndMetadata,
3
+ } from "@pipedream/platform";
2
4
  import constants from "../../common/constants.mjs";
3
5
  import { parse } from "../../common/helpers.mjs";
4
- import fs from "fs";
5
6
 
6
7
  const CHAT_DOCS_MESSAGE_FORMAT_URL = "https://platform.openai.com/docs/guides/chat/introduction";
7
8
 
@@ -58,10 +59,9 @@ export default {
58
59
  },
59
60
  methods: {
60
61
  _getCommonArgs() {
61
- return {
62
+ const args = {
62
63
  model: this.modelId,
63
64
  prompt: this.prompt,
64
- max_tokens: this.maxTokens,
65
65
  temperature: this.temperature
66
66
  ? +this.temperature
67
67
  : this.temperature,
@@ -79,25 +79,51 @@ export default {
79
79
  best_of: this.bestOf,
80
80
  user: this.user,
81
81
  };
82
+ if (this.modelId.startsWith("o1") || this.modelId.startsWith("o3") || this.modelId.startsWith("o4")) {
83
+ args.max_completion_tokens = this.maxTokens;
84
+ } else {
85
+ args.max_tokens = this.maxTokens;
86
+ }
87
+ return args;
82
88
  },
83
- _getUserMessageContent() {
89
+ async _getUserMessageContent() {
84
90
  let content = [];
85
91
  if (this.images) {
86
92
  for (const image of this.images) {
93
+ let base64Image = image;
94
+ let imageType = "image/jpeg";
95
+ if (image.startsWith("http") || image.includes("tmp/")) {
96
+ const {
97
+ stream, metadata,
98
+ } = await getFileStreamAndMetadata(image);
99
+ const chunks = [];
100
+ for await (const chunk of stream) {
101
+ chunks.push(chunk);
102
+ }
103
+ base64Image = Buffer.concat(chunks).toString("base64");
104
+ if (metadata.contentType) imageType = metadata.contentType;
105
+ }
87
106
  content.push({
88
107
  "type": "image_url",
89
108
  "image_url": {
90
- "url": image,
109
+ "url": base64Image.startsWith("data:")
110
+ ? base64Image
111
+ : `data:${imageType};base64,${base64Image}`,
91
112
  },
92
113
  });
93
114
  }
94
115
  }
95
116
 
96
117
  if (this.audio) {
97
- const fileContent = fs.readFileSync(this.audio.includes("tmp/")
98
- ? this.audio
99
- : `/tmp/${this.audio}`).toString("base64");
100
- const extension = this.audio.match(/\.(\w+)$/)?.[1];
118
+ const {
119
+ stream, metadata,
120
+ } = await getFileStreamAndMetadata(this.audio);
121
+ const chunks = [];
122
+ for await (const chunk of stream) {
123
+ chunks.push(chunk);
124
+ }
125
+ const fileContent = Buffer.concat(chunks).toString("base64");
126
+ const extension = metadata.name.split(".").pop();
101
127
  content.push({
102
128
  type: "input_audio",
103
129
  input_audio: {
@@ -114,7 +140,7 @@ export default {
114
140
 
115
141
  return content;
116
142
  },
117
- _getChatArgs() {
143
+ async _getChatArgs() {
118
144
  if (this.messages && this.messages.length && !this.userMessage) {
119
145
  throw new ConfigurationError(
120
146
  `When you provide previous messages, you must provide the next User Message for the assistant to answer. See the OpenAI Chat format docs here: ${CHAT_DOCS_MESSAGE_FORMAT_URL}`,
@@ -160,7 +186,7 @@ export default {
160
186
 
161
187
  messages.push({
162
188
  "role": "user",
163
- "content": this._getUserMessageContent(),
189
+ "content": await this._getUserMessageContent(),
164
190
  });
165
191
 
166
192
  const responseFormat = {};
@@ -1,14 +1,15 @@
1
1
  import openai from "../../openai.app.mjs";
2
2
  import constants from "../../common/constants.mjs";
3
- import { ConfigurationError } from "@pipedream/platform";
3
+ import {
4
+ ConfigurationError, getFileStreamAndMetadata,
5
+ } from "@pipedream/platform";
4
6
  import FormData from "form-data";
5
- import fs from "fs";
6
7
 
7
8
  export default {
8
9
  key: "openai-create-batch",
9
10
  name: "Create Batch",
10
11
  description: "Creates and executes a batch from an uploaded file of requests. [See the documentation](https://platform.openai.com/docs/api-reference/batch/create)",
11
- version: "0.0.9",
12
+ version: "0.1.0",
12
13
  type: "action",
13
14
  props: {
14
15
  openai,
@@ -30,8 +31,8 @@ export default {
30
31
  },
31
32
  filePath: {
32
33
  type: "string",
33
- label: "File Path",
34
- description: "The path to a .jsonl file in the `/tmp` directory. [See the documentation on working with files](https://pipedream.com/docs/code/nodejs/working-with-files/#writing-a-file-to-tmp)",
34
+ label: "File Path or URL",
35
+ description: "The .jsonl file to process. Provide either a file URL or a path to a file in the `/tmp` directory (for example, `/tmp/myFile.jsonl`)",
35
36
  optional: true,
36
37
  },
37
38
  metadata: {
@@ -49,11 +50,15 @@ export default {
49
50
  let fileId = this.fileId;
50
51
  if (this.filePath) {
51
52
  const fileData = new FormData();
52
- const content = fs.createReadStream(this.filePath.includes("tmp/")
53
- ? this.filePath
54
- : `/tmp/${this.filePath}`);
53
+ const {
54
+ stream, metadata,
55
+ } = await getFileStreamAndMetadata(this.filePath);
55
56
  fileData.append("purpose", "batch");
56
- fileData.append("file", content);
57
+ fileData.append("file", stream, {
58
+ contentType: metadata.contentType,
59
+ knownLength: metadata.size,
60
+ filename: metadata.name,
61
+ });
57
62
 
58
63
  const { id } = await this.openai.uploadFile({
59
64
  $,
@@ -4,7 +4,7 @@ import common from "../common/common.mjs";
4
4
 
5
5
  export default {
6
6
  name: "Create Embeddings",
7
- version: "0.0.18",
7
+ version: "0.0.19",
8
8
  key: "openai-create-embeddings",
9
9
  description: "Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. [See the documentation](https://platform.openai.com/docs/api-reference/embeddings)",
10
10
  type: "action",
@@ -1,12 +1,12 @@
1
+ import { getFileStreamAndMetadata } from "@pipedream/platform";
1
2
  import openai from "../../openai.app.mjs";
2
3
  import FormData from "form-data";
3
- import fs from "fs";
4
4
 
5
5
  export default {
6
6
  key: "openai-create-transcription",
7
7
  name: "Create Transcription",
8
8
  description: "Transcribes audio into the input language. [See the documentation](https://platform.openai.com/docs/api-reference/audio/createTranscription)",
9
- version: "0.2.0",
9
+ version: "0.3.0",
10
10
  type: "action",
11
11
  props: {
12
12
  openai,
@@ -90,11 +90,15 @@ export default {
90
90
  } = this;
91
91
 
92
92
  const data = new FormData();
93
- const content = fs.createReadStream(file.includes("tmp/")
94
- ? file
95
- : `/tmp/${file}`);
93
+ const {
94
+ stream, metadata,
95
+ } = await getFileStreamAndMetadata(file);
96
96
 
97
- data.append("file", content);
97
+ data.append("file", stream, {
98
+ contentType: metadata.contentType,
99
+ knownLength: metadata.size,
100
+ filename: metadata.name,
101
+ });
98
102
 
99
103
  for (const [
100
104
  key,
@@ -4,7 +4,7 @@ import common from "../common/common.mjs";
4
4
  export default {
5
5
  ...common,
6
6
  name: "Create Completion (Send Prompt)",
7
- version: "0.1.17",
7
+ version: "0.1.18",
8
8
  key: "openai-send-prompt",
9
9
  description: "OpenAI recommends using the **Chat** action for the latest `gpt-3.5-turbo` API, since it's faster and 10x cheaper. This action creates a completion for the provided prompt and parameters using the older `/completions` API. [See the documentation](https://beta.openai.com/docs/api-reference/completions/create)",
10
10
  type: "action",
@@ -4,7 +4,7 @@ import constants from "../../common/constants.mjs";
4
4
  export default {
5
5
  ...common,
6
6
  name: "Summarize Text",
7
- version: "0.1.6",
7
+ version: "0.1.7",
8
8
  key: "openai-summarize",
9
9
  description: "Summarizes text using the Chat API. [See the documentation](https://platform.openai.com/docs/api-reference/chat)",
10
10
  type: "action",
@@ -9,7 +9,7 @@ const langOptions = lang.LANGUAGES.map((l) => ({
9
9
  export default {
10
10
  ...common,
11
11
  name: "Translate Text (Whisper)",
12
- version: "0.1.6",
12
+ version: "0.1.7",
13
13
  key: "openai-translate-text",
14
14
  description: "Translate text from one language to another using the Chat API. [See the documentation](https://platform.openai.com/docs/api-reference/chat)",
15
15
  type: "action",
@@ -1,12 +1,12 @@
1
1
  import FormData from "form-data";
2
- import fs from "fs";
3
2
  import openai from "../../openai.app.mjs";
3
+ import { getFileStreamAndMetadata } from "@pipedream/platform";
4
4
 
5
5
  export default {
6
6
  key: "openai-upload-file",
7
7
  name: "Upload File",
8
8
  description: "Upload a file that can be used across various endpoints/features. The size of individual files can be a maximum of 512mb. [See the documentation](https://platform.openai.com/docs/api-reference/files/create)",
9
- version: "0.0.18",
9
+ version: "0.1.0",
10
10
  type: "action",
11
11
  props: {
12
12
  openai,
@@ -28,11 +28,15 @@ export default {
28
28
  file, purpose,
29
29
  } = this;
30
30
  const data = new FormData();
31
- const content = fs.createReadStream(file.includes("tmp/")
32
- ? file
33
- : `/tmp/${file}`);
31
+ const {
32
+ stream, metadata,
33
+ } = await getFileStreamAndMetadata(file);
34
34
  data.append("purpose", purpose);
35
- data.append("file", content);
35
+ data.append("file", stream, {
36
+ contentType: metadata.contentType,
37
+ knownLength: metadata.size,
38
+ filename: metadata.name,
39
+ });
36
40
 
37
41
  const response = await this.openai.uploadFile({
38
42
  $,
package/openai.app.mjs CHANGED
@@ -255,8 +255,8 @@ export default {
255
255
  },
256
256
  file: {
257
257
  type: "string",
258
- label: "File",
259
- description: "The path to a file in the `/tmp` directory. [See the documentation on working with files](https://pipedream.com/docs/code/nodejs/working-with-files/#writing-a-file-to-tmp). See the [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files.",
258
+ label: "File Path or URL",
259
+ description: "The file to process. Provide either a file URL or a path to a file in the `/tmp` directory (for example, `/tmp/myFile.txt`). See the [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files.",
260
260
  },
261
261
  purpose: {
262
262
  type: "string",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@pipedream/openai",
3
- "version": "0.10.1",
3
+ "version": "1.0.1",
4
4
  "description": "Pipedream OpenAI Components",
5
5
  "main": "openai.app.mjs",
6
6
  "keywords": [
@@ -14,7 +14,7 @@
14
14
  "access": "public"
15
15
  },
16
16
  "dependencies": {
17
- "@pipedream/platform": "^3.0.3",
17
+ "@pipedream/platform": "^3.1.0",
18
18
  "@pipedream/types": "^0.1.4",
19
19
  "axios": "^1.6.2",
20
20
  "bottleneck": "^2.19.5",