@inductiv/node-red-openai-api 1.85.4 → 1.87.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -40,10 +40,34 @@ After installation, find your node in the **AI** palette category labeled "OpenA
40
40
  - **Configurable and Flexible**: Adapt to a wide range of project requirements, making it easy to integrate AI into your IoT solutions.
41
41
  - **Powerful Combinations**: Utilize Node-RED's diverse nodes to build complex, AI-driven IoT workflows with ease.
42
42
 
43
- ## Release Notes (v1.85.4)
44
-
45
- - **Ehancement:** Upgraded the OpenAI API Library dependency from [v4.85.1](https://github.com/openai/openai-node/releases/tag/v4.85.1) to [v4.85.4](https://github.com/openai/openai-node/releases/tag/v4.85.4).
46
- - **Enhancement:** Implemented [Chat Completion storage methods](https://platform.openai.com/docs/api-reference/chat).
43
+ ## Release Notes (v1.87.3)
44
+
45
+ - **Enhancement:** Upgraded the OpenAI API Library dependency from [v4.85.4](https://github.com/openai/openai-node/releases/tag/v4.85.4) to [v4.87.3](https://github.com/openai/openai-node/releases/tag/v4.87.3).
46
+ - **Features:** Implemented [Responses](https://platform.openai.com/docs/api-reference/responses) API capabilities.
47
+ - OpenAI's most advanced interface for generating model responses.
48
+ - Create stateful interactions with the model, using the output of previous responses as input and more.
49
+ - 🔥 Native agentic web search capabilities:
50
+
51
+ ```javascript
52
+ msg.payload = {
53
+ "model": "gpt-4o-mini",
54
+ "tools": [{ type: "web_search_preview" }],
55
+ "input": "What was a positive news story from today?"
56
+ }
57
+ ```
58
+
59
+ - 🔥 Native agentic computer use capabilities.
60
+ - 🔥 Reasoning model control properties:
61
+
62
+ ```javascript
63
+ msg.payload = {
64
+ "model": "o3-mini",
65
+ "input": "How much wood would a woodchuck chuck?",
66
+ "reasoning": {
67
+ "effort": "high"
68
+ }
69
+ }
70
+ ```
47
71
 
48
72
  ## What's New in Version 1.x
49
73
 
package/lib.js CHANGED
@@ -254,7 +254,10 @@ let OpenaiApi = (function () {
254
254
  const openai = new OpenAI(this.clientParams);
255
255
  const { completion_id, ...options } = parameters.payload;
256
256
 
257
- const response = await openai.chat.completions.retrieve(completion_id, options);
257
+ const response = await openai.chat.completions.retrieve(
258
+ completion_id,
259
+ options
260
+ );
258
261
 
259
262
  return response;
260
263
  }
@@ -263,7 +266,10 @@ let OpenaiApi = (function () {
263
266
  const openai = new OpenAI(this.clientParams);
264
267
  const { completion_id, ...options } = parameters.payload;
265
268
 
266
- const response = await openai.chat.completions.messages.list(completion_id, options);
269
+ const response = await openai.chat.completions.messages.list(
270
+ completion_id,
271
+ options
272
+ );
267
273
 
268
274
  return response.data;
269
275
  }
@@ -279,7 +285,10 @@ let OpenaiApi = (function () {
279
285
  const openai = new OpenAI(this.clientParams);
280
286
  const { completion_id, ...body } = parameters.payload;
281
287
 
282
- const response = await openai.chat.completions.update(completion_id, body);
288
+ const response = await openai.chat.completions.update(
289
+ completion_id,
290
+ body
291
+ );
283
292
 
284
293
  return response;
285
294
  }
@@ -288,7 +297,10 @@ let OpenaiApi = (function () {
288
297
  const openai = new OpenAI(this.clientParams);
289
298
  const { completion_id, ...options } = parameters.payload;
290
299
 
291
- const response = await openai.chat.completions.del(completion_id, options);
300
+ const response = await openai.chat.completions.del(
301
+ completion_id,
302
+ options
303
+ );
292
304
 
293
305
  return response;
294
306
  }
@@ -483,6 +495,59 @@ let OpenaiApi = (function () {
483
495
  return response;
484
496
  }
485
497
 
498
+ // >>> Begin Responses functions
499
+ async createModelResponse(parameters) {
500
+ const { _node, ...params } = parameters;
501
+ const openai = new OpenAI(this.clientParams);
502
+ const response = await openai.responses.create(parameters.payload);
503
+
504
+ if (params.payload.stream) {
505
+ _node.status({
506
+ fill: "green",
507
+ shape: "dot",
508
+ text: "OpenaiApi.status.streaming",
509
+ });
510
+ for await (const chunk of response) {
511
+ if (typeof chunk === "object") {
512
+ const newMsg = { ...parameters.msg, payload: chunk };
513
+ _node.send(newMsg);
514
+ }
515
+ }
516
+ _node.status({});
517
+ } else {
518
+ return response;
519
+ }
520
+ }
521
+
522
+ async getModelResponse(parameters) {
523
+ const openai = new OpenAI(this.clientParams);
524
+ const { response_id, ...params } = parameters.payload;
525
+ const response = await openai.responses.retrieve(
526
+ response_id,
527
+ params
528
+ );
529
+
530
+ return response;
531
+ }
532
+
533
+ async deleteModelResponse(parameters) {
534
+ const openai = new OpenAI(this.clientParams);
535
+ const { response_id, ...params } = parameters.payload;
536
+ const response = await openai.responses.del(response_id, params);
537
+
538
+ return response;
539
+ }
540
+
541
+ async listInputItems(parameters) {
542
+ const openai = new OpenAI(this.clientParams);
543
+ const { response_id, ...params } = parameters.payload;
544
+ const list = await openai.responses.inputItems.list(response_id, params);
545
+
546
+ return [...list.data];
547
+ }
548
+
549
+ // <<< End Responses functions
550
+
486
551
  async createModeration(parameters) {
487
552
  const openai = new OpenAI(this.clientParams);
488
553
  const response = await openai.moderations.create(parameters.payload);
@@ -99,6 +99,10 @@
99
99
  "retrieveModel": "retrieve model",
100
100
  "deleteModel": "delete fine-tune model",
101
101
  "createModeration": "create moderation",
102
+ "createModelResponse": "create model response",
103
+ "getModelResponse": "retrieve model response",
104
+ "deleteModelResponse": "delete model response",
105
+ "listInputItems": "list input items",
102
106
  "listAssistants": "list assistants",
103
107
  "order": "order",
104
108
  "before": "before",
package/node.html CHANGED
@@ -153,7 +153,7 @@
153
153
  ></option>
154
154
  </optgroup>
155
155
 
156
- <optgroup style="font-style: normal;" label="🗨️ Chat">
156
+ <optgroup style="font-style: normal;" label="💬 Chat">
157
157
  <option
158
158
  value="createChatCompletion"
159
159
  data-i18n="OpenaiApi.parameters.createChatCompletion"
@@ -289,7 +289,28 @@
289
289
  data-i18n="OpenaiApi.parameters.createModeration"
290
290
  ></option>
291
291
  </optgroup>
292
+ <optgroup style="font-style: normal;" label="🗨️ Responses">
293
+ <option
294
+ value="createModelResponse"
295
+ data-i18n="OpenaiApi.parameters.createModelResponse"
296
+ ></option>
297
+
298
+ <option
299
+ value="getModelResponse"
300
+ data-i18n="OpenaiApi.parameters.getModelResponse"
301
+ ></option>
292
302
 
303
+ <option
304
+ value="deleteModelResponse"
305
+ data-i18n="OpenaiApi.parameters.deleteModelResponse"
306
+ ></option>
307
+
308
+ <option
309
+ value="listInputItems"
310
+ data-i18n="OpenaiApi.parameters.listInputItems"
311
+ ></option>
312
+
313
+ </optgroup>
293
314
  <optgroup style="font-style: normal;" label="🔄 Runs (Beta)">
294
315
  <option
295
316
  value="createThreadAndRun"
@@ -1269,7 +1290,7 @@
1269
1290
 
1270
1291
  <section>
1271
1292
  <details>
1272
- <summary style="font-weight: bold;">🗨️ Chat</summary>
1293
+ <summary style="font-weight: bold;">💬 Chat</summary>
1273
1294
  <a
1274
1295
  href="https://platform.openai.com/docs/api-reference/chat"
1275
1296
  target="_blank"
@@ -2914,6 +2935,333 @@
2914
2935
 
2915
2936
  </details>
2916
2937
  </section>
2938
+ <!-- End Moderations docs -->
2939
+
2940
+ <!-- Begin Responses docs -->
2941
+ <section>
2942
+ <details>
2943
+ <summary style="font-weight: bold;">🗨️ Responses</summary>
2944
+ <a
2945
+ href="https://platform.openai.com/docs/api-reference/responses"
2946
+ target="_blank"
2947
+ >Official Documentation
2948
+ <i class="fa fa-external-link fa-sm" aria-hidden="true"></i
2949
+ ></a>
2950
+ <h4 style="font-weight: bolder;"> ⋙ Create Model Response</h4>
2951
+ <p>Creates a model response.</p>
2952
+ <dl class="message-properties">
2953
+ <h4>msg.payload Properties</h4>
2954
+
2955
+ <dt>
2956
+ input
2957
+ <a
2958
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-input"
2959
+ target="_blank"
2960
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
2961
+ ></a>
2962
+ <span class="property-type">string | array</span>
2963
+ </dt>
2964
+ <dd>Text, image, or file inputs to the model, used to generate a response.</dd>
2965
+
2966
+ <dt>
2967
+ model
2968
+ <a
2969
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-model"
2970
+ target="_blank"
2971
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
2972
+ ></a>
2973
+ <span class="property-type">string</span>
2974
+ </dt>
2975
+ <dd>Model ID used to generate the response.</dd>
2976
+
2977
+ <dt class="optional">
2978
+ include
2979
+ <a
2980
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-include"
2981
+ target="_blank"
2982
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
2983
+ ></a>
2984
+ <span class="property-type">array</span>
2985
+ </dt>
2986
+ <dd>Specify additional output data to include in the model response.</dd>
2987
+
2988
+ <dt class="optional">
2989
+ instructions
2990
+ <a
2991
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-instructions"
2992
+ target="_blank"
2993
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
2994
+ ></a>
2995
+ <span class="property-type">string</span>
2996
+ </dt>
2997
+ <dd>Inserts a system (or developer) message as the first item in the model's context.</dd>
2998
+
2999
+ <dt class="optional">
3000
+ max_output_tokens
3001
+ <a
3002
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-max_output_tokens"
3003
+ target="_blank"
3004
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3005
+ ></a>
3006
+ <span class="property-type">integer</span>
3007
+ </dt>
3008
+ <dd>An upper bound for the number of tokens that can be generated for a response.</dd>
3009
+
3010
+ <dt class="optional">
3011
+ metadata
3012
+ <a
3013
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-metadata"
3014
+ target="_blank"
3015
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3016
+ ></a>
3017
+ <span class="property-type">object</span>
3018
+ </dt>
3019
+ <dd>Set of 16 key-value pairs that can be attached to an object.</dd>
3020
+
3021
+ <dt class="optional">
3022
+ parallel_tool_calls
3023
+ <a
3024
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-parallel_tool_calls"
3025
+ target="_blank"
3026
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3027
+ ></a>
3028
+ <span class="property-type">boolean</span>
3029
+ </dt>
3030
+ <dd>Whether to allow the model to run tool calls in parallel.</dd>
3031
+
3032
+ <dt class="optional">
3033
+ parallel_tool_calls
3034
+ <a
3035
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-parallel_tool_calls"
3036
+ target="_blank"
3037
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3038
+ ></a>
3039
+ <span class="property-type">boolean</span>
3040
+ </dt>
3041
+ <dd>Whether to allow the model to run tool calls in parallel.</dd>
3042
+
3043
+ <dt class="optional">
3044
+ previous_response_id
3045
+ <a
3046
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-previous_response_id"
3047
+ target="_blank"
3048
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3049
+ ></a>
3050
+ <span class="property-type">string</span>
3051
+ </dt>
3052
+ <dd>The unique ID of the previous response to the model.</dd>
3053
+
3054
+ <dt class="optional">
3055
+ reasoning
3056
+ <a
3057
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-reasoning"
3058
+ target="_blank"
3059
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3060
+ ></a>
3061
+ <span class="property-type">object</span>
3062
+ </dt>
3063
+ <dd>o-series models only. See official docs for option properties.</dd>
3064
+
3065
+ <dt class="optional">
3066
+ store
3067
+ <a
3068
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-store"
3069
+ target="_blank"
3070
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3071
+ ></a>
3072
+ <span class="property-type">boolean</span>
3073
+ </dt>
3074
+ <dd>Whether to store the generated model response for later retrieval via API.</dd>
3075
+
3076
+ <dt class="optional">
3077
+ stream
3078
+ <a
3079
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-stream"
3080
+ target="_blank"
3081
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3082
+ ></a>
3083
+ <span class="property-type">boolean</span>
3084
+ </dt>
3085
+ <dd>If set to true, the model response data will be streamed to the client as it is generated using server-sent events.</dd>
3086
+
3087
+ <dt class="optional">
3088
+ temperature
3089
+ <a
3090
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-temperature"
3091
+ target="_blank"
3092
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3093
+ ></a>
3094
+ <span class="property-type">number</span>
3095
+ </dt>
3096
+ <dd>What sampling temperature to use, between 0 and 2.</dd>
3097
+
3098
+ <dt class="optional">
3099
+ text
3100
+ <a
3101
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-text"
3102
+ target="_blank"
3103
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3104
+ ></a>
3105
+ <span class="property-type">object</span>
3106
+ </dt>
3107
+ <dd>Configuration options for a text response from the model.</dd>
3108
+
3109
+ <dt class="optional">
3110
+ tool_choice
3111
+ <a
3112
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-tool_choice"
3113
+ target="_blank"
3114
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3115
+ ></a>
3116
+ <span class="property-type">string or object</span>
3117
+ </dt>
3118
+ <dd>How the model should select which tool (or tools) to use when generating a response.</dd>
3119
+
3120
+ <dt class="optional">
3121
+ tools
3122
+ <a
3123
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-tools"
3124
+ target="_blank"
3125
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3126
+ ></a>
3127
+ <span class="property-type">array</span>
3128
+ </dt>
3129
+ <dd>An array of tools the model may call while generating a response.</dd>
3130
+
3131
+ <dt class="optional">
3132
+ top_p
3133
+ <a
3134
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-top_p"
3135
+ target="_blank"
3136
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3137
+ ></a>
3138
+ <span class="property-type">number</span>
3139
+ </dt>
3140
+ <dd>An alternative to sampling with temperature, called nucleus sampling.</dd>
3141
+
3142
+ <dt class="optional">
3143
+ truncation
3144
+ <a
3145
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-truncation"
3146
+ target="_blank"
3147
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3148
+ ></a>
3149
+ <span class="property-type">string</span>
3150
+ </dt>
3151
+ <dd>The truncation strategy to use for the model response.</dd>
3152
+
3153
+ <dt class="optional">
3154
+ user
3155
+ <a
3156
+ href="https://platform.openai.com/docs/api-reference/responses/create#responses-create-user"
3157
+ target="_blank"
3158
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3159
+ ></a>
3160
+ <span class="property-type">string</span>
3161
+ </dt>
3162
+ <dd>A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.</dd>
3163
+
3164
+ </dl>
3165
+
3166
+ <h4 style="font-weight: bolder;"> ⋙ Get Model Response</h4>
3167
+ <p>Retrieves a model response with the given ID.</p>
3168
+ <dl class="message-properties">
3169
+ <h4>msg.payload Properties</h4>
3170
+
3171
+ <dt>
3172
+ response_id
3173
+ <a
3174
+ href="https://platform.openai.com/docs/api-reference/responses/get#responses-get-response_id"
3175
+ target="_blank"
3176
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3177
+ ></a>
3178
+ <span class="property-type">string</span>
3179
+ </dt>
3180
+ <dd>The ID of the response to retrieve.</dd>
3181
+ </dl>
3182
+
3183
+ <h4 style="font-weight: bolder;"> ⋙ Delete Model Response</h4>
3184
+ <p>Deletes a model response with the given ID.</p>
3185
+ <dl class="message-properties">
3186
+ <h4>msg.payload Properties</h4>
3187
+
3188
+ <dt>
3189
+ response_id
3190
+ <a
3191
+ href="https://platform.openai.com/docs/api-reference/responses/delete#responses-delete-response_id"
3192
+ target="_blank"
3193
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3194
+ ></a>
3195
+ <span class="property-type">string</span>
3196
+ </dt>
3197
+ <dd>The ID of the response to delete.</dd>
3198
+ </dl>
3199
+
3200
+ <h4 style="font-weight: bolder;"> ⋙ List Input Items</h4>
3201
+ <p>Returns a list of input items for a given response.</p>
3202
+ <dl class="message-properties">
3203
+ <h4>msg.payload Properties</h4>
3204
+
3205
+ <dt>
3206
+ response_id
3207
+ <a
3208
+ href="https://platform.openai.com/docs/api-reference/responses/input-items#responses-input-items-response_id"
3209
+ target="_blank"
3210
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3211
+ ></a>
3212
+ <span class="property-type">string</span>
3213
+ </dt>
3214
+ <dd>The ID of the response to retrieve input items for.</dd>
3215
+
3216
+ <dt class="optional">
3217
+ after
3218
+ <a
3219
+ href="https://platform.openai.com/docs/api-reference/responses/input-items#responses-input-items-after"
3220
+ target="_blank"
3221
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3222
+ ></a>
3223
+ <span class="property-type">string</span>
3224
+ </dt>
3225
+ <dd>An item ID to list items after, used in pagination.</dd>
3226
+
3227
+ <dt class="optional">
3228
+ before
3229
+ <a
3230
+ href="https://platform.openai.com/docs/api-reference/responses/input-items#responses-input-items-before"
3231
+ target="_blank"
3232
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3233
+ ></a>
3234
+ <span class="property-type">string</span>
3235
+ </dt>
3236
+ <dd>An item ID to list items before, used in pagination.</dd>
3237
+
3238
+ <dt class="optional">
3239
+ limit
3240
+ <a
3241
+ href="https://platform.openai.com/docs/api-reference/responses/input-items#responses-input-items-limit"
3242
+ target="_blank"
3243
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3244
+ ></a>
3245
+ <span class="property-type">integer</span>
3246
+ </dt>
3247
+ <dd>A limit on the number of objects to be returned.</dd>
3248
+
3249
+ <dt class="optional">
3250
+ order
3251
+ <a
3252
+ href="https://platform.openai.com/docs/api-reference/responses/input-items#responses-input-items-order"
3253
+ target="_blank"
3254
+ ><i class="fa fa-external-link fa-sm" aria-hidden="true"></i
3255
+ ></a>
3256
+ <span class="property-type">string</span>
3257
+ </dt>
3258
+ <dd>The order to return the input items in. Default is <code>asc</code>.</dd>
3259
+
3260
+ </dl>
3261
+
3262
+ </details>
3263
+ </section>
3264
+ <!-- End Responses docs -->
2917
3265
 
2918
3266
  <!-- Begin Runs docs -->
2919
3267
  <section>
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@inductiv/node-red-openai-api",
3
- "version": "1.85.4",
3
+ "version": "1.87.3",
4
4
  "description": "Enhance your Node-RED projects with advanced AI capabilities.",
5
5
  "main": "node.js",
6
6
  "engines": {
@@ -30,7 +30,7 @@
30
30
  "low-code"
31
31
  ],
32
32
  "dependencies": {
33
- "openai": "~4.85.4"
33
+ "openai": "~4.87.3"
34
34
  },
35
35
  "author": "Allan Bunch",
36
36
  "license": "MIT",