@ionos-cloud/n8n-nodes-ionos-cloud 0.2.2 → 0.2.3

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
@@ -0,0 +1,741 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.openaiDescriptions = exports.openaiOperations = void 0;
+exports.openaiOperations = [
+    {
+        displayName: 'Operation',
+        name: 'operation',
+        type: 'options',
+        noDataExpression: true,
+        displayOptions: {
+            show: {
+                resource: ['openai'],
+            },
+        },
+        options: [
+            {
+                name: 'Chat Completion',
+                value: 'chatCompletion',
+                description: 'Create chat completions using OpenAI-compatible API',
+                action: 'Create chat completion',
+                routing: {
+                    request: {
+                        method: 'POST',
+                        url: '=/v1/chat/completions',
+                        baseURL: 'https://openai.inference.de-txl.ionos.com',
+                    },
+                },
+            },
+            {
+                name: 'Completion',
+                value: 'completion',
+                description: 'Create text completions using OpenAI-compatible API',
+                action: 'Create completion',
+                routing: {
+                    request: {
+                        method: 'POST',
+                        url: '=/v1/completions',
+                        baseURL: 'https://openai.inference.de-txl.ionos.com',
+                    },
+                },
+            },
+            {
+                name: 'Create Embeddings',
+                value: 'embeddings',
+                description: 'Create embedding vectors from text',
+                action: 'Create embeddings',
+                routing: {
+                    request: {
+                        method: 'POST',
+                        url: '=/v1/embeddings',
+                        baseURL: 'https://openai.inference.de-txl.ionos.com',
+                    },
+                },
+            },
+            {
+                name: 'Generate Image',
+                value: 'generateImage',
+                description: 'Generate images from text prompts',
+                action: 'Generate image',
+                routing: {
+                    request: {
+                        method: 'POST',
+                        url: '=/v1/images/generations',
+                        baseURL: 'https://openai.inference.de-txl.ionos.com',
+                    },
+                },
+            },
+            {
+                name: 'Get Many',
+                value: 'getAll',
+                description: 'List available models in OpenAI-compatible format',
+                action: 'Get many models',
+                routing: {
+                    request: {
+                        method: 'GET',
+                        url: '=/v1/models',
+                        baseURL: 'https://openai.inference.de-txl.ionos.com',
+                    },
+                    output: {
+                        postReceive: [
+                            {
+                                type: 'rootProperty',
+                                properties: {
+                                    property: 'data',
+                                },
+                            },
+                        ],
+                    },
+                },
+            },
+        ],
+        default: 'chatCompletion',
+    },
+];
+exports.openaiDescriptions = [
+    {
+        displayName: 'Model',
+        name: 'model',
+        type: 'string',
+        required: true,
+        displayOptions: {
+            show: {
+                resource: ['openai'],
+                operation: ['chatCompletion'],
+            },
+        },
+        default: 'meta-llama/Llama-3.3-70B-Instruct',
+        description: 'ID of the model to use',
+        routing: {
+            send: {
+                type: 'body',
+                property: 'model',
+            },
+        },
+    },
+    {
+        displayName: 'Messages',
+        name: 'messages',
+        type: 'fixedCollection',
+        required: true,
+        typeOptions: {
+            multipleValues: true,
+        },
+        displayOptions: {
+            show: {
+                resource: ['openai'],
+                operation: ['chatCompletion'],
+            },
+        },
+        default: {},
+        description: 'The messages to generate chat completions for',
+        options: [
+            {
+                name: 'messageValues',
+                displayName: 'Message',
+                values: [
+                    {
+                        displayName: 'Role',
+                        name: 'role',
+                        type: 'options',
+                        options: [
+                            {
+                                name: 'System',
+                                value: 'system',
+                            },
+                            {
+                                name: 'User',
+                                value: 'user',
+                            },
+                            {
+                                name: 'Assistant',
+                                value: 'assistant',
+                            },
+                        ],
+                        default: 'user',
+                        description: 'The role of the message',
+                    },
+                    {
+                        displayName: 'Content',
+                        name: 'content',
+                        type: 'string',
+                        default: '',
+                        description: 'The content of the message',
+                        typeOptions: {
+                            rows: 4,
+                        },
+                    },
+                ],
+            },
+        ],
+        routing: {
+            send: {
+                type: 'body',
+                property: 'messages',
+                value: '={{ $value.messageValues }}',
+            },
+        },
+    },
+    {
+        displayName: 'Additional Fields',
+        name: 'additionalFields',
+        type: 'collection',
+        placeholder: 'Add Field',
+        default: {},
+        displayOptions: {
+            show: {
+                resource: ['openai'],
+                operation: ['chatCompletion'],
+            },
+        },
+        options: [
+            {
+                displayName: 'Frequency Penalty',
+                name: 'frequency_penalty',
+                type: 'number',
+                default: 0,
+                description: 'Penalize new tokens based on their frequency in the text so far',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'frequency_penalty',
+                    },
+                },
+            },
+            {
+                displayName: 'Logit Bias',
+                name: 'logit_bias',
+                type: 'json',
+                default: '{}',
+                description: 'Modify the probability of specific tokens appearing in the completion',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'logit_bias',
+                    },
+                },
+            },
+            {
+                displayName: 'Max Completion Tokens',
+                name: 'max_completion_tokens',
+                type: 'number',
+                default: 16,
+                description: 'Maximum number of tokens to generate',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'max_completion_tokens',
+                    },
+                },
+            },
+            {
+                displayName: 'Max Tokens',
+                name: 'max_tokens',
+                type: 'number',
+                default: 16,
+                description: 'Maximum number of tokens to generate (deprecated, use max_completion_tokens)',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'max_tokens',
+                    },
+                },
+            },
+            {
+                displayName: 'Number of Completions',
+                name: 'n',
+                type: 'number',
+                default: 1,
+                description: 'Number of chat completion choices to generate',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'n',
+                    },
+                },
+            },
+            {
+                displayName: 'Presence Penalty',
+                name: 'presence_penalty',
+                type: 'number',
+                default: 0,
+                description: 'Penalize new tokens based on their existence in the text so far',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'presence_penalty',
+                    },
+                },
+            },
+            {
+                displayName: 'Response Format',
+                name: 'response_format',
+                type: 'json',
+                default: '{}',
+                description: 'Object specifying the format of the response (e.g., {"type": "json_object"})',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'response_format',
+                    },
+                },
+            },
+            {
+                displayName: 'Stop',
+                name: 'stop',
+                type: 'string',
+                default: '',
+                description: 'Up to 4 sequences where the API will stop generating tokens (comma-separated)',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'stop',
+                        value: '={{ $value.split(",").map(s => s.trim()) }}',
+                    },
+                },
+            },
+            {
+                displayName: 'Stream',
+                name: 'stream',
+                type: 'boolean',
+                default: false,
+                description: 'Whether to stream partial message deltas',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'stream',
+                    },
+                },
+            },
+            {
+                displayName: 'Temperature',
+                name: 'temperature',
+                type: 'number',
+                default: 1,
+                description: 'Sampling temperature (0-2)',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'temperature',
+                    },
+                },
+            },
+            {
+                displayName: 'Tool Choice',
+                name: 'tool_choice',
+                type: 'json',
+                default: '{}',
+                description: 'Controls which tool is called (none, auto, required, or specific function)',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'tool_choice',
+                    },
+                },
+            },
+            {
+                displayName: 'Tools',
+                name: 'tools',
+                type: 'json',
+                default: '[]',
+                description: 'List of tools/functions the model may call (max 128)',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'tools',
+                    },
+                },
+            },
+            {
+                displayName: 'Top P',
+                name: 'top_p',
+                type: 'number',
+                default: 1,
+                description: 'Alternative to sampling with temperature',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'top_p',
+                    },
+                },
+            },
+            {
+                displayName: 'User',
+                name: 'user',
+                type: 'string',
+                default: '',
+                description: 'Unique identifier for the end-user',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'user',
+                    },
+                },
+            },
+        ],
+    },
+    {
+        displayName: 'Model',
+        name: 'model',
+        type: 'string',
+        required: true,
+        displayOptions: {
+            show: {
+                resource: ['openai'],
+                operation: ['completion'],
+            },
+        },
+        default: 'meta-llama/Llama-3.3-70B-Instruct',
+        description: 'ID of the model to use',
+        routing: {
+            send: {
+                type: 'body',
+                property: 'model',
+            },
+        },
+    },
+    {
+        displayName: 'Prompt',
+        name: 'prompt',
+        type: 'string',
+        required: true,
+        displayOptions: {
+            show: {
+                resource: ['openai'],
+                operation: ['completion'],
+            },
+        },
+        default: '',
+        description: 'The prompt to generate completions from',
+        typeOptions: {
+            rows: 4,
+        },
+        routing: {
+            send: {
+                type: 'body',
+                property: 'prompt',
+            },
+        },
+    },
+    {
+        displayName: 'Additional Fields',
+        name: 'additionalFields',
+        type: 'collection',
+        placeholder: 'Add Field',
+        default: {},
+        displayOptions: {
+            show: {
+                resource: ['openai'],
+                operation: ['completion'],
+            },
+        },
+        options: [
+            {
+                displayName: 'Frequency Penalty',
+                name: 'frequency_penalty',
+                type: 'number',
+                default: 0,
+                description: 'Penalize new tokens based on their frequency',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'frequency_penalty',
+                    },
+                },
+            },
+            {
+                displayName: 'Logit Bias',
+                name: 'logit_bias',
+                type: 'json',
+                default: '{}',
+                description: 'Modify the probability of specific tokens appearing in the completion',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'logit_bias',
+                    },
+                },
+            },
+            {
+                displayName: 'Max Tokens',
+                name: 'max_tokens',
+                type: 'number',
+                default: 16,
+                description: 'Maximum number of tokens to generate',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'max_tokens',
+                    },
+                },
+            },
+            {
+                displayName: 'Number of Completions',
+                name: 'n',
+                type: 'number',
+                default: 1,
+                description: 'Number of completions to generate',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'n',
+                    },
+                },
+            },
+            {
+                displayName: 'Presence Penalty',
+                name: 'presence_penalty',
+                type: 'number',
+                default: 0,
+                description: 'Penalize new tokens based on their existence',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'presence_penalty',
+                    },
+                },
+            },
+            {
+                displayName: 'Stop',
+                name: 'stop',
+                type: 'string',
+                default: '',
+                description: 'Sequences where the API will stop (comma-separated)',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'stop',
+                        value: '={{ $value.split(",").map(s => s.trim()) }}',
+                    },
+                },
+            },
+            {
+                displayName: 'Stream',
+                name: 'stream',
+                type: 'boolean',
+                default: false,
+                description: 'Whether to stream partial deltas',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'stream',
+                    },
+                },
+            },
+            {
+                displayName: 'Temperature',
+                name: 'temperature',
+                type: 'number',
+                default: 1,
+                description: 'Sampling temperature',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'temperature',
+                    },
+                },
+            },
+            {
+                displayName: 'Top P',
+                name: 'top_p',
+                type: 'number',
+                default: 1,
+                description: 'Alternative to sampling with temperature',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'top_p',
+                    },
+                },
+            },
+            {
+                displayName: 'User',
+                name: 'user',
+                type: 'string',
+                default: '',
+                description: 'Unique identifier for the end-user',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'user',
+                    },
+                },
+            },
+        ],
+    },
+    {
+        displayName: 'Model',
+        name: 'model',
+        type: 'string',
+        required: true,
+        displayOptions: {
+            show: {
+                resource: ['openai'],
+                operation: ['embeddings'],
+            },
+        },
+        default: 'intfloat/e5-large-v2',
+        description: 'ID of the embedding model to use',
+        routing: {
+            send: {
+                type: 'body',
+                property: 'model',
+            },
+        },
+    },
+    {
+        displayName: 'Input',
+        name: 'input',
+        type: 'string',
+        required: true,
+        displayOptions: {
+            show: {
+                resource: ['openai'],
+                operation: ['embeddings'],
+            },
+        },
+        default: '',
+        description: 'Text to create embeddings for (or comma-separated for multiple)',
+        typeOptions: {
+            rows: 4,
+        },
+        routing: {
+            send: {
+                type: 'body',
+                property: 'input',
+                value: '={{ $value.includes(",") ? $value.split(",").map(s => s.trim()) : $value }}',
+            },
+        },
+    },
+    {
+        displayName: 'Model',
+        name: 'model',
+        type: 'string',
+        required: true,
+        displayOptions: {
+            show: {
+                resource: ['openai'],
+                operation: ['generateImage'],
+            },
+        },
+        default: 'stabilityai/stable-diffusion-xl-base-1.0',
+        description: 'ID of the image generation model to use',
+        routing: {
+            send: {
+                type: 'body',
+                property: 'model',
+            },
+        },
+    },
+    {
+        displayName: 'Prompt',
+        name: 'prompt',
+        type: 'string',
+        required: true,
+        displayOptions: {
+            show: {
+                resource: ['openai'],
+                operation: ['generateImage'],
+            },
+        },
+        default: '',
+        description: 'Text description of the desired image',
+        typeOptions: {
+            rows: 4,
+        },
+        routing: {
+            send: {
+                type: 'body',
+                property: 'prompt',
+            },
+        },
+    },
+    {
+        displayName: 'Additional Fields',
+        name: 'additionalFields',
+        type: 'collection',
+        placeholder: 'Add Field',
+        default: {},
+        displayOptions: {
+            show: {
+                resource: ['openai'],
+                operation: ['generateImage'],
+            },
+        },
+        options: [
+            {
+                displayName: 'Number of Images',
+                name: 'n',
+                type: 'number',
+                default: 1,
+                description: 'Number of images to generate',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'n',
+                    },
+                },
+            },
+            {
+                displayName: 'Response Format',
+                name: 'response_format',
+                type: 'options',
+                options: [
+                    {
+                        name: 'Base64 JSON',
+                        value: 'b64_json',
+                    },
+                ],
+                default: 'b64_json',
+                description: 'Format of the response',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'response_format',
+                    },
+                },
+            },
+            {
+                displayName: 'Size',
+                name: 'size',
+                type: 'options',
+                options: [
+                    {
+                        name: '1024x1024',
+                        value: '1024x1024',
+                    },
+                    {
+                        name: '1024x1792',
+                        value: '1024x1792',
+                    },
+                    {
+                        name: '1792x1024',
+                        value: '1792x1024',
+                    },
+                ],
+                default: '1024x1024',
+                description: 'Size of the generated image',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'size',
+                    },
+                },
+            },
+            {
+                displayName: 'User',
+                name: 'user',
+                type: 'string',
+                default: '',
+                description: 'Unique identifier for the end-user',
+                routing: {
+                    send: {
+                        type: 'body',
+                        property: 'user',
+                    },
+                },
+            },
+        ],
+    },
+];
+//# sourceMappingURL=openai.js.map
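
For orientation only, and not part of the published package: the 'Chat Completion' routing above resolves to a POST against https://openai.inference.de-txl.ionos.com/v1/chat/completions, with the node's fields (model, messages, and any additional fields) placed in the JSON body. A minimal standalone sketch of the equivalent call follows; the IONOS_API_TOKEN variable and the bearer-token header are assumptions for illustration, since authentication in n8n is supplied by the node's credentials and is not visible in this diff.

// Illustrative sketch only – not part of the package. Requires Node 18+ (global fetch).
// Assumption: the endpoint accepts a bearer token read from IONOS_API_TOKEN; the header
// the node actually sends comes from its n8n credentials and does not appear in this file.
const baseURL = 'https://openai.inference.de-txl.ionos.com';

async function chatCompletion() {
    const response = await fetch(`${baseURL}/v1/chat/completions`, {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            Authorization: `Bearer ${process.env.IONOS_API_TOKEN}`, // hypothetical env var
        },
        body: JSON.stringify({
            model: 'meta-llama/Llama-3.3-70B-Instruct', // the node's default model
            messages: [
                { role: 'system', content: 'You are a helpful assistant.' },
                { role: 'user', content: 'Say hello from n8n.' },
            ],
            temperature: 1, // matches the node's Temperature default
        }),
    });
    const data = await response.json();
    console.log(data.choices?.[0]?.message?.content);
}

chatCompletion().catch(console.error);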