substarte 120240617.1.9

Sign up to get free protection for your applications and to get access to all the features.
package/src/OpenAPI.ts ADDED
@@ -0,0 +1,4701 @@
1
+ /**
2
+ * This file was auto-generated by openapi-typescript.
3
+ * Do not make direct changes to the file.
4
+ */
5
+
6
+ export interface paths {
7
+ "/Experimental": {
8
+ /**
9
+ * Experimental
10
+ * @description Experimental node.
11
+ */
12
+ post: operations["Experimental"];
13
+ };
14
+ "/Box": {
15
+ /**
16
+ * Box
17
+ * @description Combine multiple values into a single output.
18
+ */
19
+ post: operations["Box"];
20
+ };
21
+ "/If": {
22
+ /**
23
+ * If
24
+ * @description Return one of two options based on a condition.
25
+ */
26
+ post: operations["If"];
27
+ };
28
+ "/RunPython": {
29
+ /**
30
+ * RunPython
31
+ * @description Run code using a Python interpreter.
32
+ */
33
+ post: operations["RunPython"];
34
+ };
35
+ "/ComputeText": {
36
+ /**
37
+ * ComputeText
38
+ * @description Compute text using a language model.
39
+ */
40
+ post: operations["ComputeText"];
41
+ };
42
+ "/MultiComputeText": {
43
+ /**
44
+ * MultiComputeText
45
+ * @description Generate multiple text choices using a language model.
46
+ */
47
+ post: operations["MultiComputeText"];
48
+ };
49
+ "/BatchComputeText": {
50
+ /**
51
+ * BatchComputeText
52
+ * @description Compute text for multiple prompts in batch using a language model.
53
+ */
54
+ post: operations["BatchComputeText"];
55
+ };
56
+ "/BatchComputeJSON": {
57
+ /**
58
+ * BatchComputeJSON
59
+ * @description Compute JSON for multiple prompts in batch using a language model.
60
+ */
61
+ post: operations["BatchComputeJSON"];
62
+ };
63
+ "/ComputeJSON": {
64
+ /**
65
+ * ComputeJSON
66
+ * @description Compute JSON using a language model.
67
+ */
68
+ post: operations["ComputeJSON"];
69
+ };
70
+ "/MultiComputeJSON": {
71
+ /**
72
+ * MultiComputeJSON
73
+ * @description Compute multiple JSON choices using a language model.
74
+ */
75
+ post: operations["MultiComputeJSON"];
76
+ };
77
+ "/Mistral7BInstruct": {
78
+ /**
79
+ * Mistral7BInstruct
80
+ * @description Compute text using [Mistral 7B Instruct](https://mistral.ai/news/announcing-mistral-7b).
81
+ */
82
+ post: operations["Mistral7BInstruct"];
83
+ };
84
+ "/Mixtral8x7BInstruct": {
85
+ /**
86
+ * Mixtral8x7BInstruct
87
+ * @description Compute text using instruct-tuned [Mixtral 8x7B](https://mistral.ai/news/mixtral-of-experts/).
88
+ */
89
+ post: operations["Mixtral8x7BInstruct"];
90
+ };
91
+ "/Llama3Instruct8B": {
92
+ /**
93
+ * Llama3Instruct8B
94
+ * @description Compute text using instruct-tuned [Llama 3 8B](https://llama.meta.com/llama3/).
95
+ */
96
+ post: operations["Llama3Instruct8B"];
97
+ };
98
+ "/Llama3Instruct70B": {
99
+ /**
100
+ * Llama3Instruct70B
101
+ * @description Compute text using instruct-tuned [Llama 3 70B](https://llama.meta.com/llama3/).
102
+ */
103
+ post: operations["Llama3Instruct70B"];
104
+ };
105
+ "/Firellava13B": {
106
+ /**
107
+ * Firellava13B
108
+ * @description Compute text with image input using [FireLLaVA 13B](https://fireworks.ai/blog/firellava-the-first-commercially-permissive-oss-llava-model).
109
+ */
110
+ post: operations["Firellava13B"];
111
+ };
112
+ "/GenerateImage": {
113
+ /**
114
+ * GenerateImage
115
+ * @description Generate an image.
116
+ */
117
+ post: operations["GenerateImage"];
118
+ };
119
+ "/MultiGenerateImage": {
120
+ /**
121
+ * MultiGenerateImage
122
+ * @description Generate multiple images.
123
+ */
124
+ post: operations["MultiGenerateImage"];
125
+ };
126
+ "/InpaintImage": {
127
+ /**
128
+ * InpaintImage
129
+ * @description Edit an image using image generation inside part of the image or the full image.
130
+ */
131
+ post: operations["InpaintImage"];
132
+ };
133
+ "/MultiInpaintImage": {
134
+ /**
135
+ * MultiInpaintImage
136
+ * @description Edit multiple images using image generation.
137
+ */
138
+ post: operations["MultiInpaintImage"];
139
+ };
140
+ "/StableDiffusionXLLightning": {
141
+ /**
142
+ * StableDiffusionXLLightning
143
+ * @description Generate an image using [Stable Diffusion XL Lightning](https://arxiv.org/abs/2402.13929).
144
+ */
145
+ post: operations["StableDiffusionXLLightning"];
146
+ };
147
+ "/StableDiffusionXLInpaint": {
148
+ /**
149
+ * StableDiffusionXLInpaint
150
+ * @description Edit an image using [Stable Diffusion XL](https://arxiv.org/abs/2307.01952). Supports inpainting (edit part of the image with a mask) and image-to-image (edit the full image).
151
+ */
152
+ post: operations["StableDiffusionXLInpaint"];
153
+ };
154
+ "/StableDiffusionXLControlNet": {
155
+ /**
156
+ * StableDiffusionXLControlNet
157
+ * @description Generate an image with generation structured by an input image, using Stable Diffusion XL with [ControlNet](https://arxiv.org/abs/2302.05543).
158
+ */
159
+ post: operations["StableDiffusionXLControlNet"];
160
+ };
161
+ "/StableVideoDiffusion": {
162
+ /**
163
+ * StableVideoDiffusion
164
+ * @description Generates a video using a still image as conditioning frame.
165
+ */
166
+ post: operations["StableVideoDiffusion"];
167
+ };
168
+ "/InterpolateFrames": {
169
+ /**
170
+ * InterpolateFrames
171
+ * @description Generates a interpolation frames between each adjacent frames.
172
+ */
173
+ post: operations["InterpolateFrames"];
174
+ };
175
+ "/TranscribeSpeech": {
176
+ /**
177
+ * TranscribeSpeech
178
+ * @description Transcribe speech in an audio or video file.
179
+ */
180
+ post: operations["TranscribeSpeech"];
181
+ };
182
+ "/GenerateSpeech": {
183
+ /**
184
+ * GenerateSpeech
185
+ * @description Generate speech from text.
186
+ */
187
+ post: operations["GenerateSpeech"];
188
+ };
189
+ "/RemoveBackground": {
190
+ /**
191
+ * RemoveBackground
192
+ * @description Remove the background from an image and return the foreground segment as a cut-out or a mask.
193
+ */
194
+ post: operations["RemoveBackground"];
195
+ };
196
+ "/EraseImage": {
197
+ /**
198
+ * EraseImage
199
+ * @description Erase the masked part of an image, e.g. to remove an object by inpainting.
200
+ */
201
+ post: operations["EraseImage"];
202
+ };
203
+ "/UpscaleImage": {
204
+ /**
205
+ * UpscaleImage
206
+ * @description Upscale an image using image generation.
207
+ */
208
+ post: operations["UpscaleImage"];
209
+ };
210
+ "/SegmentUnderPoint": {
211
+ /**
212
+ * SegmentUnderPoint
213
+ * @description Segment an image under a point and return the segment.
214
+ */
215
+ post: operations["SegmentUnderPoint"];
216
+ };
217
+ "/SegmentAnything": {
218
+ /**
219
+ * SegmentAnything
220
+ * @description Segment an image using [SegmentAnything](https://github.com/facebookresearch/segment-anything).
221
+ */
222
+ post: operations["SegmentAnything"];
223
+ };
224
+ "/SplitDocument": {
225
+ /**
226
+ * SplitDocument
227
+ * @description Split document into text segments.
228
+ */
229
+ post: operations["SplitDocument"];
230
+ };
231
+ "/EmbedText": {
232
+ /**
233
+ * EmbedText
234
+ * @description Generate embedding for a text document.
235
+ */
236
+ post: operations["EmbedText"];
237
+ };
238
+ "/MultiEmbedText": {
239
+ /**
240
+ * MultiEmbedText
241
+ * @description Generate embeddings for multiple text documents.
242
+ */
243
+ post: operations["MultiEmbedText"];
244
+ };
245
+ "/EmbedImage": {
246
+ /**
247
+ * EmbedImage
248
+ * @description Generate embedding for an image.
249
+ */
250
+ post: operations["EmbedImage"];
251
+ };
252
+ "/MultiEmbedImage": {
253
+ /**
254
+ * MultiEmbedImage
255
+ * @description Generate embeddings for multiple images.
256
+ */
257
+ post: operations["MultiEmbedImage"];
258
+ };
259
+ "/JinaV2": {
260
+ /**
261
+ * JinaV2
262
+ * @description Generate embeddings for multiple text documents using [Jina Embeddings 2](https://arxiv.org/abs/2310.19923).
263
+ */
264
+ post: operations["JinaV2"];
265
+ };
266
+ "/CLIP": {
267
+ /**
268
+ * CLIP
269
+ * @description Generate embeddings for text or images using [CLIP](https://openai.com/research/clip).
270
+ */
271
+ post: operations["CLIP"];
272
+ };
273
+ "/FindOrCreateVectorStore": {
274
+ /**
275
+ * FindOrCreateVectorStore
276
+ * @description Find a vector store matching the given collection name, or create a new vector store.
277
+ */
278
+ post: operations["FindOrCreateVectorStore"];
279
+ };
280
+ "/ListVectorStores": {
281
+ /**
282
+ * ListVectorStores
283
+ * @description List all vector stores.
284
+ */
285
+ post: operations["ListVectorStores"];
286
+ };
287
+ "/DeleteVectorStore": {
288
+ /**
289
+ * DeleteVectorStore
290
+ * @description Delete a vector store.
291
+ */
292
+ post: operations["DeleteVectorStore"];
293
+ };
294
+ "/QueryVectorStore": {
295
+ /**
296
+ * QueryVectorStore
297
+ * @description Query a vector store for similar vectors.
298
+ */
299
+ post: operations["QueryVectorStore"];
300
+ };
301
+ "/FetchVectors": {
302
+ /**
303
+ * FetchVectors
304
+ * @description Fetch vectors from a vector store.
305
+ */
306
+ post: operations["FetchVectors"];
307
+ };
308
+ "/UpdateVectors": {
309
+ /**
310
+ * UpdateVectors
311
+ * @description Update vectors in a vector store.
312
+ */
313
+ post: operations["UpdateVectors"];
314
+ };
315
+ "/DeleteVectors": {
316
+ /**
317
+ * DeleteVectors
318
+ * @description Delete vectors in a vector store.
319
+ */
320
+ post: operations["DeleteVectors"];
321
+ };
322
+ }
323
+
324
+ export type webhooks = Record<string, never>;
325
+
326
+ export interface components {
327
+ schemas: {
328
+ /** ErrorOut */
329
+ ErrorOut: {
330
+ /**
331
+ * @description The type of error returned.
332
+ * @enum {string}
333
+ */
334
+ type: "api_error" | "invalid_request_error" | "dependency_error";
335
+ /** @description A message providing more details about the error. */
336
+ message: string;
337
+ /**
338
+ * @description The HTTP status code for the error.
339
+ * @default 500
340
+ */
341
+ status_code?: number;
342
+ };
343
+ /** ExperimentalIn */
344
+ ExperimentalIn: {
345
+ /** @description Identifier. */
346
+ name: string;
347
+ /** @description Arguments. */
348
+ args: {
349
+ [key: string]: unknown;
350
+ };
351
+ /**
352
+ * @description Timeout in seconds.
353
+ * @default 60
354
+ */
355
+ timeout?: number;
356
+ };
357
+ /** ExperimentalOut */
358
+ ExperimentalOut: {
359
+ /** @description Response. */
360
+ output: {
361
+ [key: string]: unknown;
362
+ };
363
+ };
364
+ /** BoxIn */
365
+ BoxIn: {
366
+ /** @description Values to box. */
367
+ value: unknown;
368
+ };
369
+ /** BoxOut */
370
+ BoxOut: {
371
+ /** @description The evaluated result. */
372
+ value: unknown;
373
+ };
374
+ /** IfIn */
375
+ IfIn: {
376
+ /** @description Condition. */
377
+ condition: boolean;
378
+ /** @description Result when condition is true. */
379
+ value_if_true: unknown;
380
+ /** @description Result when condition is false. */
381
+ value_if_false?: unknown;
382
+ };
383
+ /** IfOut */
384
+ IfOut: {
385
+ /** @description Result. Null if `value_if_false` is not provided and `condition` is false. */
386
+ result: unknown;
387
+ };
388
+ /** RunPythonIn */
389
+ RunPythonIn: {
390
+ /** @description Pickled function. */
391
+ pkl_function?: string;
392
+ /** @description Keyword arguments to your function. */
393
+ kwargs: {
394
+ [key: string]: unknown;
395
+ };
396
+ /** @description Python version. */
397
+ python_version?: string;
398
+ /** @description Python packages to install. You must import them in your code. */
399
+ pip_install?: string[];
400
+ };
401
+ /** RunPythonOut */
402
+ RunPythonOut: {
403
+ /** @description Return value of your function. */
404
+ output?: unknown;
405
+ /** @description Pickled return value. */
406
+ pkl_output?: string;
407
+ /** @description Everything printed to stdout while running your code. */
408
+ stdout: string;
409
+ /** @description Contents of stderr if your code did not run successfully. */
410
+ stderr: string;
411
+ };
412
+ /** ComputeTextIn */
413
+ ComputeTextIn: {
414
+ /** @description Input prompt. */
415
+ prompt: string;
416
+ /** @description Image prompts. */
417
+ image_uris?: string[];
418
+ /**
419
+ * Format: float
420
+ * @description Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic.
421
+ * @default 0.4
422
+ */
423
+ temperature?: number;
424
+ /** @description Maximum number of tokens to generate. */
425
+ max_tokens?: number;
426
+ /**
427
+ * @description Selected model. `Firellava13B` is automatically selected when `image_uris` is provided.
428
+ * @default Llama3Instruct8B
429
+ * @enum {string}
430
+ */
431
+ model?:
432
+ | "Mistral7BInstruct"
433
+ | "Mixtral8x7BInstruct"
434
+ | "Llama3Instruct8B"
435
+ | "Llama3Instruct70B"
436
+ | "Llama3Instruct405B"
437
+ | "Firellava13B"
438
+ | "gpt-4o"
439
+ | "gpt-4o-mini"
440
+ | "claude-3-5-sonnet-20240620";
441
+ };
442
+ /** ComputeTextOut */
443
+ ComputeTextOut: {
444
+ /** @description Text response. */
445
+ text: string;
446
+ };
447
+ /** ComputeJSONIn */
448
+ ComputeJSONIn: {
449
+ /** @description Input prompt. */
450
+ prompt: string;
451
+ /** @description JSON schema to guide `json_object` response. */
452
+ json_schema: {
453
+ [key: string]: unknown;
454
+ };
455
+ /**
456
+ * Format: float
457
+ * @description Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic.
458
+ * @default 0.4
459
+ */
460
+ temperature?: number;
461
+ /** @description Maximum number of tokens to generate. */
462
+ max_tokens?: number;
463
+ /**
464
+ * @description Selected model.
465
+ * @default Llama3Instruct8B
466
+ * @enum {string}
467
+ */
468
+ model?: "Mistral7BInstruct" | "Mixtral8x7BInstruct" | "Llama3Instruct8B";
469
+ };
470
+ /** ComputeJSONOut */
471
+ ComputeJSONOut: {
472
+ /** @description JSON response. */
473
+ json_object?: {
474
+ [key: string]: unknown;
475
+ };
476
+ /** @description If the model output could not be parsed to JSON, this is the raw text output. */
477
+ text?: string;
478
+ };
479
+ /** MultiComputeTextIn */
480
+ MultiComputeTextIn: {
481
+ /** @description Input prompt. */
482
+ prompt: string;
483
+ /**
484
+ * @description Number of choices to generate.
485
+ * @default 1
486
+ */
487
+ num_choices: number;
488
+ /**
489
+ * Format: float
490
+ * @description Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic.
491
+ * @default 0.4
492
+ */
493
+ temperature?: number;
494
+ /** @description Maximum number of tokens to generate. */
495
+ max_tokens?: number;
496
+ /**
497
+ * @description Selected model.
498
+ * @default Llama3Instruct8B
499
+ * @enum {string}
500
+ */
501
+ model?:
502
+ | "Mistral7BInstruct"
503
+ | "Mixtral8x7BInstruct"
504
+ | "Llama3Instruct8B"
505
+ | "Llama3Instruct70B";
506
+ };
507
+ /** MultiComputeTextOut */
508
+ MultiComputeTextOut: {
509
+ /** @description Response choices. */
510
+ choices: {
511
+ /** @description Text response. */
512
+ text: string;
513
+ }[];
514
+ };
515
+ /** BatchComputeTextIn */
516
+ BatchComputeTextIn: {
517
+ /** @description Batch input prompts. */
518
+ prompts: string[];
519
+ /**
520
+ * Format: float
521
+ * @description Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic.
522
+ * @default 0.4
523
+ */
524
+ temperature?: number;
525
+ /** @description Maximum number of tokens to generate. */
526
+ max_tokens?: number;
527
+ /**
528
+ * @description Selected model.
529
+ * @default Llama3Instruct8B
530
+ * @enum {string}
531
+ */
532
+ model?: "Mistral7BInstruct" | "Llama3Instruct8B";
533
+ };
534
+ /** BatchComputeTextOut */
535
+ BatchComputeTextOut: {
536
+ /** @description Batch outputs. */
537
+ outputs: {
538
+ /** @description Text response. */
539
+ text: string;
540
+ }[];
541
+ };
542
+ /** MultiComputeJSONIn */
543
+ MultiComputeJSONIn: {
544
+ /** @description Input prompt. */
545
+ prompt: string;
546
+ /** @description JSON schema to guide `json_object` response. */
547
+ json_schema: {
548
+ [key: string]: unknown;
549
+ };
550
+ /**
551
+ * @description Number of choices to generate.
552
+ * @default 2
553
+ */
554
+ num_choices: number;
555
+ /**
556
+ * Format: float
557
+ * @description Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic.
558
+ * @default 0.4
559
+ */
560
+ temperature?: number;
561
+ /** @description Maximum number of tokens to generate. */
562
+ max_tokens?: number;
563
+ /**
564
+ * @description Selected model.
565
+ * @default Llama3Instruct8B
566
+ * @enum {string}
567
+ */
568
+ model?: "Mistral7BInstruct" | "Mixtral8x7BInstruct" | "Llama3Instruct8B";
569
+ };
570
+ /** MultiComputeJSONOut */
571
+ MultiComputeJSONOut: {
572
+ /** @description Response choices. */
573
+ choices: {
574
+ /** @description JSON response. */
575
+ json_object?: {
576
+ [key: string]: unknown;
577
+ };
578
+ /** @description If the model output could not be parsed to JSON, this is the raw text output. */
579
+ text?: string;
580
+ }[];
581
+ };
582
+ /** BatchComputeJSONIn */
583
+ BatchComputeJSONIn: {
584
+ /** @description Batch input prompts. */
585
+ prompts: string[];
586
+ /** @description JSON schema to guide `json_object` response. */
587
+ json_schema: {
588
+ [key: string]: unknown;
589
+ };
590
+ /**
591
+ * Format: float
592
+ * @description Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic.
593
+ * @default 0.4
594
+ */
595
+ temperature?: number;
596
+ /** @description Maximum number of tokens to generate. */
597
+ max_tokens?: number;
598
+ /**
599
+ * @description Selected model.
600
+ * @default Llama3Instruct8B
601
+ * @enum {string}
602
+ */
603
+ model?: "Mistral7BInstruct" | "Llama3Instruct8B";
604
+ };
605
+ /** BatchComputeJSONOut */
606
+ BatchComputeJSONOut: {
607
+ /** @description Batch outputs. */
608
+ outputs: {
609
+ /** @description JSON response. */
610
+ json_object?: {
611
+ [key: string]: unknown;
612
+ };
613
+ /** @description If the model output could not be parsed to JSON, this is the raw text output. */
614
+ text?: string;
615
+ }[];
616
+ };
617
+ /** Mistral7BInstructIn */
618
+ Mistral7BInstructIn: {
619
+ /** @description Input prompt. */
620
+ prompt: string;
621
+ /** @description System prompt. */
622
+ system_prompt?: string;
623
+ /**
624
+ * @description Number of choices to generate.
625
+ * @default 1
626
+ */
627
+ num_choices?: number;
628
+ /** @description JSON schema to guide response. */
629
+ json_schema?: {
630
+ [key: string]: unknown;
631
+ };
632
+ /**
633
+ * Format: float
634
+ * @description Higher values make the output more random, lower values make the output more deterministic.
635
+ */
636
+ temperature?: number;
637
+ /**
638
+ * Format: float
639
+ * @description Higher values decrease the likelihood of repeating previous tokens.
640
+ * @default 0
641
+ */
642
+ frequency_penalty?: number;
643
+ /**
644
+ * Format: float
645
+ * @description Higher values decrease the likelihood of repeated sequences.
646
+ * @default 1
647
+ */
648
+ repetition_penalty?: number;
649
+ /**
650
+ * Format: float
651
+ * @description Higher values increase the likelihood of new topics appearing.
652
+ * @default 1.1
653
+ */
654
+ presence_penalty?: number;
655
+ /**
656
+ * Format: float
657
+ * @description Probability below which less likely tokens are filtered out.
658
+ * @default 0.95
659
+ */
660
+ top_p?: number;
661
+ /** @description Maximum number of tokens to generate. */
662
+ max_tokens?: number;
663
+ };
664
+ /** Mistral7BInstructChoice */
665
+ Mistral7BInstructChoice: {
666
+ /** @description Text response, if `json_schema` was not provided. */
667
+ text?: string;
668
+ /** @description JSON response, if `json_schema` was provided. */
669
+ json_object?: {
670
+ [key: string]: unknown;
671
+ };
672
+ };
673
+ /** Mistral7BInstructOut */
674
+ Mistral7BInstructOut: {
675
+ /** @description Response choices. */
676
+ choices: {
677
+ /** @description Text response, if `json_schema` was not provided. */
678
+ text?: string;
679
+ /** @description JSON response, if `json_schema` was provided. */
680
+ json_object?: {
681
+ [key: string]: unknown;
682
+ };
683
+ }[];
684
+ };
685
+ /** Mixtral8x7BInstructIn */
686
+ Mixtral8x7BInstructIn: {
687
+ /** @description Input prompt. */
688
+ prompt: string;
689
+ /** @description System prompt. */
690
+ system_prompt?: string;
691
+ /**
692
+ * @description Number of choices to generate.
693
+ * @default 1
694
+ */
695
+ num_choices?: number;
696
+ /** @description JSON schema to guide response. */
697
+ json_schema?: {
698
+ [key: string]: unknown;
699
+ };
700
+ /**
701
+ * Format: float
702
+ * @description Higher values make the output more random, lower values make the output more deterministic.
703
+ */
704
+ temperature?: number;
705
+ /**
706
+ * Format: float
707
+ * @description Higher values decrease the likelihood of repeating previous tokens.
708
+ * @default 0
709
+ */
710
+ frequency_penalty?: number;
711
+ /**
712
+ * Format: float
713
+ * @description Higher values decrease the likelihood of repeated sequences.
714
+ * @default 1
715
+ */
716
+ repetition_penalty?: number;
717
+ /**
718
+ * Format: float
719
+ * @description Higher values increase the likelihood of new topics appearing.
720
+ * @default 1.1
721
+ */
722
+ presence_penalty?: number;
723
+ /**
724
+ * Format: float
725
+ * @description Probability below which less likely tokens are filtered out.
726
+ * @default 0.95
727
+ */
728
+ top_p?: number;
729
+ /** @description Maximum number of tokens to generate. */
730
+ max_tokens?: number;
731
+ };
732
+ /** Mixtral8x7BChoice */
733
+ Mixtral8x7BChoice: {
734
+ /** @description Text response, if `json_schema` was not provided. */
735
+ text?: string;
736
+ /** @description JSON response, if `json_schema` was provided. */
737
+ json_object?: {
738
+ [key: string]: unknown;
739
+ };
740
+ };
741
+ /** Mixtral8x7BInstructOut */
742
+ Mixtral8x7BInstructOut: {
743
+ /** @description Response choices. */
744
+ choices: {
745
+ /** @description Text response, if `json_schema` was not provided. */
746
+ text?: string;
747
+ /** @description JSON response, if `json_schema` was provided. */
748
+ json_object?: {
749
+ [key: string]: unknown;
750
+ };
751
+ }[];
752
+ };
753
+ /** Llama3Instruct8BIn */
754
+ Llama3Instruct8BIn: {
755
+ /** @description Input prompt. */
756
+ prompt: string;
757
+ /** @description System prompt. */
758
+ system_prompt?: string;
759
+ /**
760
+ * @description Number of choices to generate.
761
+ * @default 1
762
+ */
763
+ num_choices?: number;
764
+ /**
765
+ * Format: float
766
+ * @description Higher values make the output more random, lower values make the output more deterministic.
767
+ */
768
+ temperature?: number;
769
+ /**
770
+ * Format: float
771
+ * @description Higher values decrease the likelihood of repeating previous tokens.
772
+ * @default 0
773
+ */
774
+ frequency_penalty?: number;
775
+ /**
776
+ * Format: float
777
+ * @description Higher values decrease the likelihood of repeated sequences.
778
+ * @default 1
779
+ */
780
+ repetition_penalty?: number;
781
+ /**
782
+ * Format: float
783
+ * @description Higher values increase the likelihood of new topics appearing.
784
+ * @default 1.1
785
+ */
786
+ presence_penalty?: number;
787
+ /**
788
+ * Format: float
789
+ * @description Probability below which less likely tokens are filtered out.
790
+ * @default 0.95
791
+ */
792
+ top_p?: number;
793
+ /** @description Maximum number of tokens to generate. */
794
+ max_tokens?: number;
795
+ /** @description JSON schema to guide response. */
796
+ json_schema?: {
797
+ [key: string]: unknown;
798
+ };
799
+ };
800
+ /** Llama3Instruct8BChoice */
801
+ Llama3Instruct8BChoice: {
802
+ /** @description Text response. */
803
+ text?: string;
804
+ /** @description JSON response, if `json_schema` was provided. */
805
+ json_object?: {
806
+ [key: string]: unknown;
807
+ };
808
+ };
809
+ /** Llama3Instruct8BOut */
810
+ Llama3Instruct8BOut: {
811
+ /** @description Response choices. */
812
+ choices: {
813
+ /** @description Text response. */
814
+ text?: string;
815
+ /** @description JSON response, if `json_schema` was provided. */
816
+ json_object?: {
817
+ [key: string]: unknown;
818
+ };
819
+ }[];
820
+ };
821
+ /** Llama3Instruct70BIn */
822
+ Llama3Instruct70BIn: {
823
+ /** @description Input prompt. */
824
+ prompt: string;
825
+ /** @description System prompt. */
826
+ system_prompt?: string;
827
+ /**
828
+ * @description Number of choices to generate.
829
+ * @default 1
830
+ */
831
+ num_choices?: number;
832
+ /**
833
+ * Format: float
834
+ * @description Higher values make the output more random, lower values make the output more deterministic.
835
+ */
836
+ temperature?: number;
837
+ /**
838
+ * Format: float
839
+ * @description Higher values decrease the likelihood of repeating previous tokens.
840
+ * @default 0
841
+ */
842
+ frequency_penalty?: number;
843
+ /**
844
+ * Format: float
845
+ * @description Higher values decrease the likelihood of repeated sequences.
846
+ * @default 1
847
+ */
848
+ repetition_penalty?: number;
849
+ /**
850
+ * Format: float
851
+ * @description Higher values increase the likelihood of new topics appearing.
852
+ * @default 1.1
853
+ */
854
+ presence_penalty?: number;
855
+ /**
856
+ * Format: float
857
+ * @description Probability below which less likely tokens are filtered out.
858
+ * @default 0.95
859
+ */
860
+ top_p?: number;
861
+ /** @description Maximum number of tokens to generate. */
862
+ max_tokens?: number;
863
+ };
864
+ /** Llama3Instruct70BChoice */
865
+ Llama3Instruct70BChoice: {
866
+ /** @description Text response. */
867
+ text?: string;
868
+ };
869
+ /** Llama3Instruct70BOut */
870
+ Llama3Instruct70BOut: {
871
+ /** @description Response choices. */
872
+ choices: {
873
+ /** @description Text response. */
874
+ text?: string;
875
+ }[];
876
+ };
877
+ /** Firellava13BIn */
878
+ Firellava13BIn: {
879
+ /** @description Text prompt. */
880
+ prompt: string;
881
+ /** @description Image prompts. */
882
+ image_uris: string[];
883
+ /** @description Maximum number of tokens to generate. */
884
+ max_tokens?: number;
885
+ };
886
+ /** Firellava13BOut */
887
+ Firellava13BOut: {
888
+ /** @description Text response. */
889
+ text: string;
890
+ };
891
+ /** GenerateImageIn */
892
+ GenerateImageIn: {
893
+ /** @description Text prompt. */
894
+ prompt: string;
895
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
896
+ store?: string;
897
+ };
898
+ /** GenerateImageOut */
899
+ GenerateImageOut: {
900
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
901
+ image_uri: string;
902
+ };
903
+ /** MultiGenerateImageIn */
904
+ MultiGenerateImageIn: {
905
+ /** @description Text prompt. */
906
+ prompt: string;
907
+ /**
908
+ * @description Number of images to generate.
909
+ * @default 2
910
+ */
911
+ num_images: number;
912
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
913
+ store?: string;
914
+ };
915
+ /** MultiGenerateImageOut */
916
+ MultiGenerateImageOut: {
917
+ /** @description Generated images. */
918
+ outputs: {
919
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
920
+ image_uri: string;
921
+ }[];
922
+ };
923
+ /** StableDiffusionXLIn */
924
+ StableDiffusionXLIn: {
925
+ /** @description Text prompt. */
926
+ prompt: string;
927
+ /** @description Negative input prompt. */
928
+ negative_prompt?: string;
929
+ /**
930
+ * @description Number of diffusion steps.
931
+ * @default 30
932
+ */
933
+ steps?: number;
934
+ /**
935
+ * @description Number of images to generate.
936
+ * @default 1
937
+ */
938
+ num_images: number;
939
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
940
+ store?: string;
941
+ /**
942
+ * @description Height of output image, in pixels.
943
+ * @default 1024
944
+ */
945
+ height?: number;
946
+ /**
947
+ * @description Width of output image, in pixels.
948
+ * @default 1024
949
+ */
950
+ width?: number;
951
+ /** @description Seeds for deterministic generation. Default is a random seed. */
952
+ seeds?: number[];
953
+ /**
954
+ * Format: float
955
+ * @description Higher values adhere to the text prompt more strongly, typically at the expense of image quality.
956
+ * @default 7
957
+ */
958
+ guidance_scale?: number;
959
+ };
960
+ /** StableDiffusionImage */
961
+ StableDiffusionImage: {
962
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
963
+ image_uri: string;
964
+ /** @description The random noise seed used for generation. */
965
+ seed: number;
966
+ };
967
+ /** StableDiffusionXLOut */
968
+ StableDiffusionXLOut: {
969
+ /** @description Generated images. */
970
+ outputs: {
971
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
972
+ image_uri: string;
973
+ /** @description The random noise seed used for generation. */
974
+ seed: number;
975
+ }[];
976
+ };
977
+ /** StableDiffusionXLLightningIn */
978
+ StableDiffusionXLLightningIn: {
979
+ /** @description Text prompt. */
980
+ prompt: string;
981
+ /** @description Negative input prompt. */
982
+ negative_prompt?: string;
983
+ /**
984
+ * @description Number of images to generate.
985
+ * @default 1
986
+ */
987
+ num_images?: number;
988
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
989
+ store?: string;
990
+ /**
991
+ * @description Height of output image, in pixels.
992
+ * @default 1024
993
+ */
994
+ height?: number;
995
+ /**
996
+ * @description Width of output image, in pixels.
997
+ * @default 1024
998
+ */
999
+ width?: number;
1000
+ /** @description Seeds for deterministic generation. Default is a random seed. */
1001
+ seeds?: number[];
1002
+ };
1003
+ /** StableDiffusionXLLightningOut */
1004
+ StableDiffusionXLLightningOut: {
1005
+ /** @description Generated images. */
1006
+ outputs: {
1007
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
1008
+ image_uri: string;
1009
+ /** @description The random noise seed used for generation. */
1010
+ seed: number;
1011
+ }[];
1012
+ };
1013
+ /** StableDiffusionXLIPAdapterIn */
1014
+ StableDiffusionXLIPAdapterIn: {
1015
+ /** @description Text prompt. */
1016
+ prompt: string;
1017
+ /** @description Image prompt. */
1018
+ image_prompt_uri: string;
1019
+ /**
1020
+ * @description Number of images to generate.
1021
+ * @default 1
1022
+ */
1023
+ num_images: number;
1024
+ /**
1025
+ * Format: float
1026
+ * @description Controls the influence of the image prompt on the generated output.
1027
+ * @default 0.5
1028
+ */
1029
+ ip_adapter_scale?: number;
1030
+ /** @description Negative input prompt. */
1031
+ negative_prompt?: string;
1032
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
1033
+ store?: string;
1034
+ /**
1035
+ * @description Width of output image, in pixels.
1036
+ * @default 1024
1037
+ */
1038
+ width?: number;
1039
+ /**
1040
+ * @description Height of output image, in pixels.
1041
+ * @default 1024
1042
+ */
1043
+ height?: number;
1044
+ /** @description Random noise seeds. Default is random seeds for each generation. */
1045
+ seeds?: number[];
1046
+ };
1047
+ /** StableDiffusionXLIPAdapterOut */
1048
+ StableDiffusionXLIPAdapterOut: {
1049
+ /** @description Generated images. */
1050
+ outputs: {
1051
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
1052
+ image_uri: string;
1053
+ /** @description The random noise seed used for generation. */
1054
+ seed: number;
1055
+ }[];
1056
+ };
1057
+ /** StableDiffusionXLControlNetIn */
1058
+ StableDiffusionXLControlNetIn: {
1059
+ /** @description Input image. */
1060
+ image_uri: string;
1061
+ /**
1062
+ * @description Strategy to control generation using the input image.
1063
+ * @enum {string}
1064
+ */
1065
+ control_method: "edge" | "depth" | "illusion" | "tile";
1066
+ /** @description Text prompt. */
1067
+ prompt: string;
1068
+ /**
1069
+ * @description Number of images to generate.
1070
+ * @default 1
1071
+ */
1072
+ num_images: number;
1073
+ /**
1074
+ * @description Resolution of the output image, in pixels.
1075
+ * @default 1024
1076
+ */
1077
+ output_resolution?: number;
1078
+ /** @description Negative input prompt. */
1079
+ negative_prompt?: string;
1080
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
1081
+ store?: string;
1082
+ /**
1083
+ * Format: float
1084
+ * @description Controls the influence of the input image on the generated output.
1085
+ * @default 0.5
1086
+ */
1087
+ conditioning_scale?: number;
1088
+ /**
1089
+ * Format: float
1090
+ * @description Controls how much to transform the input image.
1091
+ * @default 0.5
1092
+ */
1093
+ strength?: number;
1094
+ /** @description Random noise seeds. Default is random seeds for each generation. */
1095
+ seeds?: number[];
1096
+ };
1097
+ /** StableDiffusionXLControlNetOut */
1098
+ StableDiffusionXLControlNetOut: {
1099
+ /** @description Generated images. */
1100
+ outputs: {
1101
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
1102
+ image_uri: string;
1103
+ /** @description The random noise seed used for generation. */
1104
+ seed: number;
1105
+ }[];
1106
+ };
1107
+ /** StableVideoDiffusionIn */
1108
+ StableVideoDiffusionIn: {
1109
+ /** @description Original image. */
1110
+ image_uri: string;
1111
+ /** @description Use "hosted" to return a video URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the video data will be returned as a base64-encoded string. */
1112
+ store?: string;
1113
+ /**
1114
+ * @description Output video format.
1115
+ * @default gif
1116
+ * @enum {string}
1117
+ */
1118
+ output_format?: "gif" | "webp" | "mp4" | "frames";
1119
+ /** @description Seed for deterministic generation. Default is a random seed. */
1120
+ seed?: number;
1121
+ /**
1122
+ * @description Frames per second of the generated video. Ignored if output format is `frames`.
1123
+ * @default 7
1124
+ */
1125
+ fps?: number;
1126
+ /**
1127
+ * @description The motion bucket id to use for the generated video. This can be used to control the motion of the generated video. Increasing the motion bucket id increases the motion of the generated video.
1128
+ * @default 180
1129
+ */
1130
+ motion_bucket_id?: number;
1131
+ /**
1132
+ * Format: float
1133
+ * @description The amount of noise added to the conditioning image. The higher the values the less the video resembles the conditioning image. Increasing this value also increases the motion of the generated video.
1134
+ * @default 0.1
1135
+ */
1136
+ noise?: number;
1137
+ };
1138
+ /** StableVideoDiffusionOut */
1139
+ StableVideoDiffusionOut: {
1140
+ /** @description Generated video. */
1141
+ video_uri?: string;
1142
+ /** @description Generated frames. */
1143
+ frame_uris?: string[];
1144
+ };
1145
+ /** InterpolateFramesIn */
1146
+ InterpolateFramesIn: {
1147
+ /** @description Frames. */
1148
+ frame_uris: string[];
1149
+ /** @description Use "hosted" to return a video URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the video data will be returned as a base64-encoded string. */
1150
+ store?: string;
1151
+ /**
1152
+ * @description Output video format.
1153
+ * @default gif
1154
+ * @enum {string}
1155
+ */
1156
+ output_format?: "gif" | "webp" | "mp4" | "frames";
1157
+ /**
1158
+ * @description Frames per second of the generated video. Ignored if output format is `frames`.
1159
+ * @default 7
1160
+ */
1161
+ fps?: number;
1162
+ /**
1163
+ * @description Number of interpolation steps. Each step adds an interpolated frame between adjacent frames. For example, 2 steps over 2 frames produces 5 frames.
1164
+ * @default 2
1165
+ */
1166
+ num_steps?: number;
1167
+ };
1168
+ /** InterpolateFramesOut */
1169
+ InterpolateFramesOut: {
1170
+ /** @description Generated video. */
1171
+ video_uri?: string;
1172
+ /** @description Output frames. */
1173
+ frame_uris?: string[];
1174
+ };
1175
+ /** InpaintImageIn */
1176
+ InpaintImageIn: {
1177
+ /** @description Original image. */
1178
+ image_uri: string;
1179
+ /** @description Text prompt. */
1180
+ prompt: string;
1181
+ /** @description Mask image that controls which pixels are inpainted. If unset, the entire image is edited (image-to-image). */
1182
+ mask_image_uri?: string;
1183
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
1184
+ store?: string;
1185
+ };
1186
+ /** InpaintImageOut */
1187
+ InpaintImageOut: {
1188
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
1189
+ image_uri: string;
1190
+ };
1191
+ /** MultiInpaintImageIn */
1192
+ MultiInpaintImageIn: {
1193
+ /** @description Original image. */
1194
+ image_uri: string;
1195
+ /** @description Text prompt. */
1196
+ prompt: string;
1197
+ /** @description Mask image that controls which pixels are edited (inpainting). If unset, the entire image is edited (image-to-image). */
1198
+ mask_image_uri?: string;
1199
+ /**
1200
+ * @description Number of images to generate.
1201
+ * @default 2
1202
+ */
1203
+ num_images: number;
1204
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
1205
+ store?: string;
1206
+ };
1207
+ /** MultiInpaintImageOut */
1208
+ MultiInpaintImageOut: {
1209
+ /** @description Generated images. */
1210
+ outputs: {
1211
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
1212
+ image_uri: string;
1213
+ }[];
1214
+ };
1215
+ /** StableDiffusionXLInpaintIn */
1216
+ StableDiffusionXLInpaintIn: {
1217
+ /** @description Original image. */
1218
+ image_uri: string;
1219
+ /** @description Text prompt. */
1220
+ prompt: string;
1221
+ /** @description Mask image that controls which pixels are edited (inpainting). If unset, the entire image is edited (image-to-image). */
1222
+ mask_image_uri?: string;
1223
+ /**
1224
+ * @description Number of images to generate.
1225
+ * @default 1
1226
+ */
1227
+ num_images: number;
1228
+ /**
1229
+ * @description Resolution of the output image, in pixels.
1230
+ * @default 1024
1231
+ */
1232
+ output_resolution?: number;
1233
+ /** @description Negative input prompt. */
1234
+ negative_prompt?: string;
1235
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
1236
+ store?: string;
1237
+ /**
1238
+ * Format: float
1239
+ * @description Controls the strength of the generation process.
1240
+ * @default 0.8
1241
+ */
1242
+ strength?: number;
1243
+ /** @description Random noise seeds. Default is random seeds for each generation. */
1244
+ seeds?: number[];
1245
+ };
1246
+ /** StableDiffusionXLInpaintOut */
1247
+ StableDiffusionXLInpaintOut: {
1248
+ /** @description Generated images. */
1249
+ outputs: {
1250
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
1251
+ image_uri: string;
1252
+ /** @description The random noise seed used for generation. */
1253
+ seed: number;
1254
+ }[];
1255
+ };
1256
+ /** BoundingBox */
1257
+ BoundingBox: {
1258
+ /**
1259
+ * Format: float
1260
+ * @description Top left corner x.
1261
+ */
1262
+ x1: number;
1263
+ /**
1264
+ * Format: float
1265
+ * @description Top left corner y.
1266
+ */
1267
+ y1: number;
1268
+ /**
1269
+ * Format: float
1270
+ * @description Bottom right corner x.
1271
+ */
1272
+ x2: number;
1273
+ /**
1274
+ * Format: float
1275
+ * @description Bottom right corner y.
1276
+ */
1277
+ y2: number;
1278
+ };
1279
+ /** Point */
1280
+ Point: {
1281
+ /** @description X position. */
1282
+ x: number;
1283
+ /** @description Y position. */
1284
+ y: number;
1285
+ };
1286
+ /** EraseImageIn */
1287
+ EraseImageIn: {
1288
+ /** @description Input image. */
1289
+ image_uri: string;
1290
+ /** @description Mask image that controls which pixels are inpainted. */
1291
+ mask_image_uri: string;
1292
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
1293
+ store?: string;
1294
+ };
1295
+ /** EraseImageOut */
1296
+ EraseImageOut: {
1297
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
1298
+ image_uri: string;
1299
+ };
1300
+ /** BigLaMaIn */
1301
+ BigLaMaIn: {
1302
+ /** @description Input image. */
1303
+ image_uri: string;
1304
+ /** @description Mask image that controls which pixels are inpainted. */
1305
+ mask_image_uri: string;
1306
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
1307
+ store?: string;
1308
+ };
1309
+ /** BigLaMaOut */
1310
+ BigLaMaOut: {
1311
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
1312
+ image_uri: string;
1313
+ };
1314
+ /** RemoveBackgroundIn */
1315
+ RemoveBackgroundIn: {
1316
+ /** @description Input image. */
1317
+ image_uri: string;
1318
+ /**
1319
+ * @description Return a mask image instead of the original content.
1320
+ * @default false
1321
+ */
1322
+ return_mask?: boolean;
1323
+ /**
1324
+ * @description Invert the mask image. Only takes effect if `return_mask` is true.
1325
+ * @default false
1326
+ */
1327
+ invert_mask?: boolean;
1328
+ /** @description Hex value background color. Transparent if unset. */
1329
+ background_color?: string;
1330
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
1331
+ store?: string;
1332
+ };
1333
+ /** RemoveBackgroundOut */
1334
+ RemoveBackgroundOut: {
1335
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
1336
+ image_uri: string;
1337
+ };
1338
+ /** DISISNetIn */
1339
+ DISISNetIn: {
1340
+ /** @description Input image. */
1341
+ image_uri: string;
1342
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
1343
+ store?: string;
1344
+ };
1345
+ /** DISISNetOut */
1346
+ DISISNetOut: {
1347
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
1348
+ image_uri: string;
1349
+ };
1350
+ /** UpscaleImageIn */
1351
+ UpscaleImageIn: {
1352
+ /** @description Prompt to guide model on the content of image to upscale. */
1353
+ prompt?: string;
1354
+ /** @description Input image. */
1355
+ image_uri: string;
1356
+ /**
1357
+ * @description Resolution of the output image, in pixels.
1358
+ * @default 1024
1359
+ */
1360
+ output_resolution?: number;
1361
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
1362
+ store?: string;
1363
+ };
1364
+ /** UpscaleImageOut */
1365
+ UpscaleImageOut: {
1366
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
1367
+ image_uri: string;
1368
+ };
1369
+ /** SegmentUnderPointIn */
1370
+ SegmentUnderPointIn: {
1371
+ /** @description Input image. */
1372
+ image_uri: string;
1373
+ /** Point */
1374
+ point: {
1375
+ /** @description X position. */
1376
+ x: number;
1377
+ /** @description Y position. */
1378
+ y: number;
1379
+ };
1380
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
1381
+ store?: string;
1382
+ };
1383
+ /** SegmentUnderPointOut */
1384
+ SegmentUnderPointOut: {
1385
+ /** @description Detected segments in 'mask image' format. Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
1386
+ mask_image_uri: string;
1387
+ };
1388
+ /** SegmentAnythingIn */
1389
+ SegmentAnythingIn: {
1390
+ /** @description Input image. */
1391
+ image_uri: string;
1392
+ /** @description Point prompts, to detect a segment under the point. One of `point_prompts` or `box_prompts` must be set. */
1393
+ point_prompts?: {
1394
+ /** @description X position. */
1395
+ x: number;
1396
+ /** @description Y position. */
1397
+ y: number;
1398
+ }[];
1399
+ /** @description Box prompts, to detect a segment within the bounding box. One of `point_prompts` or `box_prompts` must be set. */
1400
+ box_prompts?: {
1401
+ /**
1402
+ * Format: float
1403
+ * @description Top left corner x.
1404
+ */
1405
+ x1: number;
1406
+ /**
1407
+ * Format: float
1408
+ * @description Top left corner y.
1409
+ */
1410
+ y1: number;
1411
+ /**
1412
+ * Format: float
1413
+ * @description Bottom right corner x.
1414
+ */
1415
+ x2: number;
1416
+ /**
1417
+ * Format: float
1418
+ * @description Bottom right corner y.
1419
+ */
1420
+ y2: number;
1421
+ }[];
1422
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
1423
+ store?: string;
1424
+ };
1425
+ /** SegmentAnythingOut */
1426
+ SegmentAnythingOut: {
1427
+ /** @description Detected segments in 'mask image' format. Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
1428
+ mask_image_uri: string;
1429
+ };
1430
+ /** TranscribeSpeechIn */
1431
+ TranscribeSpeechIn: {
1432
+ /** @description Input audio. */
1433
+ audio_uri: string;
1434
+ /** @description Prompt to guide model on the content and context of input audio. */
1435
+ prompt?: string;
1436
+ /**
1437
+ * @description Language of input audio in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) format.
1438
+ * @default en
1439
+ */
1440
+ language?: string;
1441
+ /**
1442
+ * @description Segment the text into sentences with approximate timestamps.
1443
+ * @default false
1444
+ */
1445
+ segment?: boolean;
1446
+ /**
1447
+ * @description Align transcription to produce more accurate sentence-level timestamps and word-level timestamps. An array of word segments will be included in each sentence segment.
1448
+ * @default false
1449
+ */
1450
+ align?: boolean;
1451
+ /**
1452
+ * @description Identify speakers for each segment. Speaker IDs will be included in each segment.
1453
+ * @default false
1454
+ */
1455
+ diarize?: boolean;
1456
+ /**
1457
+ * @description Suggest automatic chapter markers.
1458
+ * @default false
1459
+ */
1460
+ suggest_chapters?: boolean;
1461
+ };
1462
+ /** TranscribedWord */
1463
+ TranscribedWord: {
1464
+ /** @description Text of word. */
1465
+ word: string;
1466
+ /**
1467
+ * Format: float
1468
+ * @description Start time of word, in seconds.
1469
+ */
1470
+ start?: number;
1471
+ /**
1472
+ * Format: float
1473
+ * @description End time of word, in seconds.
1474
+ */
1475
+ end?: number;
1476
+ /** @description ID of speaker, if `diarize` is enabled. */
1477
+ speaker?: string;
1478
+ };
1479
+ /** TranscribedSegment */
1480
+ TranscribedSegment: {
1481
+ /** @description Text of segment. */
1482
+ text: string;
1483
+ /**
1484
+ * Format: float
1485
+ * @description Start time of segment, in seconds.
1486
+ */
1487
+ start: number;
1488
+ /**
1489
+ * Format: float
1490
+ * @description End time of segment, in seconds.
1491
+ */
1492
+ end: number;
1493
+ /** @description ID of speaker, if `diarize` is enabled. */
1494
+ speaker?: string;
1495
+ /** @description Aligned words, if `align` is enabled. */
1496
+ words?: {
1497
+ /** @description Text of word. */
1498
+ word: string;
1499
+ /**
1500
+ * Format: float
1501
+ * @description Start time of word, in seconds.
1502
+ */
1503
+ start?: number;
1504
+ /**
1505
+ * Format: float
1506
+ * @description End time of word, in seconds.
1507
+ */
1508
+ end?: number;
1509
+ /** @description ID of speaker, if `diarize` is enabled. */
1510
+ speaker?: string;
1511
+ }[];
1512
+ };
1513
+ /** ChapterMarker */
1514
+ ChapterMarker: {
1515
+ /** @description Chapter title. */
1516
+ title: string;
1517
+ /**
1518
+ * Format: float
1519
+ * @description Start time of chapter, in seconds.
1520
+ */
1521
+ start: number;
1522
+ };
1523
+ /** TranscribeSpeechOut */
1524
+ TranscribeSpeechOut: {
1525
+ /** @description Transcribed text. */
1526
+ text: string;
1527
+ /** @description Transcribed segments, if `segment` is enabled. */
1528
+ segments?: {
1529
+ /** @description Text of segment. */
1530
+ text: string;
1531
+ /**
1532
+ * Format: float
1533
+ * @description Start time of segment, in seconds.
1534
+ */
1535
+ start: number;
1536
+ /**
1537
+ * Format: float
1538
+ * @description End time of segment, in seconds.
1539
+ */
1540
+ end: number;
1541
+ /** @description ID of speaker, if `diarize` is enabled. */
1542
+ speaker?: string;
1543
+ /** @description Aligned words, if `align` is enabled. */
1544
+ words?: {
1545
+ /** @description Text of word. */
1546
+ word: string;
1547
+ /**
1548
+ * Format: float
1549
+ * @description Start time of word, in seconds.
1550
+ */
1551
+ start?: number;
1552
+ /**
1553
+ * Format: float
1554
+ * @description End time of word, in seconds.
1555
+ */
1556
+ end?: number;
1557
+ /** @description ID of speaker, if `diarize` is enabled. */
1558
+ speaker?: string;
1559
+ }[];
1560
+ }[];
1561
+ /** @description Chapter markers, if `suggest_chapters` is enabled. */
1562
+ chapters?: {
1563
+ /** @description Chapter title. */
1564
+ title: string;
1565
+ /**
1566
+ * Format: float
1567
+ * @description Start time of chapter, in seconds.
1568
+ */
1569
+ start: number;
1570
+ }[];
1571
+ };
1572
+ /** GenerateSpeechIn */
1573
+ GenerateSpeechIn: {
1574
+ /** @description Input text. */
1575
+ text: string;
1576
+ /** @description Use "hosted" to return an audio URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the audio data will be returned as a base64-encoded string. */
1577
+ store?: string;
1578
+ };
1579
+ /** GenerateSpeechOut */
1580
+ GenerateSpeechOut: {
1581
+ /** @description Base 64-encoded WAV audio bytes, or a hosted audio url if `store` is provided. */
1582
+ audio_uri: string;
1583
+ };
1584
+ /** XTTSV2In */
1585
+ XTTSV2In: {
1586
+ /** @description Input text. */
1587
+ text: string;
1588
+ /** @description Reference audio used to synthesize the speaker. If unset, a default speaker voice will be used. */
1589
+ audio_uri?: string;
1590
+ /**
1591
+ * @description Language of input text. Supported languages: `en, de, fr, es, it, pt, pl, zh, ar, cs, ru, nl, tr, hu, ko`.
1592
+ * @default en
1593
+ */
1594
+ language?: string;
1595
+ /** @description Use "hosted" to return an audio URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the audio data will be returned as a base64-encoded string. */
1596
+ store?: string;
1597
+ };
1598
+ /** XTTSV2Out */
1599
+ XTTSV2Out: {
1600
+ /** @description Base 64-encoded WAV audio bytes, or a hosted audio url if `store` is provided. */
1601
+ audio_uri: string;
1602
+ };
1603
+ /** Embedding */
1604
+ Embedding: {
1605
+ /** @description Embedding vector. */
1606
+ vector: number[];
1607
+ /** @description Vector store document ID. */
1608
+ doc_id?: string;
1609
+ /** @description Vector store document metadata. */
1610
+ metadata?: {
1611
+ [key: string]: unknown;
1612
+ };
1613
+ };
1614
+ /** EmbedTextIn */
1615
+ EmbedTextIn: {
1616
+ /** @description Text to embed. */
1617
+ text: string;
1618
+ /** @description Vector store name. */
1619
+ collection_name?: string;
1620
+ /** @description Metadata that can be used to query the vector store. Ignored if `collection_name` is unset. */
1621
+ metadata?: {
1622
+ [key: string]: unknown;
1623
+ };
1624
+ /** @description Choose keys from `metadata` to embed with text. */
1625
+ embedded_metadata_keys?: string[];
1626
+ /** @description Vector store document ID. Ignored if `store` is unset. */
1627
+ doc_id?: string;
1628
+ /**
1629
+ * @description Selected embedding model.
1630
+ * @default jina-v2
1631
+ * @enum {string}
1632
+ */
1633
+ model?: "jina-v2" | "clip";
1634
+ };
1635
+ /** EmbedTextOut */
1636
+ EmbedTextOut: {
1637
+ /** Embedding */
1638
+ embedding: {
1639
+ /** @description Embedding vector. */
1640
+ vector: number[];
1641
+ /** @description Vector store document ID. */
1642
+ doc_id?: string;
1643
+ /** @description Vector store document metadata. */
1644
+ metadata?: {
1645
+ [key: string]: unknown;
1646
+ };
1647
+ };
1648
+ };
1649
+ /** EmbedTextItem */
1650
+ EmbedTextItem: {
1651
+ /** @description Text to embed. */
1652
+ text: string;
1653
+ /** @description Metadata that can be used to query the vector store. Ignored if `collection_name` is unset. */
1654
+ metadata?: {
1655
+ [key: string]: unknown;
1656
+ };
1657
+ /** @description Vector store document ID. Ignored if `collection_name` is unset. */
1658
+ doc_id?: string;
1659
+ };
1660
+ /** MultiEmbedTextIn */
1661
+ MultiEmbedTextIn: {
1662
+ /** @description Items to embed. */
1663
+ items: {
1664
+ /** @description Text to embed. */
1665
+ text: string;
1666
+ /** @description Metadata that can be used to query the vector store. Ignored if `collection_name` is unset. */
1667
+ metadata?: {
1668
+ [key: string]: unknown;
1669
+ };
1670
+ /** @description Vector store document ID. Ignored if `collection_name` is unset. */
1671
+ doc_id?: string;
1672
+ }[];
1673
+ /** @description Vector store name. */
1674
+ collection_name?: string;
1675
+ /** @description Choose keys from `metadata` to embed with text. */
1676
+ embedded_metadata_keys?: string[];
1677
+ /**
1678
+ * @description Selected embedding model.
1679
+ * @default jina-v2
1680
+ * @enum {string}
1681
+ */
1682
+ model?: "jina-v2" | "clip";
1683
+ };
1684
+ /** MultiEmbedTextOut */
1685
+ MultiEmbedTextOut: {
1686
+ /** @description Generated embeddings. */
1687
+ embeddings: {
1688
+ /** @description Embedding vector. */
1689
+ vector: number[];
1690
+ /** @description Vector store document ID. */
1691
+ doc_id?: string;
1692
+ /** @description Vector store document metadata. */
1693
+ metadata?: {
1694
+ [key: string]: unknown;
1695
+ };
1696
+ }[];
1697
+ };
1698
+ /** JinaV2In */
1699
+ JinaV2In: {
1700
+ /** @description Items to embed. */
1701
+ items: {
1702
+ /** @description Text to embed. */
1703
+ text: string;
1704
+ /** @description Metadata that can be used to query the vector store. Ignored if `collection_name` is unset. */
1705
+ metadata?: {
1706
+ [key: string]: unknown;
1707
+ };
1708
+ /** @description Vector store document ID. Ignored if `collection_name` is unset. */
1709
+ doc_id?: string;
1710
+ }[];
1711
+ /** @description Vector store name. */
1712
+ collection_name?: string;
1713
+ /** @description Choose keys from `metadata` to embed with text. */
1714
+ embedded_metadata_keys?: string[];
1715
+ };
1716
+ /** JinaV2Out */
1717
+ JinaV2Out: {
1718
+ /** @description Generated embeddings. */
1719
+ embeddings: {
1720
+ /** @description Embedding vector. */
1721
+ vector: number[];
1722
+ /** @description Vector store document ID. */
1723
+ doc_id?: string;
1724
+ /** @description Vector store document metadata. */
1725
+ metadata?: {
1726
+ [key: string]: unknown;
1727
+ };
1728
+ }[];
1729
+ };
1730
+ /** EmbedImageIn */
1731
+ EmbedImageIn: {
1732
+ /** @description Image to embed. */
1733
+ image_uri: string;
1734
+ /** @description Vector store name. */
1735
+ collection_name?: string;
1736
+ /** @description Vector store document ID. Ignored if `collection_name` is unset. */
1737
+ doc_id?: string;
1738
+ /**
1739
+ * @description Selected embedding model.
1740
+ * @default clip
1741
+ * @enum {string}
1742
+ */
1743
+ model?: "clip";
1744
+ };
1745
+ /** EmbedImageOut */
1746
+ EmbedImageOut: {
1747
+ /** Embedding */
1748
+ embedding: {
1749
+ /** @description Embedding vector. */
1750
+ vector: number[];
1751
+ /** @description Vector store document ID. */
1752
+ doc_id?: string;
1753
+ /** @description Vector store document metadata. */
1754
+ metadata?: {
1755
+ [key: string]: unknown;
1756
+ };
1757
+ };
1758
+ };
1759
+ /** EmbedImageItem */
1760
+ EmbedImageItem: {
1761
+ /** @description Image to embed. */
1762
+ image_uri: string;
1763
+ /** @description Vector store document ID. Ignored if `collection_name` is unset. */
1764
+ doc_id?: string;
1765
+ };
1766
+ /** EmbedTextOrImageItem */
1767
+ EmbedTextOrImageItem: {
1768
+ /** @description Image to embed. */
1769
+ image_uri?: string;
1770
+ /** @description Text to embed. */
1771
+ text?: string;
1772
+ /** @description Metadata that can be used to query the vector store. Ignored if `collection_name` is unset. */
1773
+ metadata?: {
1774
+ [key: string]: unknown;
1775
+ };
1776
+ /** @description Vector store document ID. Ignored if `collection_name` is unset. */
1777
+ doc_id?: string;
1778
+ };
1779
+ /** MultiEmbedImageIn */
1780
+ MultiEmbedImageIn: {
1781
+ /** @description Items to embed. */
1782
+ items: {
1783
+ /** @description Image to embed. */
1784
+ image_uri: string;
1785
+ /** @description Vector store document ID. Ignored if `collection_name` is unset. */
1786
+ doc_id?: string;
1787
+ }[];
1788
+ /** @description Vector store name. */
1789
+ collection_name?: string;
1790
+ /**
1791
+ * @description Selected embedding model.
1792
+ * @default clip
1793
+ * @enum {string}
1794
+ */
1795
+ model?: "clip";
1796
+ };
1797
+ /** MultiEmbedImageOut */
1798
+ MultiEmbedImageOut: {
1799
+ /** @description Generated embeddings. */
1800
+ embeddings: {
1801
+ /** @description Embedding vector. */
1802
+ vector: number[];
1803
+ /** @description Vector store document ID. */
1804
+ doc_id?: string;
1805
+ /** @description Vector store document metadata. */
1806
+ metadata?: {
1807
+ [key: string]: unknown;
1808
+ };
1809
+ }[];
1810
+ };
1811
+ /** CLIPIn */
1812
+ CLIPIn: {
1813
+ /** @description Items to embed. */
1814
+ items: {
1815
+ /** @description Image to embed. */
1816
+ image_uri?: string;
1817
+ /** @description Text to embed. */
1818
+ text?: string;
1819
+ /** @description Metadata that can be used to query the vector store. Ignored if `collection_name` is unset. */
1820
+ metadata?: {
1821
+ [key: string]: unknown;
1822
+ };
1823
+ /** @description Vector store document ID. Ignored if `collection_name` is unset. */
1824
+ doc_id?: string;
1825
+ }[];
1826
+ /** @description Vector store name. */
1827
+ collection_name?: string;
1828
+ /** @description Choose keys from `metadata` to embed with text. Only applies to text items. */
1829
+ embedded_metadata_keys?: string[];
1830
+ };
1831
+ /** CLIPOut */
1832
+ CLIPOut: {
1833
+ /** @description Generated embeddings. */
1834
+ embeddings: {
1835
+ /** @description Embedding vector. */
1836
+ vector: number[];
1837
+ /** @description Vector store document ID. */
1838
+ doc_id?: string;
1839
+ /** @description Vector store document metadata. */
1840
+ metadata?: {
1841
+ [key: string]: unknown;
1842
+ };
1843
+ }[];
1844
+ };
1845
+ /** FindOrCreateVectorStoreIn */
1846
+ FindOrCreateVectorStoreIn: {
1847
+ /** @description Vector store name. */
1848
+ collection_name: string;
1849
+ /**
1850
+ * @description Selected embedding model.
1851
+ * @enum {string}
1852
+ */
1853
+ model: "jina-v2" | "clip";
1854
+ };
1855
+ /** FindOrCreateVectorStoreOut */
1856
+ FindOrCreateVectorStoreOut: {
1857
+ /** @description Vector store name. */
1858
+ collection_name: string;
1859
+ /**
1860
+ * @description Selected embedding model.
1861
+ * @enum {string}
1862
+ */
1863
+ model: "jina-v2" | "clip";
1864
+ /** @description Number of leaves in the vector store. */
1865
+ num_leaves?: number;
1866
+ };
1867
+ /** ListVectorStoresIn */
1868
+ ListVectorStoresIn: Record<string, never>;
1869
+ /** ListVectorStoresOut */
1870
+ ListVectorStoresOut: {
1871
+ /** @description List of vector stores. */
1872
+ items?: {
1873
+ /** @description Vector store name. */
1874
+ collection_name: string;
1875
+ /**
1876
+ * @description Selected embedding model.
1877
+ * @enum {string}
1878
+ */
1879
+ model: "jina-v2" | "clip";
1880
+ /** @description Number of leaves in the vector store. */
1881
+ num_leaves?: number;
1882
+ }[];
1883
+ };
1884
+ /** DeleteVectorStoreIn */
1885
+ DeleteVectorStoreIn: {
1886
+ /** @description Vector store name. */
1887
+ collection_name: string;
1888
+ /**
1889
+ * @description Selected embedding model.
1890
+ * @enum {string}
1891
+ */
1892
+ model: "jina-v2" | "clip";
1893
+ };
1894
+ /** DeleteVectorStoreOut */
1895
+ DeleteVectorStoreOut: {
1896
+ /** @description Vector store name. */
1897
+ collection_name: string;
1898
+ /**
1899
+ * @description Selected embedding model.
1900
+ * @enum {string}
1901
+ */
1902
+ model: "jina-v2" | "clip";
1903
+ };
1904
    /**
     * Vector
     * @description Canonical representation of document with embedding vector.
     * All three fields are required here; contrast with UpdateVectorParams, where
     * `vector` and `metadata` are optional patch fields.
     */
    Vector: {
      /** @description Document ID. */
      id: string;
      /** @description Embedding vector. */
      vector: number[];
      /** @description Document metadata. */
      metadata: {
        [key: string]: unknown;
      };
    };
1918
    /** FetchVectorsIn — look up stored vectors by document ID. */
    FetchVectorsIn: {
      /** @description Vector store name. */
      collection_name: string;
      /**
       * @description Selected embedding model.
       * @enum {string}
       */
      model: "jina-v2" | "clip";
      /** @description Document IDs to retrieve. */
      ids: string[];
    };
1930
    /** FetchVectorsOut — `vectors` elements have the full Vector shape (all fields present). */
    FetchVectorsOut: {
      /** @description Retrieved vectors. */
      vectors: {
        /** @description Document ID. */
        id: string;
        /** @description Embedding vector. */
        vector: number[];
        /** @description Document metadata. */
        metadata: {
          [key: string]: unknown;
        };
      }[];
    };
1944
    /** UpdateVectorsOut — count of vectors affected by the upsert. */
    UpdateVectorsOut: {
      /** @description Number of vectors modified. */
      count: number;
    };
1949
    /** DeleteVectorsOut — count of vectors affected by the delete. */
    DeleteVectorsOut: {
      /** @description Number of vectors modified. */
      count: number;
    };
1954
    /** UpdateVectorParams — partial update: only `id` is required; omitted fields are left unchanged (presumably — confirm server semantics). */
    UpdateVectorParams: {
      /** @description Document ID. */
      id: string;
      /** @description Embedding vector. */
      vector?: number[];
      /** @description Document metadata. */
      metadata?: {
        [key: string]: unknown;
      };
    };
1965
    /** UpdateVectorsIn — batch upsert; each element has the UpdateVectorParams shape. */
    UpdateVectorsIn: {
      /** @description Vector store name. */
      collection_name: string;
      /**
       * @description Selected embedding model.
       * @enum {string}
       */
      model: "jina-v2" | "clip";
      /** @description Vectors to upsert. */
      vectors: {
        /** @description Document ID. */
        id: string;
        /** @description Embedding vector. */
        vector?: number[];
        /** @description Document metadata. */
        metadata?: {
          [key: string]: unknown;
        };
      }[];
    };
1986
    /** DeleteVectorsIn — batch delete by document ID. */
    DeleteVectorsIn: {
      /** @description Vector store name. */
      collection_name: string;
      /**
       * @description Selected embedding model.
       * @enum {string}
       */
      model: "jina-v2" | "clip";
      /** @description Document IDs to delete. */
      ids: string[];
    };
1998
    /**
     * QueryVectorStoreIn — exactly one of the `query_*` fields is expected per
     * request (strings, image URIs, raw vectors, or existing document IDs);
     * NOTE(review): the generated type cannot express that mutual exclusivity —
     * confirm server behavior when several are supplied.
     */
    QueryVectorStoreIn: {
      /** @description Vector store to query against. */
      collection_name: string;
      /**
       * @description Selected embedding model.
       * @enum {string}
       */
      model: "jina-v2" | "clip";
      /** @description Texts to embed and use for the query. */
      query_strings?: string[];
      /** @description Image URIs to embed and use for the query. */
      query_image_uris?: string[];
      /** @description Vectors to use for the query. */
      query_vectors?: number[][];
      /** @description Document IDs to use for the query. */
      query_ids?: string[];
      /**
       * @description Number of results to return.
       * @default 10
       */
      top_k?: number;
      /**
       * @description The size of the dynamic candidate list for searching the index graph.
       * @default 40
       */
      ef_search?: number;
      /**
       * @description The number of leaves in the index tree to search.
       * @default 40
       */
      num_leaves_to_search?: number;
      /**
       * @description Include the values of the vectors in the response.
       * @default false
       */
      include_values?: boolean;
      /**
       * @description Include the metadata of the vectors in the response.
       * @default false
       */
      include_metadata?: boolean;
      /** @description Filter metadata by key-value pairs. */
      filters?: {
        [key: string]: unknown;
      };
    };
2045
    /** VectorStoreQueryResult — `vector`/`metadata` are present only when `include_values`/`include_metadata` were requested (presumably — confirm against server). */
    VectorStoreQueryResult: {
      /** @description Document ID. */
      id: string;
      /**
       * Format: float
       * @description Similarity score.
       */
      distance: number;
      /** @description Embedding vector. */
      vector?: number[];
      /** @description Document metadata. */
      metadata?: {
        [key: string]: unknown;
      };
    };
2061
    /**
     * QueryVectorStoreOut — `results` is a 2-D array; NOTE(review): presumably
     * one inner result list per query input (the In type accepts multiple
     * queries) — confirm ordering guarantees with the server docs.
     */
    QueryVectorStoreOut: {
      /** @description Query results. */
      results: {
        /** @description Document ID. */
        id: string;
        /**
         * Format: float
         * @description Similarity score.
         */
        distance: number;
        /** @description Embedding vector. */
        vector?: number[];
        /** @description Document metadata. */
        metadata?: {
          [key: string]: unknown;
        };
      }[][];
      /** @description Vector store name. */
      collection_name?: string;
      /**
       * @description Selected embedding model.
       * @enum {string}
       */
      model?: "jina-v2" | "clip";
    };
2087
    /** SplitDocumentIn — chunking request; chunk units are tokens for text and lines for code (see field docs). */
    SplitDocumentIn: {
      /** @description URI of the document. */
      uri: string;
      /** @description Document ID. */
      doc_id?: string;
      /** @description Document metadata. */
      metadata?: {
        [key: string]: unknown;
      };
      /** @description Maximum number of units per chunk. Defaults to 1024 tokens for text or 40 lines for code. */
      chunk_size?: number;
      /** @description Number of units to overlap between chunks. Defaults to 200 tokens for text or 15 lines for code. */
      chunk_overlap?: number;
    };
2102
    /** SplitDocumentOut — chunks are shaped so they can be passed straight to an embedding node. */
    SplitDocumentOut: {
      /** @description Document chunks */
      items: {
        /** @description Text to embed. */
        text: string;
        /** @description Metadata that can be used to query the vector store. Ignored if `collection_name` is unset. */
        metadata?: {
          [key: string]: unknown;
        };
        /** @description Vector store document ID. Ignored if `collection_name` is unset. */
        doc_id?: string;
      }[];
    };
2116
+ };
2117
+ responses: never;
2118
+ parameters: never;
2119
+ requestBodies: never;
2120
+ headers: never;
2121
+ pathItems: never;
2122
+ }
2123
+
2124
/** No shared JSON-Schema `$defs` are emitted for this API (always an empty object). */
export type $defs = Record<string, never>;
2125
+
2126
/** No external schema references are emitted for this API (always an empty object). */
export type external = Record<string, never>;
2127
+
2128
+ export interface operations {
2129
  /**
   * Experimental
   * @description Experimental node. Request/response types for `POST /Experimental`.
   */
  Experimental: {
    requestBody?: {
      content: {
        /**
         * @example {
         *   "name": "some_name",
         *   "args": {
         *     "foo": "bar"
         *   }
         * }
         */
        "application/json": {
          /** @description Identifier. */
          name: string;
          /** @description Arguments. */
          args: {
            [key: string]: unknown;
          };
          /**
           * @description Timeout in seconds.
           * @default 60
           */
          timeout?: number;
        };
      };
    };
    responses: {
      /** @description OK */
      200: {
        content: {
          "application/json": {
            /** @description Response. */
            output: {
              [key: string]: unknown;
            };
          };
        };
      };
    };
  };
2173
  /**
   * Box
   * @description Combine multiple values into a single output. Request/response
   * types for `POST /Box`; the payload is passed through untyped (`unknown`).
   */
  Box: {
    requestBody?: {
      content: {
        /**
         * @example {
         *   "value": {
         *     "a": "b",
         *     "c": { "d": [1, 2, 3] }
         *   }
         * }
         */
        "application/json": {
          /** @description Values to box. */
          value: unknown;
        };
      };
    };
    responses: {
      /** @description OK */
      200: {
        content: {
          "application/json": {
            /** @description The evaluated result. */
            value: unknown;
          };
        };
      };
    };
  };
2212
  /**
   * If
   * @description Return one of two options based on a condition. Request/response
   * types for `POST /If`.
   */
  If: {
    requestBody?: {
      content: {
        /**
         * @example {
         *   "condition": true,
         *   "value_if_true": "yes",
         *   "value_if_false": "no"
         * }
         */
        "application/json": {
          /** @description Condition. */
          condition: boolean;
          /** @description Result when condition is true. */
          value_if_true: unknown;
          /** @description Result when condition is false. */
          value_if_false?: unknown;
        };
      };
    };
    responses: {
      /** @description OK */
      200: {
        content: {
          "application/json": {
            /** @description Result. Null if `value_if_false` is not provided and `condition` is false. */
            result: unknown;
          };
        };
      };
    };
  };
2248
  /**
   * RunPython
   * @description Run code using a Python interpreter. Request/response types for
   * `POST /RunPython`. `pkl_function` is a base64-encoded cloudpickle payload.
   */
  RunPython: {
    requestBody?: {
      content: {
        /**
         * @example {
         *   "pkl_function": "g2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5SMCF9fbWFpbl9flIwHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5ROjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu",
         *   "kwargs": {},
         *   "pip_install": ["numpy"]
         * }
         */
        "application/json": {
          /** @description Pickled function. */
          pkl_function?: string;
          /** @description Keyword arguments to your function. */
          kwargs: {
            [key: string]: unknown;
          };
          /** @description Python version. */
          python_version?: string;
          /** @description Python packages to install. You must import them in your code. */
          pip_install?: string[];
        };
      };
    };
    responses: {
      /** @description OK */
      200: {
        content: {
          "application/json": {
            /** @description Return value of your function. */
            output?: unknown;
            /** @description Pickled return value. */
            pkl_output?: string;
            /** @description Everything printed to stdout while running your code. */
            stdout: string;
            /** @description Contents of stderr if your code did not run successfully. */
            stderr: string;
          };
        };
      };
    };
  };
2296
  /**
   * ComputeText
   * @description Compute text using a language model. Request/response types for
   * `POST /ComputeText`.
   */
  ComputeText: {
    requestBody?: {
      content: {
        /**
         * @example {
         *   "prompt": "Who is Don Quixote?",
         *   "temperature": 0.4,
         *   "max_tokens": 800
         * }
         */
        "application/json": {
          /** @description Input prompt. */
          prompt: string;
          /** @description Image prompts. */
          image_uris?: string[];
          /**
           * Format: float
           * @description Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic.
           * @default 0.4
           */
          temperature?: number;
          /** @description Maximum number of tokens to generate. */
          max_tokens?: number;
          /**
           * @description Selected model. `Firellava13B` is automatically selected when `image_uris` is provided.
           * @default Llama3Instruct8B
           * @enum {string}
           */
          model?:
            | "Mistral7BInstruct"
            | "Mixtral8x7BInstruct"
            | "Llama3Instruct8B"
            | "Llama3Instruct70B"
            | "Llama3Instruct405B"
            | "Firellava13B"
            | "gpt-4o"
            | "gpt-4o-mini"
            | "claude-3-5-sonnet-20240620";
        };
      };
    };
    responses: {
      /** @description OK */
      200: {
        content: {
          "application/json": {
            /** @description Text response. */
            text: string;
          };
        };
      };
    };
  };
2353
+ /**
2354
+ * MultiComputeText
2355
+ * @description Generate multiple text choices using a language model.
2356
+ */
2357
+ MultiComputeText: {
2358
+ requestBody?: {
2359
+ content: {
2360
+ /**
2361
+ * @example {
2362
+ * "prompt": "Who is Don Quixote?",
2363
+ * "num_choices": 2,
2364
+ * "max_tokens": 800
2365
+ * }
2366
+ */
2367
+ "application/json": {
2368
+ /** @description Input prompt. */
2369
+ prompt: string;
2370
+ /**
2371
+ * @description Number of choices to generate.
2372
+ * @default 1
2373
+ */
2374
+ num_choices: number;
2375
+ /**
2376
+ * Format: float
2377
+ * @description Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic.
2378
+ * @default 0.4
2379
+ */
2380
+ temperature?: number;
2381
+ /** @description Maximum number of tokens to generate. */
2382
+ max_tokens?: number;
2383
+ /**
2384
+ * @description Selected model.
2385
+ * @default Llama3Instruct8B
2386
+ * @enum {string}
2387
+ */
2388
+ model?:
2389
+ | "Mistral7BInstruct"
2390
+ | "Mixtral8x7BInstruct"
2391
+ | "Llama3Instruct8B"
2392
+ | "Llama3Instruct70B";
2393
+ };
2394
+ };
2395
+ };
2396
+ responses: {
2397
+ /** @description OK */
2398
+ 200: {
2399
+ content: {
2400
+ "application/json": {
2401
+ /** @description Response choices. */
2402
+ choices: {
2403
+ /** @description Text response. */
2404
+ text: string;
2405
+ }[];
2406
+ };
2407
+ };
2408
+ };
2409
+ };
2410
+ };
2411
  /**
   * BatchComputeText
   * @description Compute text for multiple prompts in batch using a language model.
   * Request/response types for `POST /BatchComputeText`. `outputs` is aligned
   * with `prompts` (presumably index-for-index — confirm with server docs).
   */
  BatchComputeText: {
    requestBody?: {
      content: {
        /**
         * @example {
         *   "prompts": [
         *     "Who is Don Quixote?",
         *     "Who is Sancho Panza?"
         *   ],
         *   "max_tokens": 800
         * }
         */
        "application/json": {
          /** @description Batch input prompts. */
          prompts: string[];
          /**
           * Format: float
           * @description Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic.
           * @default 0.4
           */
          temperature?: number;
          /** @description Maximum number of tokens to generate. */
          max_tokens?: number;
          /**
           * @description Selected model.
           * @default Llama3Instruct8B
           * @enum {string}
           */
          model?: "Mistral7BInstruct" | "Llama3Instruct8B";
        };
      };
    };
    responses: {
      /** @description OK */
      200: {
        content: {
          "application/json": {
            /** @description Batch outputs. */
            outputs: {
              /** @description Text response. */
              text: string;
            }[];
          };
        };
      };
    };
  };
2462
  /**
   * BatchComputeJSON
   * @description Compute JSON for multiple prompts in batch using a language model.
   * Request/response types for `POST /BatchComputeJSON`. Each output carries
   * either `json_object` (parse succeeded) or `text` (raw fallback).
   */
  BatchComputeJSON: {
    requestBody?: {
      content: {
        /**
         * @example {
         *   "prompts": [
         *     "Who is Don Quixote?",
         *     "Who is Sancho Panza?"
         *   ],
         *   "max_tokens": 800,
         *   "json_schema": {
         *     "type": "object",
         *     "properties": {
         *       "name": {
         *         "type": "string",
         *         "description": "The name of the character."
         *       },
         *       "bio": {
         *         "type": "string",
         *         "description": "Concise biography of the character."
         *       }
         *     }
         *   }
         * }
         */
        "application/json": {
          /** @description Batch input prompts. */
          prompts: string[];
          /** @description JSON schema to guide `json_object` response. */
          json_schema: {
            [key: string]: unknown;
          };
          /**
           * Format: float
           * @description Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic.
           * @default 0.4
           */
          temperature?: number;
          /** @description Maximum number of tokens to generate. */
          max_tokens?: number;
          /**
           * @description Selected model.
           * @default Llama3Instruct8B
           * @enum {string}
           */
          model?: "Mistral7BInstruct" | "Llama3Instruct8B";
        };
      };
    };
    responses: {
      /** @description OK */
      200: {
        content: {
          "application/json": {
            /** @description Batch outputs. */
            outputs: {
              /** @description JSON response. */
              json_object?: {
                [key: string]: unknown;
              };
              /** @description If the model output could not be parsed to JSON, this is the raw text output. */
              text?: string;
            }[];
          };
        };
      };
    };
  };
2534
  /**
   * ComputeJSON
   * @description Compute JSON using a language model. Request/response types for
   * `POST /ComputeJSON`. The response carries either `json_object` (parse
   * succeeded) or `text` (raw fallback).
   */
  ComputeJSON: {
    requestBody?: {
      content: {
        /**
         * @example {
         *   "prompt": "Who wrote Don Quixote?",
         *   "json_schema": {
         *     "type": "object",
         *     "properties": {
         *       "name": {
         *         "type": "string",
         *         "description": "The name of the author."
         *       },
         *       "bio": {
         *         "type": "string",
         *         "description": "Concise biography of the author."
         *       }
         *     }
         *   },
         *   "temperature": 0.4,
         *   "max_tokens": 800
         * }
         */
        "application/json": {
          /** @description Input prompt. */
          prompt: string;
          /** @description JSON schema to guide `json_object` response. */
          json_schema: {
            [key: string]: unknown;
          };
          /**
           * Format: float
           * @description Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic.
           * @default 0.4
           */
          temperature?: number;
          /** @description Maximum number of tokens to generate. */
          max_tokens?: number;
          /**
           * @description Selected model.
           * @default Llama3Instruct8B
           * @enum {string}
           */
          model?:
            | "Mistral7BInstruct"
            | "Mixtral8x7BInstruct"
            | "Llama3Instruct8B";
        };
      };
    };
    responses: {
      /** @description OK */
      200: {
        content: {
          "application/json": {
            /** @description JSON response. */
            json_object?: {
              [key: string]: unknown;
            };
            /** @description If the model output could not be parsed to JSON, this is the raw text output. */
            text?: string;
          };
        };
      };
    };
  };
2604
+ /**
2605
+ * MultiComputeJSON
2606
+ * @description Compute multiple JSON choices using a language model.
2607
+ */
2608
+ MultiComputeJSON: {
2609
+ requestBody?: {
2610
+ content: {
2611
+ /**
2612
+ * @example {
2613
+ * "prompt": "Who wrote Don Quixote?",
2614
+ * "json_schema": {
2615
+ * "type": "object",
2616
+ * "properties": {
2617
+ * "name": {
2618
+ * "type": "string",
2619
+ * "description": "The name of the author."
2620
+ * },
2621
+ * "bio": {
2622
+ * "type": "string",
2623
+ * "description": "Concise biography of the author."
2624
+ * }
2625
+ * }
2626
+ * },
2627
+ * "num_choices": 2,
2628
+ * "temperature": 0.4,
2629
+ * "max_tokens": 800
2630
+ * }
2631
+ */
2632
+ "application/json": {
2633
+ /** @description Input prompt. */
2634
+ prompt: string;
2635
+ /** @description JSON schema to guide `json_object` response. */
2636
+ json_schema: {
2637
+ [key: string]: unknown;
2638
+ };
2639
+ /**
2640
+ * @description Number of choices to generate.
2641
+ * @default 2
2642
+ */
2643
+ num_choices: number;
2644
+ /**
2645
+ * Format: float
2646
+ * @description Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic.
2647
+ * @default 0.4
2648
+ */
2649
+ temperature?: number;
2650
+ /** @description Maximum number of tokens to generate. */
2651
+ max_tokens?: number;
2652
+ /**
2653
+ * @description Selected model.
2654
+ * @default Llama3Instruct8B
2655
+ * @enum {string}
2656
+ */
2657
+ model?:
2658
+ | "Mistral7BInstruct"
2659
+ | "Mixtral8x7BInstruct"
2660
+ | "Llama3Instruct8B";
2661
+ };
2662
+ };
2663
+ };
2664
+ responses: {
2665
+ /** @description OK */
2666
+ 200: {
2667
+ content: {
2668
+ "application/json": {
2669
+ /** @description Response choices. */
2670
+ choices: {
2671
+ /** @description JSON response. */
2672
+ json_object?: {
2673
+ [key: string]: unknown;
2674
+ };
2675
+ /** @description If the model output could not be parsed to JSON, this is the raw text output. */
2676
+ text?: string;
2677
+ }[];
2678
+ };
2679
+ };
2680
+ };
2681
+ };
2682
+ };
2683
  /**
   * Mistral7BInstruct
   * @description Compute text using [Mistral 7B Instruct](https://mistral.ai/news/announcing-mistral-7b).
   * Each choice has `text` when `json_schema` was omitted, `json_object` when it was provided.
   */
  Mistral7BInstruct: {
    requestBody?: {
      content: {
        /**
         * @example {
         *   "prompt": "Who is Don Quixote?",
         *   "num_choices": 2,
         *   "temperature": 0.4,
         *   "max_tokens": 800
         * }
         */
        "application/json": {
          /** @description Input prompt. */
          prompt: string;
          /** @description System prompt. */
          system_prompt?: string;
          /**
           * @description Number of choices to generate.
           * @default 1
           */
          num_choices?: number;
          /** @description JSON schema to guide response. */
          json_schema?: {
            [key: string]: unknown;
          };
          /**
           * Format: float
           * @description Higher values make the output more random, lower values make the output more deterministic.
           */
          temperature?: number;
          /**
           * Format: float
           * @description Higher values decrease the likelihood of repeating previous tokens.
           * @default 0
           */
          frequency_penalty?: number;
          /**
           * Format: float
           * @description Higher values decrease the likelihood of repeated sequences.
           * @default 1
           */
          repetition_penalty?: number;
          /**
           * Format: float
           * @description Higher values increase the likelihood of new topics appearing.
           * @default 1.1
           */
          presence_penalty?: number;
          /**
           * Format: float
           * @description Probability below which less likely tokens are filtered out.
           * @default 0.95
           */
          top_p?: number;
          /** @description Maximum number of tokens to generate. */
          max_tokens?: number;
        };
      };
    };
    responses: {
      /** @description OK */
      200: {
        content: {
          "application/json": {
            /** @description Response choices. */
            choices: {
              /** @description Text response, if `json_schema` was not provided. */
              text?: string;
              /** @description JSON response, if `json_schema` was provided. */
              json_object?: {
                [key: string]: unknown;
              };
            }[];
          };
        };
      };
    };
  };
2765
  /**
   * Mixtral8x7BInstruct
   * @description Compute text using instruct-tuned [Mixtral 8x7B](https://mistral.ai/news/mixtral-of-experts/).
   * Same request/response contract as Mistral7BInstruct.
   */
  Mixtral8x7BInstruct: {
    requestBody?: {
      content: {
        /**
         * @example {
         *   "prompt": "Who is Don Quixote?",
         *   "num_choices": 2,
         *   "temperature": 0.4,
         *   "max_tokens": 800
         * }
         */
        "application/json": {
          /** @description Input prompt. */
          prompt: string;
          /** @description System prompt. */
          system_prompt?: string;
          /**
           * @description Number of choices to generate.
           * @default 1
           */
          num_choices?: number;
          /** @description JSON schema to guide response. */
          json_schema?: {
            [key: string]: unknown;
          };
          /**
           * Format: float
           * @description Higher values make the output more random, lower values make the output more deterministic.
           */
          temperature?: number;
          /**
           * Format: float
           * @description Higher values decrease the likelihood of repeating previous tokens.
           * @default 0
           */
          frequency_penalty?: number;
          /**
           * Format: float
           * @description Higher values decrease the likelihood of repeated sequences.
           * @default 1
           */
          repetition_penalty?: number;
          /**
           * Format: float
           * @description Higher values increase the likelihood of new topics appearing.
           * @default 1.1
           */
          presence_penalty?: number;
          /**
           * Format: float
           * @description Probability below which less likely tokens are filtered out.
           * @default 0.95
           */
          top_p?: number;
          /** @description Maximum number of tokens to generate. */
          max_tokens?: number;
        };
      };
    };
    responses: {
      /** @description OK */
      200: {
        content: {
          "application/json": {
            /** @description Response choices. */
            choices: {
              /** @description Text response, if `json_schema` was not provided. */
              text?: string;
              /** @description JSON response, if `json_schema` was provided. */
              json_object?: {
                [key: string]: unknown;
              };
            }[];
          };
        };
      };
    };
  };
2847
  /**
   * Llama3Instruct8B
   * @description Compute text using instruct-tuned [Llama 3 8B](https://llama.meta.com/llama3/).
   */
  Llama3Instruct8B: {
    requestBody?: {
      content: {
        /**
         * @example {
         *   "prompt": "Who is Don Quixote?",
         *   "num_choices": 2,
         *   "temperature": 0.4,
         *   "max_tokens": 800
         * }
         */
        "application/json": {
          /** @description Input prompt. */
          prompt: string;
          /** @description System prompt. */
          system_prompt?: string;
          /**
           * @description Number of choices to generate.
           * @default 1
           */
          num_choices?: number;
          /**
           * Format: float
           * @description Higher values make the output more random, lower values make the output more deterministic.
           */
          temperature?: number;
          /**
           * Format: float
           * @description Higher values decrease the likelihood of repeating previous tokens.
           * @default 0
           */
          frequency_penalty?: number;
          /**
           * Format: float
           * @description Higher values decrease the likelihood of repeated sequences.
           * @default 1
           */
          repetition_penalty?: number;
          /**
           * Format: float
           * @description Higher values increase the likelihood of new topics appearing.
           * @default 1.1
           */
          presence_penalty?: number;
          /**
           * Format: float
           * @description Probability below which less likely tokens are filtered out.
           * @default 0.95
           */
          top_p?: number;
          /** @description Maximum number of tokens to generate. */
          max_tokens?: number;
          /** @description JSON schema to guide response. */
          json_schema?: {
            [key: string]: unknown;
          };
        };
      };
    };
    responses: {
      /** @description OK */
      200: {
        content: {
          "application/json": {
            /** @description Response choices. */
            choices: {
              /** @description Text response, if `json_schema` was not provided. */
              text?: string;
              /** @description JSON response, if `json_schema` was provided. */
              json_object?: {
                [key: string]: unknown;
              };
            }[];
          };
        };
      };
    };
  };
2929
  /**
   * Llama3Instruct70B
   * @description Compute text using instruct-tuned [Llama 3 70B](https://llama.meta.com/llama3/).
   * Unlike the 8B variant, this operation takes no `json_schema` parameter.
   */
  Llama3Instruct70B: {
    requestBody?: {
      content: {
        /**
         * @example {
         *   "prompt": "Who is Don Quixote?",
         *   "num_choices": 2,
         *   "temperature": 0.4,
         *   "max_tokens": 800
         * }
         */
        "application/json": {
          /** @description Input prompt. */
          prompt: string;
          /** @description System prompt. */
          system_prompt?: string;
          /**
           * @description Number of choices to generate.
           * @default 1
           */
          num_choices?: number;
          /**
           * Format: float
           * @description Higher values make the output more random, lower values make the output more deterministic.
           */
          temperature?: number;
          /**
           * Format: float
           * @description Higher values decrease the likelihood of repeating previous tokens.
           * @default 0
           */
          frequency_penalty?: number;
          /**
           * Format: float
           * @description Higher values decrease the likelihood of repeated sequences.
           * @default 1
           */
          repetition_penalty?: number;
          /**
           * Format: float
           * @description Higher values increase the likelihood of new topics appearing.
           * @default 1.1
           */
          presence_penalty?: number;
          /**
           * Format: float
           * @description Probability below which less likely tokens are filtered out.
           * @default 0.95
           */
          top_p?: number;
          /** @description Maximum number of tokens to generate. */
          max_tokens?: number;
        };
      };
    };
    responses: {
      /** @description OK */
      200: {
        content: {
          "application/json": {
            /** @description Response choices. */
            choices: {
              /**
               * @description Text response.
               * NOTE(review): optional in the generated type even though this op
               * has no JSON alternative — confirm against the spec.
               */
              text?: string;
            }[];
          };
        };
      };
    };
  };
3003
  /**
   * Firellava13B
   * @description Compute text with image input using [FireLLaVA 13B](https://fireworks.ai/blog/firellava-the-first-commercially-permissive-oss-llava-model).
   * Note `image_uris` is required here (unlike ComputeText, where it is optional).
   */
  Firellava13B: {
    requestBody?: {
      content: {
        /**
         * @example {
         *   "prompt": "what are these paintings of and who made them?",
         *   "image_uris": [
         *     "https://media.substrate.run/docs-fuji-red.jpg",
         *     "https://media.substrate.run/docs-fuji-blue.jpg"
         *   ]
         * }
         */
        "application/json": {
          /** @description Text prompt. */
          prompt: string;
          /** @description Image prompts. */
          image_uris: string[];
          /** @description Maximum number of tokens to generate. */
          max_tokens?: number;
        };
      };
    };
    responses: {
      /** @description OK */
      200: {
        content: {
          "application/json": {
            /** @description Text response. */
            text: string;
          };
        };
      };
    };
  };
3041
  /**
   * GenerateImage
   * @description Generate an image. `image_uri` is a hosted URL when `store` is
   * set, otherwise base64-encoded JPEG bytes.
   */
  GenerateImage: {
    requestBody?: {
      content: {
        /**
         * @example {
         *   "prompt": "hokusai futuristic supercell spiral cloud with glowing core over turbulent ocean",
         *   "store": "hosted"
         * }
         */
        "application/json": {
          /** @description Text prompt. */
          prompt: string;
          /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
          store?: string;
        };
      };
    };
    responses: {
      /** @description OK */
      200: {
        content: {
          "application/json": {
            /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
            image_uri: string;
          };
        };
      };
    };
  };
3074
+ /**
3075
+ * MultiGenerateImage
3076
+ * @description Generate multiple images.
3077
+ */
3078
+ MultiGenerateImage: {
3079
+ requestBody?: {
3080
+ content: {
3081
+ /**
3082
+ * @example {
3083
+ * "prompt": "hokusai futuristic supercell spiral cloud with glowing core over turbulent ocean",
3084
+ * "num_images": 2,
3085
+ * "store": "hosted"
3086
+ * }
3087
+ */
3088
+ "application/json": {
3089
+ /** @description Text prompt. */
3090
+ prompt: string;
3091
+ /**
3092
+ * @description Number of images to generate.
3093
+ * @default 2
3094
+ */
3095
+ num_images: number;
3096
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
3097
+ store?: string;
3098
+ };
3099
+ };
3100
+ };
3101
+ responses: {
3102
+ /** @description OK */
3103
+ 200: {
3104
+ content: {
3105
+ "application/json": {
3106
+ /** @description Generated images. */
3107
+ outputs: {
3108
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
3109
+ image_uri: string;
3110
+ }[];
3111
+ };
3112
+ };
3113
+ };
3114
+ };
3115
+ };
3116
+ /**
3117
+ * InpaintImage
3118
+ * @description Edit an image using image generation inside part of the image or the full image.
3119
+ */
3120
+ InpaintImage: {
3121
+ requestBody?: {
3122
+ content: {
3123
+ /**
3124
+ * @example {
3125
+ * "image_uri": "https://media.substrate.run/docs-klimt-park.jpg",
3126
+ * "mask_image_uri": "https://media.substrate.run/spiral-logo.jpeg",
3127
+ * "prompt": "large tropical colorful bright anime birds in a dark jungle full of vines, high resolution",
3128
+ * "store": "hosted"
3129
+ * }
3130
+ */
3131
+ "application/json": {
3132
+ /** @description Original image. */
3133
+ image_uri: string;
3134
+ /** @description Text prompt. */
3135
+ prompt: string;
3136
+ /** @description Mask image that controls which pixels are inpainted. If unset, the entire image is edited (image-to-image). */
3137
+ mask_image_uri?: string;
3138
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
3139
+ store?: string;
3140
+ };
3141
+ };
3142
+ };
3143
+ responses: {
3144
+ /** @description OK */
3145
+ 200: {
3146
+ content: {
3147
+ "application/json": {
3148
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
3149
+ image_uri: string;
3150
+ };
3151
+ };
3152
+ };
3153
+ };
3154
+ };
3155
+ /**
3156
+ * MultiInpaintImage
3157
+ * @description Edit multiple images using image generation.
3158
+ */
3159
+ MultiInpaintImage: {
3160
+ requestBody?: {
3161
+ content: {
3162
+ /**
3163
+ * @example {
3164
+ * "image_uri": "https://media.substrate.run/docs-klimt-park.jpg",
3165
+ * "mask_image_uri": "https://media.substrate.run/spiral-logo.jpeg",
3166
+ * "prompt": "large tropical colorful bright anime birds in a dark jungle full of vines, high resolution",
3167
+ * "num_images": 2,
3168
+ * "store": "hosted"
3169
+ * }
3170
+ */
3171
+ "application/json": {
3172
+ /** @description Original image. */
3173
+ image_uri: string;
3174
+ /** @description Text prompt. */
3175
+ prompt: string;
3176
+ /** @description Mask image that controls which pixels are edited (inpainting). If unset, the entire image is edited (image-to-image). */
3177
+ mask_image_uri?: string;
3178
+ /**
3179
+ * @description Number of images to generate.
3180
+ * @default 2
3181
+ */
3182
+ num_images: number;
3183
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
3184
+ store?: string;
3185
+ };
3186
+ };
3187
+ };
3188
+ responses: {
3189
+ /** @description OK */
3190
+ 200: {
3191
+ content: {
3192
+ "application/json": {
3193
+ /** @description Generated images. */
3194
+ outputs: {
3195
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
3196
+ image_uri: string;
3197
+ }[];
3198
+ };
3199
+ };
3200
+ };
3201
+ };
3202
+ };
3203
+ /**
3204
+ * StableDiffusionXLLightning
3205
+ * @description Generate an image using [Stable Diffusion XL Lightning](https://arxiv.org/abs/2402.13929).
3206
+ */
3207
+ StableDiffusionXLLightning: {
3208
+ requestBody?: {
3209
+ content: {
3210
+ /**
3211
+ * @example {
3212
+ * "prompt": "hokusai futuristic supercell spiral cloud with glowing core over turbulent ocean",
3213
+ * "negative_prompt": "night, moon",
3214
+ * "num_images": 2,
3215
+ * "seeds": [
3216
+ * 330699,
3217
+ * 136464
3218
+ * ],
3219
+ * "store": "hosted"
3220
+ * }
3221
+ */
3222
+ "application/json": {
3223
+ /** @description Text prompt. */
3224
+ prompt: string;
3225
+ /** @description Negative input prompt. */
3226
+ negative_prompt?: string;
3227
+ /**
3228
+ * @description Number of images to generate.
3229
+ * @default 1
3230
+ */
3231
+ num_images?: number;
3232
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
3233
+ store?: string;
3234
+ /**
3235
+ * @description Height of output image, in pixels.
3236
+ * @default 1024
3237
+ */
3238
+ height?: number;
3239
+ /**
3240
+ * @description Width of output image, in pixels.
3241
+ * @default 1024
3242
+ */
3243
+ width?: number;
3244
+ /** @description Seeds for deterministic generation. Default is a random seed. */
3245
+ seeds?: number[];
3246
+ };
3247
+ };
3248
+ };
3249
+ responses: {
3250
+ /** @description OK */
3251
+ 200: {
3252
+ content: {
3253
+ "application/json": {
3254
+ /** @description Generated images. */
3255
+ outputs: {
3256
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
3257
+ image_uri: string;
3258
+ /** @description The random noise seed used for generation. */
3259
+ seed: number;
3260
+ }[];
3261
+ };
3262
+ };
3263
+ };
3264
+ };
3265
+ };
3266
+ /**
3267
+ * StableDiffusionXLInpaint
3268
+ * @description Edit an image using [Stable Diffusion XL](https://arxiv.org/abs/2307.01952). Supports inpainting (edit part of the image with a mask) and image-to-image (edit the full image).
3269
+ */
3270
+ StableDiffusionXLInpaint: {
3271
+ requestBody?: {
3272
+ content: {
3273
+ /**
3274
+ * @example {
3275
+ * "image_uri": "https://media.substrate.run/docs-klimt-park.jpg",
3276
+ * "mask_image_uri": "https://media.substrate.run/spiral-logo.jpeg",
3277
+ * "prompt": "large tropical colorful bright birds in a jungle, high resolution oil painting",
3278
+ * "negative_prompt": "dark, cartoon, anime",
3279
+ * "strength": 0.8,
3280
+ * "num_images": 2,
3281
+ * "store": "hosted",
3282
+ * "seeds": [
3283
+ * 1607280,
3284
+ * 1720395
3285
+ * ]
3286
+ * }
3287
+ */
3288
+ "application/json": {
3289
+ /** @description Original image. */
3290
+ image_uri: string;
3291
+ /** @description Text prompt. */
3292
+ prompt: string;
3293
+ /** @description Mask image that controls which pixels are edited (inpainting). If unset, the entire image is edited (image-to-image). */
3294
+ mask_image_uri?: string;
3295
+ /**
3296
+ * @description Number of images to generate.
3297
+ * @default 1
3298
+ */
3299
+ num_images: number;
3300
+ /**
3301
+ * @description Resolution of the output image, in pixels.
3302
+ * @default 1024
3303
+ */
3304
+ output_resolution?: number;
3305
+ /** @description Negative input prompt. */
3306
+ negative_prompt?: string;
3307
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
3308
+ store?: string;
3309
+ /**
3310
+ * Format: float
3311
+ * @description Controls the strength of the generation process.
3312
+ * @default 0.8
3313
+ */
3314
+ strength?: number;
3315
+ /** @description Random noise seeds. Default is random seeds for each generation. */
3316
+ seeds?: number[];
3317
+ };
3318
+ };
3319
+ };
3320
+ responses: {
3321
+ /** @description OK */
3322
+ 200: {
3323
+ content: {
3324
+ "application/json": {
3325
+ /** @description Generated images. */
3326
+ outputs: {
3327
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
3328
+ image_uri: string;
3329
+ /** @description The random noise seed used for generation. */
3330
+ seed: number;
3331
+ }[];
3332
+ };
3333
+ };
3334
+ };
3335
+ };
3336
+ };
3337
+ /**
3338
+ * StableDiffusionXLControlNet
3339
+ * @description Generate an image with generation structured by an input image, using Stable Diffusion XL with [ControlNet](https://arxiv.org/abs/2302.05543).
3340
+ */
3341
+ StableDiffusionXLControlNet: {
3342
+ requestBody?: {
3343
+ content: {
3344
+ /**
3345
+ * @example {
3346
+ * "image_uri": "https://media.substrate.run/spiral-logo.jpeg",
3347
+ * "prompt": "the futuristic solarpunk city of atlantis at sunset, cinematic bokeh HD",
3348
+ * "control_method": "illusion",
3349
+ * "conditioning_scale": 1,
3350
+ * "strength": 1,
3351
+ * "store": "hosted",
3352
+ * "num_images": 2,
3353
+ * "seeds": [
3354
+ * 1607226,
3355
+ * 1720395
3356
+ * ]
3357
+ * }
3358
+ */
3359
+ "application/json": {
3360
+ /** @description Input image. */
3361
+ image_uri: string;
3362
+ /**
3363
+ * @description Strategy to control generation using the input image.
3364
+ * @enum {string}
3365
+ */
3366
+ control_method: "edge" | "depth" | "illusion" | "tile";
3367
+ /** @description Text prompt. */
3368
+ prompt: string;
3369
+ /**
3370
+ * @description Number of images to generate.
3371
+ * @default 1
3372
+ */
3373
+ num_images: number;
3374
+ /**
3375
+ * @description Resolution of the output image, in pixels.
3376
+ * @default 1024
3377
+ */
3378
+ output_resolution?: number;
3379
+ /** @description Negative input prompt. */
3380
+ negative_prompt?: string;
3381
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
3382
+ store?: string;
3383
+ /**
3384
+ * Format: float
3385
+ * @description Controls the influence of the input image on the generated output.
3386
+ * @default 0.5
3387
+ */
3388
+ conditioning_scale?: number;
3389
+ /**
3390
+ * Format: float
3391
+ * @description Controls how much to transform the input image.
3392
+ * @default 0.5
3393
+ */
3394
+ strength?: number;
3395
+ /** @description Random noise seeds. Default is random seeds for each generation. */
3396
+ seeds?: number[];
3397
+ };
3398
+ };
3399
+ };
3400
+ responses: {
3401
+ /** @description OK */
3402
+ 200: {
3403
+ content: {
3404
+ "application/json": {
3405
+ /** @description Generated images. */
3406
+ outputs: {
3407
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
3408
+ image_uri: string;
3409
+ /** @description The random noise seed used for generation. */
3410
+ seed: number;
3411
+ }[];
3412
+ };
3413
+ };
3414
+ };
3415
+ };
3416
+ };
3417
+ /**
3418
+ * StableVideoDiffusion
3419
+ * @description Generates a video using a still image as conditioning frame.
3420
+ */
3421
+ StableVideoDiffusion: {
3422
+ requestBody?: {
3423
+ content: {
3424
+ /**
3425
+ * @example {
3426
+ * "image_uri": "https://media.substrate.run/apple-forest.jpeg",
3427
+ * "store": "hosted"
3428
+ * }
3429
+ */
3430
+ "application/json": {
3431
+ /** @description Original image. */
3432
+ image_uri: string;
3433
+ /** @description Use "hosted" to return a video URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the video data will be returned as a base64-encoded string. */
3434
+ store?: string;
3435
+ /**
3436
+ * @description Output video format.
3437
+ * @default gif
3438
+ * @enum {string}
3439
+ */
3440
+ output_format?: "gif" | "webp" | "mp4" | "frames";
3441
+ /** @description Seed for deterministic generation. Default is a random seed. */
3442
+ seed?: number;
3443
+ /**
3444
+ * @description Frames per second of the generated video. Ignored if output format is `frames`.
3445
+ * @default 7
3446
+ */
3447
+ fps?: number;
3448
+ /**
3449
+ * @description The motion bucket id to use for the generated video. This can be used to control the motion of the generated video. Increasing the motion bucket id increases the motion of the generated video.
3450
+ * @default 180
3451
+ */
3452
+ motion_bucket_id?: number;
3453
+ /**
3454
+ * Format: float
3455
+ * @description The amount of noise added to the conditioning image. The higher the values the less the video resembles the conditioning image. Increasing this value also increases the motion of the generated video.
3456
+ * @default 0.1
3457
+ */
3458
+ noise?: number;
3459
+ };
3460
+ };
3461
+ };
3462
+ responses: {
3463
+ /** @description OK */
3464
+ 200: {
3465
+ content: {
3466
+ "application/json": {
3467
+ /** @description Generated video. */
3468
+ video_uri?: string;
3469
+ /** @description Generated frames. */
3470
+ frame_uris?: string[];
3471
+ };
3472
+ };
3473
+ };
3474
+ };
3475
+ };
3476
+ /**
3477
+ * InterpolateFrames
3478
+ * @description Generates a interpolation frames between each adjacent frames.
3479
+ */
3480
+ InterpolateFrames: {
3481
+ requestBody?: {
3482
+ content: {
3483
+ /**
3484
+ * @example {
3485
+ * "frame_uris": [
3486
+ * "https://media.substrate.run/apple-forest2.jpeg",
3487
+ * "https://media.substrate.run/apple-forest3.jpeg"
3488
+ * ],
3489
+ * "store": "hosted"
3490
+ * }
3491
+ */
3492
+ "application/json": {
3493
+ /** @description Frames. */
3494
+ frame_uris: string[];
3495
+ /** @description Use "hosted" to return a video URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the video data will be returned as a base64-encoded string. */
3496
+ store?: string;
3497
+ /**
3498
+ * @description Output video format.
3499
+ * @default gif
3500
+ * @enum {string}
3501
+ */
3502
+ output_format?: "gif" | "webp" | "mp4" | "frames";
3503
+ /**
3504
+ * @description Frames per second of the generated video. Ignored if output format is `frames`.
3505
+ * @default 7
3506
+ */
3507
+ fps?: number;
3508
+ /**
3509
+ * @description Number of interpolation steps. Each step adds an interpolated frame between adjacent frames. For example, 2 steps over 2 frames produces 5 frames.
3510
+ * @default 2
3511
+ */
3512
+ num_steps?: number;
3513
+ };
3514
+ };
3515
+ };
3516
+ responses: {
3517
+ /** @description OK */
3518
+ 200: {
3519
+ content: {
3520
+ "application/json": {
3521
+ /** @description Generated video. */
3522
+ video_uri?: string;
3523
+ /** @description Output frames. */
3524
+ frame_uris?: string[];
3525
+ };
3526
+ };
3527
+ };
3528
+ };
3529
+ };
3530
+ /**
3531
+ * TranscribeSpeech
3532
+ * @description Transcribe speech in an audio or video file.
3533
+ */
3534
+ TranscribeSpeech: {
3535
+ requestBody?: {
3536
+ content: {
3537
+ /**
3538
+ * @example {
3539
+ * "audio_uri": "https://media.substrate.run/dfw-clip.m4a",
3540
+ * "prompt": "David Foster Wallace interviewed about US culture, and Infinite Jest",
3541
+ * "segment": true,
3542
+ * "align": true,
3543
+ * "diarize": true,
3544
+ * "suggest_chapters": true
3545
+ * }
3546
+ */
3547
+ "application/json": {
3548
+ /** @description Input audio. */
3549
+ audio_uri: string;
3550
+ /** @description Prompt to guide model on the content and context of input audio. */
3551
+ prompt?: string;
3552
+ /**
3553
+ * @description Language of input audio in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) format.
3554
+ * @default en
3555
+ */
3556
+ language?: string;
3557
+ /**
3558
+ * @description Segment the text into sentences with approximate timestamps.
3559
+ * @default false
3560
+ */
3561
+ segment?: boolean;
3562
+ /**
3563
+ * @description Align transcription to produce more accurate sentence-level timestamps and word-level timestamps. An array of word segments will be included in each sentence segment.
3564
+ * @default false
3565
+ */
3566
+ align?: boolean;
3567
+ /**
3568
+ * @description Identify speakers for each segment. Speaker IDs will be included in each segment.
3569
+ * @default false
3570
+ */
3571
+ diarize?: boolean;
3572
+ /**
3573
+ * @description Suggest automatic chapter markers.
3574
+ * @default false
3575
+ */
3576
+ suggest_chapters?: boolean;
3577
+ };
3578
+ };
3579
+ };
3580
+ responses: {
3581
+ /** @description OK */
3582
+ 200: {
3583
+ content: {
3584
+ "application/json": {
3585
+ /** @description Transcribed text. */
3586
+ text: string;
3587
+ /** @description Transcribed segments, if `segment` is enabled. */
3588
+ segments?: {
3589
+ /** @description Text of segment. */
3590
+ text: string;
3591
+ /**
3592
+ * Format: float
3593
+ * @description Start time of segment, in seconds.
3594
+ */
3595
+ start: number;
3596
+ /**
3597
+ * Format: float
3598
+ * @description End time of segment, in seconds.
3599
+ */
3600
+ end: number;
3601
+ /** @description ID of speaker, if `diarize` is enabled. */
3602
+ speaker?: string;
3603
+ /** @description Aligned words, if `align` is enabled. */
3604
+ words?: {
3605
+ /** @description Text of word. */
3606
+ word: string;
3607
+ /**
3608
+ * Format: float
3609
+ * @description Start time of word, in seconds.
3610
+ */
3611
+ start?: number;
3612
+ /**
3613
+ * Format: float
3614
+ * @description End time of word, in seconds.
3615
+ */
3616
+ end?: number;
3617
+ /** @description ID of speaker, if `diarize` is enabled. */
3618
+ speaker?: string;
3619
+ }[];
3620
+ }[];
3621
+ /** @description Chapter markers, if `suggest_chapters` is enabled. */
3622
+ chapters?: {
3623
+ /** @description Chapter title. */
3624
+ title: string;
3625
+ /**
3626
+ * Format: float
3627
+ * @description Start time of chapter, in seconds.
3628
+ */
3629
+ start: number;
3630
+ }[];
3631
+ };
3632
+ };
3633
+ };
3634
+ };
3635
+ };
3636
+ /**
3637
+ * GenerateSpeech
3638
+ * @description Generate speech from text.
3639
+ */
3640
+ GenerateSpeech: {
3641
+ requestBody?: {
3642
+ content: {
3643
+ /**
3644
+ * @example {
3645
+ * "text": "Substrate: an underlying substance or layer.",
3646
+ * "store": "hosted"
3647
+ * }
3648
+ */
3649
+ "application/json": {
3650
+ /** @description Input text. */
3651
+ text: string;
3652
+ /** @description Use "hosted" to return an audio URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the audio data will be returned as a base64-encoded string. */
3653
+ store?: string;
3654
+ };
3655
+ };
3656
+ };
3657
+ responses: {
3658
+ /** @description OK */
3659
+ 200: {
3660
+ content: {
3661
+ "application/json": {
3662
+ /** @description Base 64-encoded WAV audio bytes, or a hosted audio url if `store` is provided. */
3663
+ audio_uri: string;
3664
+ };
3665
+ };
3666
+ };
3667
+ };
3668
+ };
3669
+ /**
3670
+ * RemoveBackground
3671
+ * @description Remove the background from an image and return the foreground segment as a cut-out or a mask.
3672
+ */
3673
+ RemoveBackground: {
3674
+ requestBody?: {
3675
+ content: {
3676
+ /**
3677
+ * @example {
3678
+ * "image_uri": "https://media.substrate.run/apple-forest.jpeg",
3679
+ * "store": "hosted"
3680
+ * }
3681
+ */
3682
+ "application/json": {
3683
+ /** @description Input image. */
3684
+ image_uri: string;
3685
+ /**
3686
+ * @description Return a mask image instead of the original content.
3687
+ * @default false
3688
+ */
3689
+ return_mask?: boolean;
3690
+ /**
3691
+ * @description Invert the mask image. Only takes effect if `return_mask` is true.
3692
+ * @default false
3693
+ */
3694
+ invert_mask?: boolean;
3695
+ /** @description Hex value background color. Transparent if unset. */
3696
+ background_color?: string;
3697
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
3698
+ store?: string;
3699
+ };
3700
+ };
3701
+ };
3702
+ responses: {
3703
+ /** @description OK */
3704
+ 200: {
3705
+ content: {
3706
+ "application/json": {
3707
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
3708
+ image_uri: string;
3709
+ };
3710
+ };
3711
+ };
3712
+ };
3713
+ };
3714
+ /**
3715
+ * EraseImage
3716
+ * @description Erase the masked part of an image, e.g. to remove an object by inpainting.
3717
+ */
3718
+ EraseImage: {
3719
+ requestBody?: {
3720
+ content: {
3721
+ /**
3722
+ * @example {
3723
+ * "image_uri": "https://media.substrate.run/apple-forest.jpeg",
3724
+ * "mask_image_uri": "https://media.substrate.run/apple-forest-mask.jpeg",
3725
+ * "store": "hosted"
3726
+ * }
3727
+ */
3728
+ "application/json": {
3729
+ /** @description Input image. */
3730
+ image_uri: string;
3731
+ /** @description Mask image that controls which pixels are inpainted. */
3732
+ mask_image_uri: string;
3733
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
3734
+ store?: string;
3735
+ };
3736
+ };
3737
+ };
3738
+ responses: {
3739
+ /** @description OK */
3740
+ 200: {
3741
+ content: {
3742
+ "application/json": {
3743
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
3744
+ image_uri: string;
3745
+ };
3746
+ };
3747
+ };
3748
+ };
3749
+ };
3750
+ /**
3751
+ * UpscaleImage
3752
+ * @description Upscale an image using image generation.
3753
+ */
3754
+ UpscaleImage: {
3755
+ requestBody?: {
3756
+ content: {
3757
+ /**
3758
+ * @example {
3759
+ * "prompt": "high resolution detailed spiral shell",
3760
+ * "image_uri": "https://media.substrate.run/docs-shell-emoji.jpg",
3761
+ * "store": "hosted"
3762
+ * }
3763
+ */
3764
+ "application/json": {
3765
+ /** @description Prompt to guide model on the content of image to upscale. */
3766
+ prompt?: string;
3767
+ /** @description Input image. */
3768
+ image_uri: string;
3769
+ /**
3770
+ * @description Resolution of the output image, in pixels.
3771
+ * @default 1024
3772
+ */
3773
+ output_resolution?: number;
3774
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
3775
+ store?: string;
3776
+ };
3777
+ };
3778
+ };
3779
+ responses: {
3780
+ /** @description OK */
3781
+ 200: {
3782
+ content: {
3783
+ "application/json": {
3784
+ /** @description Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
3785
+ image_uri: string;
3786
+ };
3787
+ };
3788
+ };
3789
+ };
3790
+ };
3791
+ /**
3792
+ * SegmentUnderPoint
3793
+ * @description Segment an image under a point and return the segment.
3794
+ */
3795
+ SegmentUnderPoint: {
3796
+ requestBody?: {
3797
+ content: {
3798
+ /**
3799
+ * @example {
3800
+ * "image_uri": "https://media.substrate.run/docs-vg-bedroom.jpg",
3801
+ * "point": {
3802
+ * "x": 189,
3803
+ * "y": 537
3804
+ * },
3805
+ * "store": "hosted"
3806
+ * }
3807
+ */
3808
+ "application/json": {
3809
+ /** @description Input image. */
3810
+ image_uri: string;
3811
+ /** Point */
3812
+ point: {
3813
+ /** @description X position. */
3814
+ x: number;
3815
+ /** @description Y position. */
3816
+ y: number;
3817
+ };
3818
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
3819
+ store?: string;
3820
+ };
3821
+ };
3822
+ };
3823
+ responses: {
3824
+ /** @description OK */
3825
+ 200: {
3826
+ content: {
3827
+ "application/json": {
3828
+ /** @description Detected segments in 'mask image' format. Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
3829
+ mask_image_uri: string;
3830
+ };
3831
+ };
3832
+ };
3833
+ };
3834
+ };
3835
+ /**
3836
+ * SegmentAnything
3837
+ * @description Segment an image using [SegmentAnything](https://github.com/facebookresearch/segment-anything).
3838
+ */
3839
+ SegmentAnything: {
3840
+ requestBody?: {
3841
+ content: {
3842
+ /**
3843
+ * @example {
3844
+ * "image_uri": "https://media.substrate.run/docs-vg-bedroom.jpg",
3845
+ * "point_prompts": [
3846
+ * {
3847
+ * "x": 189,
3848
+ * "y": 537
3849
+ * }
3850
+ * ],
3851
+ * "store": "hosted"
3852
+ * }
3853
+ */
3854
+ "application/json": {
3855
+ /** @description Input image. */
3856
+ image_uri: string;
3857
+ /** @description Point prompts, to detect a segment under the point. One of `point_prompts` or `box_prompts` must be set. */
3858
+ point_prompts?: {
3859
+ /** @description X position. */
3860
+ x: number;
3861
+ /** @description Y position. */
3862
+ y: number;
3863
+ }[];
3864
+ /** @description Box prompts, to detect a segment within the bounding box. One of `point_prompts` or `box_prompts` must be set. */
3865
+ box_prompts?: {
3866
+ /**
3867
+ * Format: float
3868
+ * @description Top left corner x.
3869
+ */
3870
+ x1: number;
3871
+ /**
3872
+ * Format: float
3873
+ * @description Top left corner y.
3874
+ */
3875
+ y1: number;
3876
+ /**
3877
+ * Format: float
3878
+ * @description Bottom right corner x.
3879
+ */
3880
+ x2: number;
3881
+ /**
3882
+ * Format: float
3883
+ * @description Bottom right corner y.
3884
+ */
3885
+ y2: number;
3886
+ }[];
3887
+ /** @description Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the image data will be returned as a base64-encoded string. */
3888
+ store?: string;
3889
+ };
3890
+ };
3891
+ };
3892
+ responses: {
3893
+ /** @description OK */
3894
+ 200: {
3895
+ content: {
3896
+ "application/json": {
3897
+ /** @description Detected segments in 'mask image' format. Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. */
3898
+ mask_image_uri: string;
3899
+ };
3900
+ };
3901
+ };
3902
+ };
3903
+ };
3904
+ /**
3905
+ * SplitDocument
3906
+ * @description Split document into text segments.
3907
+ */
3908
+ SplitDocument: {
3909
+ requestBody?: {
3910
+ content: {
3911
+ /**
3912
+ * @example {
3913
+ * "doc_id": "example_pdf",
3914
+ * "uri": "https://arxiv.org/pdf/2405.07945",
3915
+ * "metadata": {
3916
+ * "title": "GRASS II: Simulations of Potential Granulation Noise Mitigation Methods"
3917
+ * }
3918
+ * }
3919
+ */
3920
+ "application/json": {
3921
+ /** @description URI of the document. */
3922
+ uri: string;
3923
+ /** @description Document ID. */
3924
+ doc_id?: string;
3925
+ /** @description Document metadata. */
3926
+ metadata?: {
3927
+ [key: string]: unknown;
3928
+ };
3929
+ /** @description Maximum number of units per chunk. Defaults to 1024 tokens for text or 40 lines for code. */
3930
+ chunk_size?: number;
3931
+ /** @description Number of units to overlap between chunks. Defaults to 200 tokens for text or 15 lines for code. */
3932
+ chunk_overlap?: number;
3933
+ };
3934
+ };
3935
+ };
3936
+ responses: {
3937
+ /** @description OK */
3938
+ 200: {
3939
+ content: {
3940
+ "application/json": {
3941
+ /** @description Document chunks */
3942
+ items: {
3943
+ /** @description Text to embed. */
3944
+ text: string;
3945
+ /** @description Metadata that can be used to query the vector store. Ignored if `collection_name` is unset. */
3946
+ metadata?: {
3947
+ [key: string]: unknown;
3948
+ };
3949
+ /** @description Vector store document ID. Ignored if `collection_name` is unset. */
3950
+ doc_id?: string;
3951
+ }[];
3952
+ };
3953
+ };
3954
+ };
3955
+ };
3956
+ };
3957
+ /**
3958
+ * EmbedText
3959
+ * @description Generate embedding for a text document.
3960
+ */
3961
+ EmbedText: {
3962
+ requestBody?: {
3963
+ content: {
3964
+ /**
3965
+ * @example {
3966
+ * "text": "Argon is the third most abundant gas in Earth's atmosphere, at 0.934% (9340 ppmv). It is more than twice as abundant as water vapor.",
3967
+ * "model": "jina-v2",
3968
+ * "collection_name": "smoke_tests",
3969
+ * "metadata": {
3970
+ * "group": "18"
3971
+ * },
3972
+ * "embedded_metadata_keys": [
3973
+ * "group"
3974
+ * ]
3975
+ * }
3976
+ */
3977
+ "application/json": {
3978
+ /** @description Text to embed. */
3979
+ text: string;
3980
+ /** @description Vector store name. */
3981
+ collection_name?: string;
3982
+ /** @description Metadata that can be used to query the vector store. Ignored if `collection_name` is unset. */
3983
+ metadata?: {
3984
+ [key: string]: unknown;
3985
+ };
3986
+ /** @description Choose keys from `metadata` to embed with text. */
3987
+ embedded_metadata_keys?: string[];
3988
+ /** @description Vector store document ID. Ignored if `store` is unset. */
3989
+ doc_id?: string;
3990
+ /**
3991
+ * @description Selected embedding model.
3992
+ * @default jina-v2
3993
+ * @enum {string}
3994
+ */
3995
+ model?: "jina-v2" | "clip";
3996
+ };
3997
+ };
3998
+ };
3999
+ responses: {
4000
+ /** @description OK */
4001
+ 200: {
4002
+ content: {
4003
+ "application/json": {
4004
+ /** Embedding */
4005
+ embedding: {
4006
+ /** @description Embedding vector. */
4007
+ vector: number[];
4008
+ /** @description Vector store document ID. */
4009
+ doc_id?: string;
4010
+ /** @description Vector store document metadata. */
4011
+ metadata?: {
4012
+ [key: string]: unknown;
4013
+ };
4014
+ };
4015
+ };
4016
+ };
4017
+ };
4018
+ };
4019
+ };
4020
+ /**
4021
+ * MultiEmbedText
4022
+ * @description Generate embeddings for multiple text documents.
4023
+ */
4024
+ MultiEmbedText: {
4025
+ requestBody?: {
4026
+ content: {
4027
+ /**
4028
+ * @example {
4029
+ * "model": "jina-v2",
4030
+ * "items": [
4031
+ * {
4032
+ * "text": "Osmium is the densest naturally occurring element. When experimentally measured using X-ray crystallography, it has a density of 22.59 g/cm3. Manufacturers use its alloys with platinum, iridium, and other platinum-group metals to make fountain pen nib tipping, electrical contacts, and in other applications that require extreme durability and hardness.",
4033
+ * "metadata": {
4034
+ * "group": "8"
4035
+ * }
4036
+ * },
4037
+ * {
4038
+ * "text": "Despite its abundant presence in the universe and Solar System—ranking fifth in cosmic abundance following hydrogen, helium, oxygen, and carbon—neon is comparatively scarce on Earth.",
4039
+ * "metadata": {
4040
+ * "group": "18"
4041
+ * }
4042
+ * }
4043
+ * ],
4044
+ * "collection_name": "smoke_tests",
4045
+ * "embedded_metadata_keys": [
4046
+ * "group"
4047
+ * ]
4048
+ * }
4049
+ */
4050
+ "application/json": {
4051
+ /** @description Items to embed. */
4052
+ items: {
4053
+ /** @description Text to embed. */
4054
+ text: string;
4055
+ /** @description Metadata that can be used to query the vector store. Ignored if `collection_name` is unset. */
4056
+ metadata?: {
4057
+ [key: string]: unknown;
4058
+ };
4059
+ /** @description Vector store document ID. Ignored if `collection_name` is unset. */
4060
+ doc_id?: string;
4061
+ }[];
4062
+ /** @description Vector store name. */
4063
+ collection_name?: string;
4064
+ /** @description Choose keys from `metadata` to embed with text. */
4065
+ embedded_metadata_keys?: string[];
4066
+ /**
4067
+ * @description Selected embedding model.
4068
+ * @default jina-v2
4069
+ * @enum {string}
4070
+ */
4071
+ model?: "jina-v2" | "clip";
4072
+ };
4073
+ };
4074
+ };
4075
+ responses: {
4076
+ /** @description OK */
4077
+ 200: {
4078
+ content: {
4079
+ "application/json": {
4080
+ /** @description Generated embeddings. */
4081
+ embeddings: {
4082
+ /** @description Embedding vector. */
4083
+ vector: number[];
4084
+ /** @description Vector store document ID. */
4085
+ doc_id?: string;
4086
+ /** @description Vector store document metadata. */
4087
+ metadata?: {
4088
+ [key: string]: unknown;
4089
+ };
4090
+ }[];
4091
+ };
4092
+ };
4093
+ };
4094
+ };
4095
+ };
4096
+ /**
4097
+ * EmbedImage
4098
+ * @description Generate embedding for an image.
4099
+ */
4100
+ EmbedImage: {
4101
+ requestBody?: {
4102
+ content: {
4103
+ /**
4104
+ * @example {
4105
+ * "image_uri": "https://media.substrate.run/docs-fuji-red.jpg",
4106
+ * "collection_name": "smoke_tests"
4107
+ * }
4108
+ */
4109
+ "application/json": {
4110
+ /** @description Image to embed. */
4111
+ image_uri: string;
4112
+ /** @description Vector store name. */
4113
+ collection_name?: string;
4114
+ /** @description Vector store document ID. Ignored if `collection_name` is unset. */
4115
+ doc_id?: string;
4116
+ /**
4117
+ * @description Selected embedding model.
4118
+ * @default clip
4119
+ * @enum {string}
4120
+ */
4121
+ model?: "clip";
4122
+ };
4123
+ };
4124
+ };
4125
+ responses: {
4126
+ /** @description OK */
4127
+ 200: {
4128
+ content: {
4129
+ "application/json": {
4130
+ /** Embedding */
4131
+ embedding: {
4132
+ /** @description Embedding vector. */
4133
+ vector: number[];
4134
+ /** @description Vector store document ID. */
4135
+ doc_id?: string;
4136
+ /** @description Vector store document metadata. */
4137
+ metadata?: {
4138
+ [key: string]: unknown;
4139
+ };
4140
+ };
4141
+ };
4142
+ };
4143
+ };
4144
+ };
4145
+ };
4146
+ /**
4147
+ * MultiEmbedImage
4148
+ * @description Generate embeddings for multiple images.
4149
+ */
4150
+ MultiEmbedImage: {
4151
+ requestBody?: {
4152
+ content: {
4153
+ /**
4154
+ * @example {
4155
+ * "items": [
4156
+ * {
4157
+ * "image_uri": "https://media.substrate.run/docs-fuji-red.jpg"
4158
+ * },
4159
+ * {
4160
+ * "image_uri": "https://media.substrate.run/docs-fuji-blue.jpg"
4161
+ * }
4162
+ * ],
4163
+ * "collection_name": "smoke_tests"
4164
+ * }
4165
+ */
4166
+ "application/json": {
4167
+ /** @description Items to embed. */
4168
+ items: {
4169
+ /** @description Image to embed. */
4170
+ image_uri: string;
4171
+ /** @description Vector store document ID. Ignored if `collection_name` is unset. */
4172
+ doc_id?: string;
4173
+ }[];
4174
+ /** @description Vector store name. */
4175
+ collection_name?: string;
4176
+ /**
4177
+ * @description Selected embedding model.
4178
+ * @default clip
4179
+ * @enum {string}
4180
+ */
4181
+ model?: "clip";
4182
+ };
4183
+ };
4184
+ };
4185
+ responses: {
4186
+ /** @description OK */
4187
+ 200: {
4188
+ content: {
4189
+ "application/json": {
4190
+ /** @description Generated embeddings. */
4191
+ embeddings: {
4192
+ /** @description Embedding vector. */
4193
+ vector: number[];
4194
+ /** @description Vector store document ID. */
4195
+ doc_id?: string;
4196
+ /** @description Vector store document metadata. */
4197
+ metadata?: {
4198
+ [key: string]: unknown;
4199
+ };
4200
+ }[];
4201
+ };
4202
+ };
4203
+ };
4204
+ };
4205
+ };
4206
+ /**
4207
+ * JinaV2
4208
+ * @description Generate embeddings for multiple text documents using [Jina Embeddings 2](https://arxiv.org/abs/2310.19923).
4209
+ */
4210
+ JinaV2: {
4211
+ requestBody?: {
4212
+ content: {
4213
+ /**
4214
+ * @example {
4215
+ * "items": [
4216
+ * {
4217
+ * "text": "Hassium is a superheavy element; it has been produced in a laboratory only in very small quantities by fusing heavy nuclei with lighter ones. Natural occurrences of the element have been hypothesised but never found.",
4218
+ * "metadata": {
4219
+ * "group": "8"
4220
+ * }
4221
+ * },
4222
+ * {
4223
+ * "text": "Xenon is also used to search for hypothetical weakly interacting massive particles and as a propellant for ion thrusters in spacecraft.",
4224
+ * "metadata": {
4225
+ * "group": "18"
4226
+ * }
4227
+ * }
4228
+ * ],
4229
+ * "collection_name": "smoke_tests",
4230
+ * "embedded_metadata_keys": [
4231
+ * "group"
4232
+ * ]
4233
+ * }
4234
+ */
4235
+ "application/json": {
4236
+ /** @description Items to embed. */
4237
+ items: {
4238
+ /** @description Text to embed. */
4239
+ text: string;
4240
+ /** @description Metadata that can be used to query the vector store. Ignored if `collection_name` is unset. */
4241
+ metadata?: {
4242
+ [key: string]: unknown;
4243
+ };
4244
+ /** @description Vector store document ID. Ignored if `collection_name` is unset. */
4245
+ doc_id?: string;
4246
+ }[];
4247
+ /** @description Vector store name. */
4248
+ collection_name?: string;
4249
+ /** @description Choose keys from `metadata` to embed with text. */
4250
+ embedded_metadata_keys?: string[];
4251
+ };
4252
+ };
4253
+ };
4254
+ responses: {
4255
+ /** @description OK */
4256
+ 200: {
4257
+ content: {
4258
+ "application/json": {
4259
+ /** @description Generated embeddings. */
4260
+ embeddings: {
4261
+ /** @description Embedding vector. */
4262
+ vector: number[];
4263
+ /** @description Vector store document ID. */
4264
+ doc_id?: string;
4265
+ /** @description Vector store document metadata. */
4266
+ metadata?: {
4267
+ [key: string]: unknown;
4268
+ };
4269
+ }[];
4270
+ };
4271
+ };
4272
+ };
4273
+ };
4274
+ };
4275
+ /**
4276
+ * CLIP
4277
+ * @description Generate embeddings for text or images using [CLIP](https://openai.com/research/clip).
4278
+ */
4279
+ CLIP: {
4280
+ requestBody?: {
4281
+ content: {
4282
+ /**
4283
+ * @example {
4284
+ * "items": [
4285
+ * {
4286
+ * "image_uri": "https://media.substrate.run/docs-fuji-red.jpg"
4287
+ * },
4288
+ * {
4289
+ * "image_uri": "https://media.substrate.run/docs-fuji-blue.jpg"
4290
+ * }
4291
+ * ],
4292
+ * "collection_name": "smoke_tests"
4293
+ * }
4294
+ */
4295
+ "application/json": {
4296
+ /** @description Items to embed. */
4297
+ items: {
4298
+ /** @description Image to embed. */
4299
+ image_uri?: string;
4300
+ /** @description Text to embed. */
4301
+ text?: string;
4302
+ /** @description Metadata that can be used to query the vector store. Ignored if `collection_name` is unset. */
4303
+ metadata?: {
4304
+ [key: string]: unknown;
4305
+ };
4306
+ /** @description Vector store document ID. Ignored if `collection_name` is unset. */
4307
+ doc_id?: string;
4308
+ }[];
4309
+ /** @description Vector store name. */
4310
+ collection_name?: string;
4311
+ /** @description Choose keys from `metadata` to embed with text. Only applies to text items. */
4312
+ embedded_metadata_keys?: string[];
4313
+ };
4314
+ };
4315
+ };
4316
+ responses: {
4317
+ /** @description OK */
4318
+ 200: {
4319
+ content: {
4320
+ "application/json": {
4321
+ /** @description Generated embeddings. */
4322
+ embeddings: {
4323
+ /** @description Embedding vector. */
4324
+ vector: number[];
4325
+ /** @description Vector store document ID. */
4326
+ doc_id?: string;
4327
+ /** @description Vector store document metadata. */
4328
+ metadata?: {
4329
+ [key: string]: unknown;
4330
+ };
4331
+ }[];
4332
+ };
4333
+ };
4334
+ };
4335
+ };
4336
+ };
4337
+ /**
4338
+ * FindOrCreateVectorStore
4339
+ * @description Find a vector store matching the given collection name, or create a new vector store.
4340
+ */
4341
+ FindOrCreateVectorStore: {
4342
+ requestBody?: {
4343
+ content: {
4344
+ /**
4345
+ * @example {
4346
+ * "collection_name": "smoke_tests",
4347
+ * "model": "jina-v2"
4348
+ * }
4349
+ */
4350
+ "application/json": {
4351
+ /** @description Vector store name. */
4352
+ collection_name: string;
4353
+ /**
4354
+ * @description Selected embedding model.
4355
+ * @enum {string}
4356
+ */
4357
+ model: "jina-v2" | "clip";
4358
+ };
4359
+ };
4360
+ };
4361
+ responses: {
4362
+ /** @description Vector store created. */
4363
+ 200: {
4364
+ content: {
4365
+ "application/json": {
4366
+ /** @description Vector store name. */
4367
+ collection_name: string;
4368
+ /**
4369
+ * @description Selected embedding model.
4370
+ * @enum {string}
4371
+ */
4372
+ model: "jina-v2" | "clip";
4373
+ /** @description Number of leaves in the vector store. */
4374
+ num_leaves?: number;
4375
+ };
4376
+ };
4377
+ };
4378
+ };
4379
+ };
4380
+ /**
4381
+ * ListVectorStores
4382
+ * @description List all vector stores.
4383
+ */
4384
+ ListVectorStores: {
4385
+ requestBody?: {
4386
+ content: {
4387
+ /** @example {} */
4388
+ "application/json": Record<string, never>;
4389
+ };
4390
+ };
4391
+ responses: {
4392
+ /** @description List of vector stores. */
4393
+ 200: {
4394
+ content: {
4395
+ "application/json": {
4396
+ /** @description List of vector stores. */
4397
+ items?: {
4398
+ /** @description Vector store name. */
4399
+ collection_name: string;
4400
+ /**
4401
+ * @description Selected embedding model.
4402
+ * @enum {string}
4403
+ */
4404
+ model: "jina-v2" | "clip";
4405
+ /** @description Number of leaves in the vector store. */
4406
+ num_leaves?: number;
4407
+ }[];
4408
+ };
4409
+ };
4410
+ };
4411
+ };
4412
+ };
4413
+ /**
4414
+ * DeleteVectorStore
4415
+ * @description Delete a vector store.
4416
+ */
4417
+ DeleteVectorStore: {
4418
+ requestBody?: {
4419
+ content: {
4420
+ /**
4421
+ * @example {
4422
+ * "collection_name": "fake_store",
4423
+ * "model": "jina-v2"
4424
+ * }
4425
+ */
4426
+ "application/json": {
4427
+ /** @description Vector store name. */
4428
+ collection_name: string;
4429
+ /**
4430
+ * @description Selected embedding model.
4431
+ * @enum {string}
4432
+ */
4433
+ model: "jina-v2" | "clip";
4434
+ };
4435
+ };
4436
+ };
4437
+ responses: {
4438
+ /** @description OK */
4439
+ 200: {
4440
+ content: {
4441
+ "application/json": {
4442
+ /** @description Vector store name. */
4443
+ collection_name: string;
4444
+ /**
4445
+ * @description Selected embedding model.
4446
+ * @enum {string}
4447
+ */
4448
+ model: "jina-v2" | "clip";
4449
+ };
4450
+ };
4451
+ };
4452
+ };
4453
+ };
4454
+ /**
4455
+ * QueryVectorStore
4456
+ * @description Query a vector store for similar vectors.
4457
+ */
4458
+ QueryVectorStore: {
4459
+ requestBody?: {
4460
+ content: {
4461
+ /**
4462
+ * @example {
4463
+ * "collection_name": "smoke_tests",
4464
+ * "model": "jina-v2",
4465
+ * "query_strings": [
4466
+ * "gas",
4467
+ * "metal"
4468
+ * ],
4469
+ * "top_k": 1,
4470
+ * "include_metadata": true
4471
+ * }
4472
+ */
4473
+ "application/json": {
4474
+ /** @description Vector store to query against. */
4475
+ collection_name: string;
4476
+ /**
4477
+ * @description Selected embedding model.
4478
+ * @enum {string}
4479
+ */
4480
+ model: "jina-v2" | "clip";
4481
+ /** @description Texts to embed and use for the query. */
4482
+ query_strings?: string[];
4483
+ /** @description Image URIs to embed and use for the query. */
4484
+ query_image_uris?: string[];
4485
+ /** @description Vectors to use for the query. */
4486
+ query_vectors?: number[][];
4487
+ /** @description Document IDs to use for the query. */
4488
+ query_ids?: string[];
4489
+ /**
4490
+ * @description Number of results to return.
4491
+ * @default 10
4492
+ */
4493
+ top_k?: number;
4494
+ /**
4495
+ * @description The size of the dynamic candidate list for searching the index graph.
4496
+ * @default 40
4497
+ */
4498
+ ef_search?: number;
4499
+ /**
4500
+ * @description The number of leaves in the index tree to search.
4501
+ * @default 40
4502
+ */
4503
+ num_leaves_to_search?: number;
4504
+ /**
4505
+ * @description Include the values of the vectors in the response.
4506
+ * @default false
4507
+ */
4508
+ include_values?: boolean;
4509
+ /**
4510
+ * @description Include the metadata of the vectors in the response.
4511
+ * @default false
4512
+ */
4513
+ include_metadata?: boolean;
4514
+ /** @description Filter metadata by key-value pairs. */
4515
+ filters?: {
4516
+ [key: string]: unknown;
4517
+ };
4518
+ };
4519
+ };
4520
+ };
4521
+ responses: {
4522
+ /** @description Query results. */
4523
+ 200: {
4524
+ content: {
4525
+ "application/json": {
4526
+ /** @description Query results. */
4527
+ results: {
4528
+ /** @description Document ID. */
4529
+ id: string;
4530
+ /**
4531
+ * Format: float
4532
+ * @description Similarity score.
4533
+ */
4534
+ distance: number;
4535
+ /** @description Embedding vector. */
4536
+ vector?: number[];
4537
+ /** @description Document metadata. */
4538
+ metadata?: {
4539
+ [key: string]: unknown;
4540
+ };
4541
+ }[][];
4542
+ /** @description Vector store name. */
4543
+ collection_name?: string;
4544
+ /**
4545
+ * @description Selected embedding model.
4546
+ * @enum {string}
4547
+ */
4548
+ model?: "jina-v2" | "clip";
4549
+ };
4550
+ };
4551
+ };
4552
+ };
4553
+ };
4554
+ /**
4555
+ * FetchVectors
4556
+ * @description Fetch vectors from a vector store.
4557
+ */
4558
+ FetchVectors: {
4559
+ requestBody?: {
4560
+ content: {
4561
+ /**
4562
+ * @example {
4563
+ * "collection_name": "smoke_tests",
4564
+ * "model": "jina-v2",
4565
+ * "ids": [
4566
+ * "dd8f3774e05d42caa53cfbaa7389c08f"
4567
+ * ]
4568
+ * }
4569
+ */
4570
+ "application/json": {
4571
+ /** @description Vector store name. */
4572
+ collection_name: string;
4573
+ /**
4574
+ * @description Selected embedding model.
4575
+ * @enum {string}
4576
+ */
4577
+ model: "jina-v2" | "clip";
4578
+ /** @description Document IDs to retrieve. */
4579
+ ids: string[];
4580
+ };
4581
+ };
4582
+ };
4583
+ responses: {
4584
+ /** @description Vector data. */
4585
+ 200: {
4586
+ content: {
4587
+ "application/json": {
4588
+ /** @description Retrieved vectors. */
4589
+ vectors: {
4590
+ /** @description Document ID. */
4591
+ id: string;
4592
+ /** @description Embedding vector. */
4593
+ vector: number[];
4594
+ /** @description Document metadata. */
4595
+ metadata: {
4596
+ [key: string]: unknown;
4597
+ };
4598
+ }[];
4599
+ };
4600
+ };
4601
+ };
4602
+ };
4603
+ };
4604
+ /**
4605
+ * UpdateVectors
4606
+ * @description Update vectors in a vector store.
4607
+ */
4608
+ UpdateVectors: {
4609
+ requestBody?: {
4610
+ content: {
4611
+ /**
4612
+ * @example {
4613
+ * "collection_name": "smoke_tests",
4614
+ * "model": "jina-v2",
4615
+ * "vectors": [
4616
+ * {
4617
+ * "id": "dd8f3774e05d42caa53cfbaa7389c08f",
4618
+ * "metadata": {
4619
+ * "appearance": "silvery, blue cast"
4620
+ * }
4621
+ * }
4622
+ * ]
4623
+ * }
4624
+ */
4625
+ "application/json": {
4626
+ /** @description Vector store name. */
4627
+ collection_name: string;
4628
+ /**
4629
+ * @description Selected embedding model.
4630
+ * @enum {string}
4631
+ */
4632
+ model: "jina-v2" | "clip";
4633
+ /** @description Vectors to upsert. */
4634
+ vectors: {
4635
+ /** @description Document ID. */
4636
+ id: string;
4637
+ /** @description Embedding vector. */
4638
+ vector?: number[];
4639
+ /** @description Document metadata. */
4640
+ metadata?: {
4641
+ [key: string]: unknown;
4642
+ };
4643
+ }[];
4644
+ };
4645
+ };
4646
+ };
4647
+ responses: {
4648
+ /** @description Count of updated vectors. */
4649
+ 200: {
4650
+ content: {
4651
+ "application/json": {
4652
+ /** @description Number of vectors modified. */
4653
+ count: number;
4654
+ };
4655
+ };
4656
+ };
4657
+ };
4658
+ };
4659
+ /**
4660
+ * DeleteVectors
4661
+ * @description Delete vectors in a vector store.
4662
+ */
4663
+ DeleteVectors: {
4664
+ requestBody?: {
4665
+ content: {
4666
+ /**
4667
+ * @example {
4668
+ * "collection_name": "smoke_tests",
4669
+ * "model": "jina-v2",
4670
+ * "ids": [
4671
+ * "ac32b9a133dd4e3689004f6e8f0fd6cd",
4672
+ * "629df177c7644062a68bceeff223cefa"
4673
+ * ]
4674
+ * }
4675
+ */
4676
+ "application/json": {
4677
+ /** @description Vector store name. */
4678
+ collection_name: string;
4679
+ /**
4680
+ * @description Selected embedding model.
4681
+ * @enum {string}
4682
+ */
4683
+ model: "jina-v2" | "clip";
4684
+ /** @description Document IDs to delete. */
4685
+ ids: string[];
4686
+ };
4687
+ };
4688
+ };
4689
+ responses: {
4690
+ /** @description Count of deleted vectors. */
4691
+ 200: {
4692
+ content: {
4693
+ "application/json": {
4694
+ /** @description Number of vectors modified. */
4695
+ count: number;
4696
+ };
4697
+ };
4698
+ };
4699
+ };
4700
+ };
4701
+ }