@llumiverse/common 0.20.0 → 0.22.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36) hide show
  1. package/lib/cjs/capability/azure_foundry.js +158 -0
  2. package/lib/cjs/capability/azure_foundry.js.map +1 -0
  3. package/lib/cjs/capability.js +8 -3
  4. package/lib/cjs/capability.js.map +1 -1
  5. package/lib/cjs/options/azure_foundry.js +425 -0
  6. package/lib/cjs/options/azure_foundry.js.map +1 -0
  7. package/lib/cjs/options.js +16 -9
  8. package/lib/cjs/options.js.map +1 -1
  9. package/lib/cjs/types.js +95 -1
  10. package/lib/cjs/types.js.map +1 -1
  11. package/lib/esm/capability/azure_foundry.js +155 -0
  12. package/lib/esm/capability/azure_foundry.js.map +1 -0
  13. package/lib/esm/capability.js +8 -3
  14. package/lib/esm/capability.js.map +1 -1
  15. package/lib/esm/options/azure_foundry.js +421 -0
  16. package/lib/esm/options/azure_foundry.js.map +1 -0
  17. package/lib/esm/options.js +16 -9
  18. package/lib/esm/options.js.map +1 -1
  19. package/lib/esm/types.js +94 -0
  20. package/lib/esm/types.js.map +1 -1
  21. package/lib/types/capability/azure_foundry.d.ts +7 -0
  22. package/lib/types/capability/azure_foundry.d.ts.map +1 -0
  23. package/lib/types/capability.d.ts +3 -3
  24. package/lib/types/capability.d.ts.map +1 -1
  25. package/lib/types/options/azure_foundry.d.ts +52 -0
  26. package/lib/types/options/azure_foundry.d.ts.map +1 -0
  27. package/lib/types/options.d.ts +2 -2
  28. package/lib/types/options.d.ts.map +1 -1
  29. package/lib/types/types.d.ts +22 -0
  30. package/lib/types/types.d.ts.map +1 -1
  31. package/package.json +5 -5
  32. package/src/capability/azure_foundry.ts +179 -0
  33. package/src/capability.ts +11 -7
  34. package/src/options/azure_foundry.ts +485 -0
  35. package/src/options.ts +17 -11
  36. package/src/types.ts +113 -0
@@ -0,0 +1,485 @@
1
+ import { ModelOptionsInfo, ModelOptionInfoItem, ModelOptions, OptionType, SharedOptions } from "../types.js";
2
+
3
+ // Helper function to parse composite model IDs
4
+ function parseAzureFoundryModelId(compositeId: string): { deploymentName: string; baseModel: string } {
5
+ const parts = compositeId.split('::');
6
+ if (parts.length === 2) {
7
+ return {
8
+ deploymentName: parts[0],
9
+ baseModel: parts[1]
10
+ };
11
+ }
12
+
13
+ // Backwards compatibility: if no delimiter found, treat as deployment name
14
+ return {
15
+ deploymentName: compositeId,
16
+ baseModel: compositeId
17
+ };
18
+ }
19
+
20
// Union type of all Azure Foundry options
/** Discriminated union (on `_option_id`) of every Azure Foundry option shape. */
export type AzureFoundryOptions = AzureFoundryOpenAIOptions | AzureFoundryDeepSeekOptions | AzureFoundryThinkingOptions | AzureFoundryTextOptions | AzureFoundryImageOptions;

/** Options for OpenAI chat models (GPT-4, GPT-4o, GPT-3.5) deployed on Azure Foundry. */
export interface AzureFoundryOpenAIOptions {
    _option_id: "azure-foundry-openai";
    /** Maximum number of tokens to generate. */
    max_tokens?: number;
    /** Sampling temperature; controls randomness in the output. */
    temperature?: number;
    /** Nucleus sampling parameter. */
    top_p?: number;
    /** Penalize new tokens based on their presence in the text. */
    presence_penalty?: number;
    /** Penalize new tokens based on their frequency in the text. */
    frequency_penalty?: number;
    /** Sequences where the model will stop generating. */
    stop_sequence?: string[];
    /** Controls how the model processes input images (vision models only). */
    image_detail?: "low" | "high" | "auto";
    /** How much effort the model should put into reasoning. */
    reasoning_effort?: "low" | "medium" | "high";
}

/** Options for DeepSeek (R1) models deployed on Azure Foundry. */
export interface AzureFoundryDeepSeekOptions {
    _option_id: "azure-foundry-deepseek";
    /** Maximum number of tokens to generate. */
    max_tokens?: number;
    /** Sampling temperature; lower values (0.3-0.7) are recommended for R1. */
    temperature?: number;
    /** Nucleus sampling parameter. */
    top_p?: number;
    /** Sequences where the model will stop generating. */
    stop_sequence?: string[];
}

/** Options for O-series reasoning ("thinking") models (o1/o3/o4). */
export interface AzureFoundryThinkingOptions {
    _option_id: "azure-foundry-thinking";
    /** Maximum number of tokens to generate. */
    max_tokens?: number;
    /** Sampling temperature; controls randomness in the output. */
    temperature?: number;
    /** Nucleus sampling parameter. */
    top_p?: number;
    /** Sequences where the model will stop generating. */
    stop_sequence?: string[];
    /** How much effort the model should put into reasoning. */
    reasoning_effort?: "low" | "medium" | "high";
    /** Controls how the model processes input images (vision models only). */
    image_detail?: "low" | "high" | "auto";
}

/** Options for general text models (Claude, Llama, Mistral, Phi, Jamba, Cohere, ...). */
export interface AzureFoundryTextOptions {
    _option_id: "azure-foundry-text";
    /** Maximum number of tokens to generate. */
    max_tokens?: number;
    /** Sampling temperature; controls randomness in the output. */
    temperature?: number;
    /** Nucleus sampling parameter. */
    top_p?: number;
    /** Limits token sampling to the top k tokens. */
    top_k?: number;
    /** Penalize new tokens based on their presence in the text. */
    presence_penalty?: number;
    /** Penalize new tokens based on their frequency in the text. */
    frequency_penalty?: number;
    /** Sequences where the model will stop generating. */
    stop_sequence?: string[];
    /** Random seed for reproducible generation. */
    seed?: number;
}

/** Options for image generation models (DALL-E / gpt-image). */
export interface AzureFoundryImageOptions {
    _option_id: "azure-foundry-image";
    /** Output image width in pixels. */
    width?: number;
    /** Output image height in pixels. */
    height?: number;
    /** The quality of the generated image. */
    quality?: "standard" | "hd";
    /** The style of the generated image. */
    style?: "vivid" | "natural";
    /** The format of the response. */
    response_format?: "url" | "b64_json";
    /** The size of the generated image. */
    size?: "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792";
}
74
+
75
+ export function getMaxTokensLimitAzureFoundry(model: string): number | undefined {
76
+ // Extract base model from composite ID (deployment::baseModel)
77
+ const { baseModel } = parseAzureFoundryModelId(model);
78
+ const modelLower = baseModel.toLowerCase();
79
+ // GPT models
80
+ if (modelLower.includes("gpt-4o")) {
81
+ if (modelLower.includes("mini")) {
82
+ return 16384;
83
+ }
84
+ return 16384;
85
+ }
86
+ if (modelLower.includes("gpt-4")) {
87
+ if (modelLower.includes("turbo")) {
88
+ return 4096;
89
+ }
90
+ if (modelLower.includes("32k")) {
91
+ return 32768;
92
+ }
93
+ return 8192;
94
+ }
95
+ if (modelLower.includes("gpt-35") || modelLower.includes("gpt-3.5")) {
96
+ return 4096;
97
+ }
98
+ // O-series models
99
+ if (modelLower.includes("o1")) {
100
+ if (modelLower.includes("preview")) {
101
+ return 32768;
102
+ }
103
+ if (modelLower.includes("mini")) {
104
+ return 65536;
105
+ }
106
+ return 100000;
107
+ }
108
+ if (modelLower.includes("o3")) {
109
+ if (modelLower.includes("mini")) {
110
+ return 100000;
111
+ }
112
+ return 100000;
113
+ }
114
+ if (modelLower.includes("o4")) {
115
+ return 100000;
116
+ }
117
+ // DeepSeek models
118
+ if (modelLower.includes("deepseek")) {
119
+ if (modelLower.includes("r1")) {
120
+ return 163840;
121
+ }
122
+ if (modelLower.includes("v3")) {
123
+ return 131072;
124
+ }
125
+ }
126
+ // Claude models
127
+ if (modelLower.includes("claude")) {
128
+ if (modelLower.includes("3-5") || modelLower.includes("3-7")) {
129
+ return 8192;
130
+ }
131
+ if (modelLower.includes("3")) {
132
+ return 4096;
133
+ }
134
+ return 4096;
135
+ }
136
+ // Llama models
137
+ if (modelLower.includes("llama")) {
138
+ if (modelLower.includes("3.1") || modelLower.includes("3.3")) {
139
+ return 8192;
140
+ }
141
+ if (modelLower.includes("4")) {
142
+ return 1000000; // 1M context
143
+ }
144
+ return 8192;
145
+ }
146
+ // Mistral models
147
+ if (modelLower.includes("mistral")) {
148
+ if (modelLower.includes("large")) {
149
+ return 4096;
150
+ }
151
+ if (modelLower.includes("small")) {
152
+ return 4096;
153
+ }
154
+ return 4096;
155
+ }
156
+ // Phi models
157
+ if (modelLower.includes("phi")) {
158
+ return 4096;
159
+ }
160
+ // AI21 Jamba models
161
+ if (modelLower.includes("jamba")) {
162
+ return 4096;
163
+ }
164
+ // Cohere models
165
+ if (modelLower.includes("cohere")) {
166
+ if (modelLower.includes("command-a")) {
167
+ return 8000;
168
+ }
169
+ return 4096;
170
+ }
171
+ // Grok models
172
+ if (modelLower.includes("grok")) {
173
+ return 131072;
174
+ }
175
+ return undefined;
176
+ }
177
+
178
/**
 * Build the option schema (ModelOptionsInfo) describing which generation
 * options an Azure Foundry model supports, with their types, ranges and
 * defaults.
 *
 * The model family is detected by case-insensitive substring matching on the
 * base-model part of a composite id ("deployment::baseModel"), in priority
 * order: image models, O-series "thinking" models, DeepSeek R1, OpenAI GPT
 * models, then a generic text-model schema with per-family extras.
 *
 * @param model composite model id ("deployment::baseModel") or a plain deployment name
 * @param _option currently unused; reserved for option-dependent schemas
 * @returns the option schema for the detected model family
 */
export function getAzureFoundryOptions(model: string, _option?: ModelOptions): ModelOptionsInfo {
    // Extract base model from composite ID (deployment::baseModel)
    const { baseModel } = parseAzureFoundryModelId(model);
    const modelLower = baseModel.toLowerCase();
    // May be undefined for unknown families; the max_tokens option then has no upper bound
    const max_tokens_limit = getMaxTokensLimitAzureFoundry(model);
    // Image generation models
    if (modelLower.includes("dall-e") || modelLower.includes("gpt-image")) {
        return {
            _option_id: "azure-foundry-image",
            options: [
                {
                    name: "size",
                    type: OptionType.enum,
                    enum: {
                        "256x256": "256x256",
                        "512x512": "512x512",
                        "1024x1024": "1024x1024",
                        "1792x1024": "1792x1024",
                        "1024x1792": "1024x1792"
                    },
                    default: "1024x1024",
                    description: "The size of the generated image"
                },
                {
                    name: "quality",
                    type: OptionType.enum,
                    enum: { "Standard": "standard", "HD": "hd" },
                    default: "standard",
                    description: "The quality of the generated image"
                },
                {
                    name: "style",
                    type: OptionType.enum,
                    enum: { "Vivid": "vivid", "Natural": "natural" },
                    default: "vivid",
                    description: "The style of the generated image"
                },
                {
                    name: "response_format",
                    type: OptionType.enum,
                    enum: { "URL": "url", "Base64 JSON": "b64_json" },
                    default: "url",
                    description: "The format of the response"
                }
            ]
        };
    }
    // Vision model options: image_detail is only offered for models that
    // isVisionModel() recognizes as accepting image inputs
    const visionOptions: ModelOptionInfoItem[] = isVisionModel(modelLower) ? [
        {
            name: "image_detail",
            type: OptionType.enum,
            enum: { "Low": "low", "High": "high", "Auto": "auto" },
            default: "auto",
            description: "Controls how the model processes input images"
        }
    ] : [];
    // O-series and thinking models
    if (modelLower.includes("o1") || modelLower.includes("o3") || modelLower.includes("o4")) {
        // reasoning_effort is only exposed for o3 and the full o1 (not mini/preview)
        const reasoningOptions: ModelOptionInfoItem[] = (modelLower.includes("o3") || isO1Full(modelLower)) ? [
            {
                name: "reasoning_effort",
                type: OptionType.enum,
                enum: { "Low": "low", "Medium": "medium", "High": "high" },
                default: "medium",
                description: "How much effort the model should put into reasoning"
            }
        ] : [];
        return {
            _option_id: "azure-foundry-thinking",
            options: [
                {
                    name: SharedOptions.max_tokens,
                    type: OptionType.numeric,
                    min: 1,
                    max: max_tokens_limit,
                    integer: true,
                    description: "The maximum number of tokens to generate"
                },
                {
                    name: SharedOptions.temperature,
                    type: OptionType.numeric,
                    min: 0.0,
                    max: 2.0,
                    default: 1.0,
                    step: 0.1,
                    description: "Controls randomness in the output"
                },
                {
                    name: SharedOptions.top_p,
                    type: OptionType.numeric,
                    min: 0,
                    max: 1,
                    step: 0.1,
                    description: "Nucleus sampling parameter"
                },
                {
                    name: SharedOptions.stop_sequence,
                    type: OptionType.string_list,
                    value: [],
                    description: "Sequences where the model will stop generating"
                },
                ...reasoningOptions,
                ...visionOptions
            ]
        };
    }
    // DeepSeek R1 models
    if (modelLower.includes("deepseek") && modelLower.includes("r1")) {
        return {
            _option_id: "azure-foundry-deepseek",
            options: [
                {
                    name: SharedOptions.max_tokens,
                    type: OptionType.numeric,
                    min: 1,
                    max: max_tokens_limit,
                    integer: true,
                    description: "The maximum number of tokens to generate"
                },
                {
                    name: SharedOptions.temperature,
                    type: OptionType.numeric,
                    min: 0.0,
                    max: 2.0,
                    default: 0.7,
                    step: 0.1,
                    description: "Lower temperatures recommended for DeepSeek R1 (0.3-0.7)"
                },
                {
                    name: SharedOptions.top_p,
                    type: OptionType.numeric,
                    min: 0,
                    max: 1,
                    step: 0.1,
                    description: "Nucleus sampling parameter"
                },
                {
                    name: SharedOptions.stop_sequence,
                    type: OptionType.string_list,
                    value: [],
                    description: "Sequences where the model will stop generating"
                }
            ]
        };
    }
    // OpenAI models (GPT-4, GPT-4o, GPT-3.5)
    if (modelLower.includes("gpt-")) {
        return {
            _option_id: "azure-foundry-openai",
            options: [
                {
                    name: SharedOptions.max_tokens,
                    type: OptionType.numeric,
                    min: 1,
                    max: max_tokens_limit,
                    integer: true,
                    step: 200,
                    description: "The maximum number of tokens to generate"
                },
                {
                    name: SharedOptions.temperature,
                    type: OptionType.numeric,
                    min: 0.0,
                    max: 2.0,
                    default: 0.7,
                    step: 0.1,
                    description: "Controls randomness in the output"
                },
                {
                    name: SharedOptions.top_p,
                    type: OptionType.numeric,
                    min: 0,
                    max: 1,
                    step: 0.1,
                    description: "Nucleus sampling parameter"
                },
                {
                    name: SharedOptions.presence_penalty,
                    type: OptionType.numeric,
                    min: -2.0,
                    max: 2.0,
                    step: 0.1,
                    description: "Penalize new tokens based on their presence in the text"
                },
                {
                    name: SharedOptions.frequency_penalty,
                    type: OptionType.numeric,
                    min: -2.0,
                    max: 2.0,
                    step: 0.1,
                    description: "Penalize new tokens based on their frequency in the text"
                },
                {
                    name: SharedOptions.stop_sequence,
                    type: OptionType.string_list,
                    value: [],
                    description: "Sequences where the model will stop generating"
                },
                ...visionOptions
            ]
        };
    }
    // General text models (Claude, Llama, Mistral, Phi, etc.)
    // Base schema shared by every text-model family
    const baseOptions: ModelOptionInfoItem[] = [
        {
            name: SharedOptions.max_tokens,
            type: OptionType.numeric,
            min: 1,
            max: max_tokens_limit,
            integer: true,
            step: 200,
            description: "The maximum number of tokens to generate"
        },
        {
            name: SharedOptions.temperature,
            type: OptionType.numeric,
            min: 0.0,
            max: 2.0,
            default: 0.7,
            step: 0.1,
            description: "Controls randomness in the output"
        },
        {
            name: SharedOptions.top_p,
            type: OptionType.numeric,
            min: 0,
            max: 1,
            step: 0.1,
            description: "Nucleus sampling parameter"
        },
        {
            name: SharedOptions.stop_sequence,
            type: OptionType.string_list,
            value: [],
            description: "Sequences where the model will stop generating"
        }
    ];
    // Add model-specific options
    const additionalOptions: ModelOptionInfoItem[] = [];
    // Add top_k for certain models
    if (modelLower.includes("claude") || modelLower.includes("mistral") || modelLower.includes("phi")) {
        additionalOptions.push({
            name: SharedOptions.top_k,
            type: OptionType.numeric,
            min: 1,
            integer: true,
            step: 1,
            description: "Limits token sampling to the top k tokens"
        });
    }

    // Add penalty options for certain models
    if (modelLower.includes("claude") || modelLower.includes("jamba") || modelLower.includes("cohere")) {
        additionalOptions.push(
            {
                name: SharedOptions.presence_penalty,
                type: OptionType.numeric,
                min: -2.0,
                max: 2.0,
                step: 0.1,
                description: "Penalize new tokens based on their presence in the text"
            },
            {
                name: SharedOptions.frequency_penalty,
                type: OptionType.numeric,
                min: -2.0,
                max: 2.0,
                step: 0.1,
                description: "Penalize new tokens based on their frequency in the text"
            }
        );
    }
    // Add seed option for certain models
    // NOTE(review): "gemini" appears here but has no entry in the max-token
    // table above — confirm Gemini is actually reachable on this provider
    if (modelLower.includes("mistral") || modelLower.includes("phi") || modelLower.includes("gemini")) {
        additionalOptions.push({
            name: SharedOptions.seed,
            type: OptionType.numeric,
            integer: true,
            description: "Random seed for reproducible generation"
        });
    }
    return {
        _option_id: "azure-foundry-text",
        options: [
            ...baseOptions,
            ...additionalOptions,
            ...visionOptions
        ]
    };
}
469
+
470
+ function isVisionModel(modelLower: string): boolean {
471
+ return modelLower.includes("gpt-4o") ||
472
+ modelLower.includes("gpt-4-turbo") ||
473
+ modelLower.includes("claude-3") ||
474
+ modelLower.includes("llama-3.2") ||
475
+ modelLower.includes("llama-4") ||
476
+ modelLower.includes("gemini") ||
477
+ isO1Full(modelLower);
478
+ }
479
+
480
+ function isO1Full(modelLower: string): boolean {
481
+ if (modelLower.includes("o1")) {
482
+ return !modelLower.includes("mini") && !modelLower.includes("preview");
483
+ }
484
+ return false;
485
+ }
package/src/options.ts CHANGED
@@ -3,18 +3,24 @@ import { getGroqOptions } from "./options/groq.js";
3
3
  import { getOpenAiOptions } from "./options/openai.js";
4
4
  import { getVertexAiOptions } from "./options/vertexai.js";
5
5
  import { textOptionsFallback } from "./options/fallback.js";
6
- import { ModelOptionsInfo, ModelOptions } from "./types.js";
6
+ import { ModelOptionsInfo, ModelOptions, Providers } from "./types.js";
7
+ import { getAzureFoundryOptions } from "./options/azure_foundry.js";
7
8
 
8
- export function getOptions(model: string, provider?: string, options?: ModelOptions): ModelOptionsInfo {
9
- switch (provider?.toLowerCase()) {
10
- case "bedrock":
11
- return getBedrockOptions(model ?? "", options);
12
- case "vertexai":
13
- return getVertexAiOptions(model ?? "", options);
14
- case "openai":
15
- return getOpenAiOptions(model ?? "", options);
16
- case "groq":
17
- return getGroqOptions(model ?? "", options);
9
+ export function getOptions(model: string, provider?: string | Providers, options?: ModelOptions): ModelOptionsInfo {
10
+ if(!provider) {
11
+ return textOptionsFallback;
12
+ }
13
+ switch (provider.toLowerCase()) {
14
+ case Providers.bedrock:
15
+ return getBedrockOptions(model, options);
16
+ case Providers.vertexai:
17
+ return getVertexAiOptions(model, options);
18
+ case Providers.openai:
19
+ return getOpenAiOptions(model, options);
20
+ case Providers.groq:
21
+ return getGroqOptions(model, options);
22
+ case Providers.azure_foundry:
23
+ return getAzureFoundryOptions(model, options);
18
24
  default:
19
25
  return textOptionsFallback;
20
26
  }
package/src/types.ts CHANGED
@@ -4,6 +4,119 @@ import { GroqOptions } from './options/groq.js';
4
4
  import { OpenAiOptions } from './options/openai.js';
5
5
  import { VertexAIOptions } from './options/vertexai.js';
6
6
 
7
// ============== Provider details ===============

/**
 * Identifiers for every LLM provider supported by the library.
 * The string values are lowercase and are also used as wire/config ids
 * (e.g. matched against a lowercased provider string when resolving options).
 */
export enum Providers {
    openai = 'openai',
    azure_openai = 'azure_openai',
    azure_foundry = 'azure_foundry',
    huggingface_ie = 'huggingface_ie',
    replicate = 'replicate',
    bedrock = 'bedrock',
    vertexai = 'vertexai',
    togetherai = 'togetherai',
    mistralai = 'mistralai',
    groq = 'groq',
    watsonx = 'watsonx'
}
22
+
23
/** Static configuration metadata describing one provider. */
export interface ProviderParams {
    /** The provider identifier. */
    id: Providers;
    /** Human-readable display name. */
    name: string;
    /** Whether an API key must be supplied to configure this provider. */
    requiresApiKey: boolean;
    /** Whether an endpoint URL must be supplied to configure this provider. */
    requiresEndpointUrl: boolean;
    /** Placeholder/hint text for the endpoint field (e.g. a region name). */
    endpointPlaceholder?: string;
    // NOTE(review): presumably indicates whether the provider's model catalog
    // can be searched — confirm against consumers of this flag
    supportSearch?: boolean;
}
31
+
32
+ export const ProviderList: Record<Providers, ProviderParams> = {
33
+ openai:
34
+ {
35
+ id: Providers.openai,
36
+ name: "OpenAI",
37
+ requiresApiKey: true,
38
+ requiresEndpointUrl: false,
39
+ supportSearch: false,
40
+ },
41
+ azure_openai:
42
+ {
43
+ id: Providers.azure_openai,
44
+ name: "Azure OpenAI",
45
+ requiresApiKey: false,
46
+ requiresEndpointUrl: true,
47
+ supportSearch: false,
48
+ },
49
+ azure_foundry:
50
+ {
51
+ id: Providers.azure_foundry,
52
+ name: "Azure Foundry",
53
+ requiresApiKey: true,
54
+ requiresEndpointUrl: true,
55
+ supportSearch: false,
56
+ },
57
+ huggingface_ie:
58
+ {
59
+ id: Providers.huggingface_ie,
60
+ name: "HuggingFace Inference Endpoint",
61
+ requiresApiKey: true,
62
+ requiresEndpointUrl: true,
63
+ },
64
+ replicate:
65
+ {
66
+ id: Providers.replicate,
67
+ name: "Repicate",
68
+ requiresApiKey: true,
69
+ requiresEndpointUrl: false,
70
+ supportSearch: true,
71
+ },
72
+ bedrock:
73
+ {
74
+ id: Providers.bedrock,
75
+ name: "AWS Bedrock",
76
+ requiresApiKey: false,
77
+ requiresEndpointUrl: false,
78
+ endpointPlaceholder: "region name (eg. us-east-1)",
79
+ supportSearch: false,
80
+ },
81
+ vertexai: {
82
+ id: Providers.vertexai,
83
+ name: "Google Vertex AI",
84
+ requiresApiKey: false,
85
+ requiresEndpointUrl: false,
86
+ supportSearch: false,
87
+ },
88
+ togetherai: {
89
+ id: Providers.togetherai,
90
+ name: "Together AI",
91
+ requiresApiKey: false,
92
+ requiresEndpointUrl: false,
93
+ supportSearch: false,
94
+ },
95
+ mistralai: {
96
+ id: Providers.mistralai,
97
+ name: "Mistral AI",
98
+ requiresApiKey: false,
99
+ requiresEndpointUrl: false,
100
+ supportSearch: false,
101
+ },
102
+ groq: {
103
+ id: Providers.groq,
104
+ name: "Groq Cloud",
105
+ requiresApiKey: false,
106
+ requiresEndpointUrl: false,
107
+ supportSearch: false,
108
+ },
109
+ watsonx: {
110
+ id: Providers.watsonx,
111
+ name: "IBM WatsonX",
112
+ requiresApiKey: true,
113
+ requiresEndpointUrl: true,
114
+ supportSearch: false
115
+ },
116
+ }
117
+
118
+ // ============== Embeddings ===============
119
+
7
120
  export interface EmbeddingsOptions {
8
121
  /**
9
122
  * The text to generate the embeddings for. One of text or image is required.