@ai-sdk/fal 3.0.0-beta.3 → 3.0.0-beta.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only. It reflects the changes between package versions as they appear in their respective public registries.
@@ -1,24 +1,24 @@
1
1
  import {
2
- Experimental_VideoModelV3,
3
- ImageModelV3,
4
2
  NoSuchModelError,
5
- ProviderV3,
6
- SpeechModelV3,
7
- TranscriptionModelV3,
3
+ type Experimental_VideoModelV4,
4
+ type ImageModelV4,
5
+ type ProviderV4,
6
+ type SpeechModelV4,
7
+ type TranscriptionModelV4,
8
8
  } from '@ai-sdk/provider';
9
- import type { FetchFunction } from '@ai-sdk/provider-utils';
10
9
  import {
11
10
  withoutTrailingSlash,
12
11
  withUserAgentSuffix,
12
+ type FetchFunction,
13
13
  } from '@ai-sdk/provider-utils';
14
14
  import { FalImageModel } from './fal-image-model';
15
- import { FalImageModelId } from './fal-image-settings';
16
- import { FalTranscriptionModelId } from './fal-transcription-options';
15
+ import type { FalImageModelId } from './fal-image-settings';
16
+ import type { FalTranscriptionModelId } from './fal-transcription-options';
17
17
  import { FalTranscriptionModel } from './fal-transcription-model';
18
- import { FalSpeechModelId } from './fal-speech-settings';
18
+ import type { FalSpeechModelId } from './fal-speech-settings';
19
19
  import { FalSpeechModel } from './fal-speech-model';
20
20
  import { FalVideoModel } from './fal-video-model';
21
- import { FalVideoModelId } from './fal-video-settings';
21
+ import type { FalVideoModelId } from './fal-video-settings';
22
22
  import { VERSION } from './version';
23
23
 
24
24
  export interface FalProviderSettings {
@@ -46,36 +46,36 @@ export interface FalProviderSettings {
46
46
  fetch?: FetchFunction;
47
47
  }
48
48
 
49
- export interface FalProvider extends ProviderV3 {
49
+ export interface FalProvider extends ProviderV4 {
50
50
  /**
51
51
  * Creates a model for image generation.
52
52
  */
53
- image(modelId: FalImageModelId): ImageModelV3;
53
+ image(modelId: FalImageModelId): ImageModelV4;
54
54
 
55
55
  /**
56
56
  * Creates a model for image generation.
57
57
  */
58
- imageModel(modelId: FalImageModelId): ImageModelV3;
58
+ imageModel(modelId: FalImageModelId): ImageModelV4;
59
59
 
60
60
  /**
61
61
  * Creates a model for transcription.
62
62
  */
63
- transcription(modelId: FalTranscriptionModelId): TranscriptionModelV3;
63
+ transcription(modelId: FalTranscriptionModelId): TranscriptionModelV4;
64
64
 
65
65
  /**
66
66
  * Creates a model for video generation.
67
67
  */
68
- video(modelId: FalVideoModelId): Experimental_VideoModelV3;
68
+ video(modelId: FalVideoModelId): Experimental_VideoModelV4;
69
69
 
70
70
  /**
71
71
  * Creates a model for video generation.
72
72
  */
73
- videoModel(modelId: FalVideoModelId): Experimental_VideoModelV3;
73
+ videoModel(modelId: FalVideoModelId): Experimental_VideoModelV4;
74
74
 
75
75
  /**
76
76
  * Creates a model for speech generation.
77
77
  */
78
- speech(modelId: FalSpeechModelId): SpeechModelV3;
78
+ speech(modelId: FalSpeechModelId): SpeechModelV4;
79
79
 
80
80
  /**
81
81
  * @deprecated Use `embeddingModel` instead.
@@ -182,7 +182,7 @@ export function createFal(options: FalProviderSettings = {}): FalProvider {
182
182
  };
183
183
 
184
184
  return {
185
- specificationVersion: 'v3' as const,
185
+ specificationVersion: 'v4' as const,
186
186
  imageModel: createImageModel,
187
187
  image: createImageModel,
188
188
  languageModel: (modelId: string) => {
@@ -1,4 +1,4 @@
1
- import { SpeechModelV3, SharedV3Warning } from '@ai-sdk/provider';
1
+ import type { SpeechModelV4, SharedV4Warning } from '@ai-sdk/provider';
2
2
  import {
3
3
  combineHeaders,
4
4
  createBinaryResponseHandler,
@@ -7,12 +7,15 @@ import {
7
7
  getFromApi,
8
8
  parseProviderOptions,
9
9
  postJsonToApi,
10
+ serializeModelOptions,
11
+ WORKFLOW_SERIALIZE,
12
+ WORKFLOW_DESERIALIZE,
10
13
  } from '@ai-sdk/provider-utils';
11
14
  import { z } from 'zod/v4';
12
- import { FalConfig } from './fal-config';
15
+ import type { FalConfig } from './fal-config';
13
16
  import { falFailedResponseHandler } from './fal-error';
14
17
  import { FAL_EMOTIONS, FAL_LANGUAGE_BOOSTS } from './fal-api-types';
15
- import { FalSpeechModelId } from './fal-speech-settings';
18
+ import type { FalSpeechModelId } from './fal-speech-settings';
16
19
 
17
20
  const falSpeechModelOptionsSchema = z.looseObject({
18
21
  voice_setting: z
@@ -39,13 +42,27 @@ interface FalSpeechModelConfig extends FalConfig {
39
42
  };
40
43
  }
41
44
 
42
- export class FalSpeechModel implements SpeechModelV3 {
43
- readonly specificationVersion = 'v3';
45
+ export class FalSpeechModel implements SpeechModelV4 {
46
+ readonly specificationVersion = 'v4';
44
47
 
45
48
  get provider(): string {
46
49
  return this.config.provider;
47
50
  }
48
51
 
52
+ static [WORKFLOW_SERIALIZE](model: FalSpeechModel) {
53
+ return serializeModelOptions({
54
+ modelId: model.modelId,
55
+ config: model.config,
56
+ });
57
+ }
58
+
59
+ static [WORKFLOW_DESERIALIZE](options: {
60
+ modelId: FalSpeechModelId;
61
+ config: FalSpeechModelConfig;
62
+ }) {
63
+ return new FalSpeechModel(options.modelId, options.config);
64
+ }
65
+
49
66
  constructor(
50
67
  readonly modelId: FalSpeechModelId,
51
68
  private readonly config: FalSpeechModelConfig,
@@ -58,8 +75,8 @@ export class FalSpeechModel implements SpeechModelV3 {
58
75
  speed,
59
76
  language,
60
77
  providerOptions,
61
- }: Parameters<SpeechModelV3['doGenerate']>[0]) {
62
- const warnings: SharedV3Warning[] = [];
78
+ }: Parameters<SpeechModelV4['doGenerate']>[0]) {
79
+ const warnings: SharedV4Warning[] = [];
63
80
 
64
81
  const falOptions = await parseProviderOptions({
65
82
  provider: 'fal',
@@ -98,8 +115,8 @@ export class FalSpeechModel implements SpeechModelV3 {
98
115
  }
99
116
 
100
117
  async doGenerate(
101
- options: Parameters<SpeechModelV3['doGenerate']>[0],
102
- ): Promise<Awaited<ReturnType<SpeechModelV3['doGenerate']>>> {
118
+ options: Parameters<SpeechModelV4['doGenerate']>[0],
119
+ ): Promise<Awaited<ReturnType<SpeechModelV4['doGenerate']>>> {
103
120
  const currentDate = this.config._internal?.currentDate?.() ?? new Date();
104
121
  const { requestBody, warnings } = await this.getArgs(options);
105
122
 
@@ -112,7 +129,7 @@ export class FalSpeechModel implements SpeechModelV3 {
112
129
  path: `https://fal.run/${this.modelId}`,
113
130
  modelId: this.modelId,
114
131
  }),
115
- headers: combineHeaders(this.config.headers(), options.headers),
132
+ headers: combineHeaders(this.config.headers?.(), options.headers),
116
133
  body: requestBody,
117
134
  failedResponseHandler: falFailedResponseHandler,
118
135
  successfulResponseHandler: createJsonResponseHandler(
@@ -1,7 +1,7 @@
1
1
  import {
2
2
  AISDKError,
3
- TranscriptionModelV3,
4
- SharedV3Warning,
3
+ type TranscriptionModelV4,
4
+ type SharedV4Warning,
5
5
  } from '@ai-sdk/provider';
6
6
  import {
7
7
  combineHeaders,
@@ -12,12 +12,15 @@ import {
12
12
  getFromApi,
13
13
  parseProviderOptions,
14
14
  postJsonToApi,
15
+ serializeModelOptions,
16
+ WORKFLOW_SERIALIZE,
17
+ WORKFLOW_DESERIALIZE,
15
18
  } from '@ai-sdk/provider-utils';
16
19
  import { z } from 'zod/v4';
17
- import { FalConfig } from './fal-config';
20
+ import type { FalConfig } from './fal-config';
18
21
  import { falErrorDataSchema, falFailedResponseHandler } from './fal-error';
19
- import { FalTranscriptionModelId } from './fal-transcription-options';
20
- import { FalTranscriptionAPITypes } from './fal-api-types';
22
+ import type { FalTranscriptionModelId } from './fal-transcription-options';
23
+ import type { FalTranscriptionAPITypes } from './fal-api-types';
21
24
 
22
25
  // https://fal.ai/models/fal-ai/whisper/api?platform=http
23
26
  const falTranscriptionModelOptionsSchema = z.object({
@@ -67,13 +70,27 @@ interface FalTranscriptionModelConfig extends FalConfig {
67
70
  };
68
71
  }
69
72
 
70
- export class FalTranscriptionModel implements TranscriptionModelV3 {
71
- readonly specificationVersion = 'v3';
73
+ export class FalTranscriptionModel implements TranscriptionModelV4 {
74
+ readonly specificationVersion = 'v4';
72
75
 
73
76
  get provider(): string {
74
77
  return this.config.provider;
75
78
  }
76
79
 
80
+ static [WORKFLOW_SERIALIZE](model: FalTranscriptionModel) {
81
+ return serializeModelOptions({
82
+ modelId: model.modelId,
83
+ config: model.config,
84
+ });
85
+ }
86
+
87
+ static [WORKFLOW_DESERIALIZE](options: {
88
+ modelId: FalTranscriptionModelId;
89
+ config: FalTranscriptionModelConfig;
90
+ }) {
91
+ return new FalTranscriptionModel(options.modelId, options.config);
92
+ }
93
+
77
94
  constructor(
78
95
  readonly modelId: FalTranscriptionModelId,
79
96
  private readonly config: FalTranscriptionModelConfig,
@@ -81,8 +98,8 @@ export class FalTranscriptionModel implements TranscriptionModelV3 {
81
98
 
82
99
  private async getArgs({
83
100
  providerOptions,
84
- }: Parameters<TranscriptionModelV3['doGenerate']>[0]) {
85
- const warnings: SharedV3Warning[] = [];
101
+ }: Parameters<TranscriptionModelV4['doGenerate']>[0]) {
102
+ const warnings: SharedV4Warning[] = [];
86
103
 
87
104
  // Parse provider options
88
105
  const falOptions = await parseProviderOptions({
@@ -121,8 +138,8 @@ export class FalTranscriptionModel implements TranscriptionModelV3 {
121
138
  }
122
139
 
123
140
  async doGenerate(
124
- options: Parameters<TranscriptionModelV3['doGenerate']>[0],
125
- ): Promise<Awaited<ReturnType<TranscriptionModelV3['doGenerate']>>> {
141
+ options: Parameters<TranscriptionModelV4['doGenerate']>[0],
142
+ ): Promise<Awaited<ReturnType<TranscriptionModelV4['doGenerate']>>> {
126
143
  const currentDate = this.config._internal?.currentDate?.() ?? new Date();
127
144
  const { body, warnings } = await this.getArgs(options);
128
145
 
@@ -138,7 +155,7 @@ export class FalTranscriptionModel implements TranscriptionModelV3 {
138
155
  path: `https://queue.fal.run/fal-ai/${this.modelId}`,
139
156
  modelId: this.modelId,
140
157
  }),
141
- headers: combineHeaders(this.config.headers(), options.headers),
158
+ headers: combineHeaders(this.config.headers?.(), options.headers),
142
159
  body: {
143
160
  ...body,
144
161
  audio_url: audioUrl,
@@ -170,7 +187,7 @@ export class FalTranscriptionModel implements TranscriptionModelV3 {
170
187
  path: `https://queue.fal.run/fal-ai/${this.modelId}/requests/${queueResponse.request_id}`,
171
188
  modelId: this.modelId,
172
189
  }),
173
- headers: combineHeaders(this.config.headers(), options.headers),
190
+ headers: combineHeaders(this.config.headers?.(), options.headers),
174
191
  failedResponseHandler: async ({
175
192
  requestBodyValues,
176
193
  response,
@@ -1,7 +1,7 @@
1
1
  import {
2
2
  AISDKError,
3
- type Experimental_VideoModelV3,
4
- type SharedV3Warning,
3
+ type Experimental_VideoModelV4,
4
+ type SharedV4Warning,
5
5
  } from '@ai-sdk/provider';
6
6
  import {
7
7
  combineHeaders,
@@ -63,8 +63,8 @@ interface FalVideoModelConfig extends FalConfig {
63
63
  };
64
64
  }
65
65
 
66
- export class FalVideoModel implements Experimental_VideoModelV3 {
67
- readonly specificationVersion = 'v3';
66
+ export class FalVideoModel implements Experimental_VideoModelV4 {
67
+ readonly specificationVersion = 'v4';
68
68
  readonly maxVideosPerCall = 1; // FAL video models support 1 video at a time
69
69
 
70
70
  get provider(): string {
@@ -81,10 +81,10 @@ export class FalVideoModel implements Experimental_VideoModelV3 {
81
81
  ) {}
82
82
 
83
83
  async doGenerate(
84
- options: Parameters<Experimental_VideoModelV3['doGenerate']>[0],
85
- ): Promise<Awaited<ReturnType<Experimental_VideoModelV3['doGenerate']>>> {
84
+ options: Parameters<Experimental_VideoModelV4['doGenerate']>[0],
85
+ ): Promise<Awaited<ReturnType<Experimental_VideoModelV4['doGenerate']>>> {
86
86
  const currentDate = this.config._internal?.currentDate?.() ?? new Date();
87
- const warnings: SharedV3Warning[] = [];
87
+ const warnings: SharedV4Warning[] = [];
88
88
 
89
89
  const falOptions = (await parseProviderOptions({
90
90
  provider: 'fal',
@@ -158,7 +158,7 @@ export class FalVideoModel implements Experimental_VideoModelV3 {
158
158
  path: `https://queue.fal.run/fal-ai/${this.normalizedModelId}`,
159
159
  modelId: this.modelId,
160
160
  }),
161
- headers: combineHeaders(this.config.headers(), options.headers),
161
+ headers: combineHeaders(this.config.headers?.(), options.headers),
162
162
  body,
163
163
  failedResponseHandler: falFailedResponseHandler,
164
164
  successfulResponseHandler:
@@ -189,7 +189,7 @@ export class FalVideoModel implements Experimental_VideoModelV3 {
189
189
  path: responseUrl,
190
190
  modelId: this.modelId,
191
191
  }),
192
- headers: combineHeaders(this.config.headers(), options.headers),
192
+ headers: combineHeaders(this.config.headers?.(), options.headers),
193
193
  failedResponseHandler: async ({
194
194
  response,
195
195
  url,
package/dist/index.d.mts DELETED
@@ -1,156 +0,0 @@
1
- import { ProviderV3, ImageModelV3, TranscriptionModelV3, Experimental_VideoModelV3, SpeechModelV3 } from '@ai-sdk/provider';
2
- import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
3
- import { FetchFunction, InferSchema } from '@ai-sdk/provider-utils';
4
- import { z } from 'zod/v4';
5
-
6
- type FalImageModelId = 'fal-ai/aura-sr' | 'fal-ai/bria/background/remove' | 'fal-ai/bria/eraser' | 'fal-ai/bria/product-shot' | 'fal-ai/bria/reimagine' | 'bria/text-to-image/3.2' | 'fal-ai/bria/text-to-image/base' | 'fal-ai/bria/text-to-image/fast' | 'fal-ai/bria/text-to-image/hd' | 'fal-ai/bytedance/dreamina/v3.1/text-to-image' | 'fal-ai/ccsr' | 'fal-ai/clarity-upscaler' | 'fal-ai/creative-upscaler' | 'fal-ai/esrgan' | 'fal-ai/flux-general' | 'fal-ai/flux-general/differential-diffusion' | 'fal-ai/flux-general/image-to-image' | 'fal-ai/flux-general/inpainting' | 'fal-ai/flux-general/rf-inversion' | 'fal-ai/flux-kontext-lora/text-to-image' | 'fal-ai/flux-lora' | 'fal-ai/flux-lora/image-to-image' | 'fal-ai/flux-lora/inpainting' | 'fal-ai/flux-pro/kontext' | 'fal-ai/flux-pro/kontext/max' | 'fal-ai/flux-pro/v1.1' | 'fal-ai/flux-pro/v1.1-ultra' | 'fal-ai/flux-pro/v1.1-ultra-finetuned' | 'fal-ai/flux-pro/v1.1-ultra/redux' | 'fal-ai/flux-pro/v1.1/redux' | 'fal-ai/flux/dev' | 'fal-ai/flux/dev/image-to-image' | 'fal-ai/flux/dev/redux' | 'fal-ai/flux/krea' | 'fal-ai/flux/krea/image-to-image' | 'fal-ai/flux/krea/redux' | 'fal-ai/flux/schnell' | 'fal-ai/flux/schnell/redux' | 'fal-ai/ideogram/character' | 'fal-ai/ideogram/character/edit' | 'fal-ai/ideogram/character/remix' | 'fal-ai/imagen4/preview' | 'fal-ai/luma-photon' | 'fal-ai/luma-photon/flash' | 'fal-ai/object-removal' | 'fal-ai/omnigen-v2' | 'fal-ai/qwen-image' | 'fal-ai/recraft/v3/text-to-image' | 'fal-ai/recraft/v3/image-to-image' | 'fal-ai/sana/sprint' | 'fal-ai/sana/v1.5/4.8b' | 'fal-ai/sana/v1.5/1.6b' | 'fal-ai/sky-raccoon' | 'fal-ai/wan/v2.2-5b/text-to-image' | 'fal-ai/wan/v2.2-a14b/text-to-image' | 'fal-ai/fashn/tryon/v1.6' | (string & {});
7
-
8
- type FalTranscriptionModelId = 'whisper' | 'wizper' | (string & {});
9
-
10
- type FalSpeechModelId = 'fal-ai/minimax/voice-clone' | 'fal-ai/minimax/voice-design' | 'fal-ai/dia-tts/voice-clone' | 'fal-ai/minimax/speech-02-hd' | 'fal-ai/minimax/speech-02-turbo' | 'fal-ai/dia-tts' | 'resemble-ai/chatterboxhd/text-to-speech' | (string & {});
11
-
12
- type FalVideoModelId = 'luma-dream-machine' | 'luma-ray-2' | 'luma-ray-2-flash' | 'minimax-video' | 'minimax-video-01' | 'hunyuan-video' | (string & {});
13
-
14
- interface FalProviderSettings {
15
- /**
16
- * fal.ai API key. Default value is taken from the `FAL_API_KEY` environment
17
- * variable, falling back to `FAL_KEY`.
18
- */
19
- apiKey?: string;
20
- /**
21
- * Base URL for the API calls.
22
- * The default prefix is `https://fal.run`.
23
- */
24
- baseURL?: string;
25
- /**
26
- * Custom headers to include in the requests.
27
- */
28
- headers?: Record<string, string>;
29
- /**
30
- * Custom fetch implementation. You can use it as a middleware to intercept
31
- * requests, or to provide a custom fetch implementation for e.g. testing.
32
- */
33
- fetch?: FetchFunction;
34
- }
35
- interface FalProvider extends ProviderV3 {
36
- /**
37
- * Creates a model for image generation.
38
- */
39
- image(modelId: FalImageModelId): ImageModelV3;
40
- /**
41
- * Creates a model for image generation.
42
- */
43
- imageModel(modelId: FalImageModelId): ImageModelV3;
44
- /**
45
- * Creates a model for transcription.
46
- */
47
- transcription(modelId: FalTranscriptionModelId): TranscriptionModelV3;
48
- /**
49
- * Creates a model for video generation.
50
- */
51
- video(modelId: FalVideoModelId): Experimental_VideoModelV3;
52
- /**
53
- * Creates a model for video generation.
54
- */
55
- videoModel(modelId: FalVideoModelId): Experimental_VideoModelV3;
56
- /**
57
- * Creates a model for speech generation.
58
- */
59
- speech(modelId: FalSpeechModelId): SpeechModelV3;
60
- /**
61
- * @deprecated Use `embeddingModel` instead.
62
- */
63
- textEmbeddingModel(modelId: string): never;
64
- }
65
- /**
66
- * Create a fal.ai provider instance.
67
- */
68
- declare function createFal(options?: FalProviderSettings): FalProvider;
69
- /**
70
- * Default fal.ai provider instance.
71
- */
72
- declare const fal: FalProvider;
73
-
74
- declare const falImageModelOptionsSchema: _ai_sdk_provider_utils.LazySchema<Record<string, unknown>>;
75
- type FalImageModelOptions = InferSchema<typeof falImageModelOptionsSchema>;
76
-
77
- declare const falSpeechModelOptionsSchema: z.ZodObject<{
78
- voice_setting: z.ZodOptional<z.ZodNullable<z.ZodObject<{
79
- speed: z.ZodOptional<z.ZodOptional<z.ZodNullable<z.ZodNumber>>>;
80
- vol: z.ZodOptional<z.ZodOptional<z.ZodNullable<z.ZodNumber>>>;
81
- voice_id: z.ZodOptional<z.ZodOptional<z.ZodNullable<z.ZodString>>>;
82
- pitch: z.ZodOptional<z.ZodOptional<z.ZodNullable<z.ZodNumber>>>;
83
- english_normalization: z.ZodOptional<z.ZodOptional<z.ZodNullable<z.ZodBoolean>>>;
84
- emotion: z.ZodOptional<z.ZodOptional<z.ZodNullable<z.ZodEnum<{
85
- happy: "happy";
86
- sad: "sad";
87
- angry: "angry";
88
- fearful: "fearful";
89
- disgusted: "disgusted";
90
- surprised: "surprised";
91
- neutral: "neutral";
92
- }>>>>;
93
- }, z.core.$strip>>>;
94
- audio_setting: z.ZodOptional<z.ZodNullable<z.ZodRecord<z.ZodString, z.ZodUnknown>>>;
95
- language_boost: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
96
- Chinese: "Chinese";
97
- "Chinese,Yue": "Chinese,Yue";
98
- English: "English";
99
- Arabic: "Arabic";
100
- Russian: "Russian";
101
- Spanish: "Spanish";
102
- French: "French";
103
- Portuguese: "Portuguese";
104
- German: "German";
105
- Turkish: "Turkish";
106
- Dutch: "Dutch";
107
- Ukrainian: "Ukrainian";
108
- Vietnamese: "Vietnamese";
109
- Indonesian: "Indonesian";
110
- Japanese: "Japanese";
111
- Italian: "Italian";
112
- Korean: "Korean";
113
- Thai: "Thai";
114
- Polish: "Polish";
115
- Romanian: "Romanian";
116
- Greek: "Greek";
117
- Czech: "Czech";
118
- Finnish: "Finnish";
119
- Hindi: "Hindi";
120
- auto: "auto";
121
- }>>>;
122
- pronunciation_dict: z.ZodOptional<z.ZodNullable<z.ZodRecord<z.ZodString, z.ZodString>>>;
123
- }, z.core.$loose>;
124
- type FalSpeechModelOptions = z.infer<typeof falSpeechModelOptionsSchema>;
125
-
126
- declare const falTranscriptionModelOptionsSchema: z.ZodObject<{
127
- language: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodUnion<readonly [z.ZodEnum<{
128
- en: "en";
129
- }>, z.ZodString]>>>>;
130
- diarize: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodBoolean>>>;
131
- chunkLevel: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodEnum<{
132
- segment: "segment";
133
- word: "word";
134
- }>>>>;
135
- version: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodEnum<{
136
- 3: "3";
137
- }>>>>;
138
- batchSize: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodNumber>>>;
139
- numSpeakers: z.ZodOptional<z.ZodNullable<z.ZodNullable<z.ZodNumber>>>;
140
- }, z.core.$strip>;
141
- type FalTranscriptionModelOptions = z.infer<typeof falTranscriptionModelOptionsSchema>;
142
-
143
- type FalVideoModelOptions = {
144
- loop?: boolean | null;
145
- motionStrength?: number | null;
146
- pollIntervalMs?: number | null;
147
- pollTimeoutMs?: number | null;
148
- resolution?: string | null;
149
- negativePrompt?: string | null;
150
- promptOptimizer?: boolean | null;
151
- [key: string]: unknown;
152
- };
153
-
154
- declare const VERSION: string;
155
-
156
- export { type FalImageModelOptions, type FalImageModelOptions as FalImageProviderOptions, type FalProvider, type FalProviderSettings, type FalSpeechModelOptions, type FalTranscriptionModelOptions, type FalVideoModelId, type FalVideoModelOptions, type FalVideoModelOptions as FalVideoProviderOptions, VERSION, createFal, fal };