@imgly/plugin-ai-video-generation-web 0.2.17 → 1.69.0-nightly.20260130
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +2 -231
- package/README.md +4 -1283
- package/dist/.tsbuildinfo +1 -1
- package/dist/eachlabs/index.mjs +2 -2
- package/dist/eachlabs/index.mjs.map +4 -4
- package/dist/fal-ai/Veo31FastFirstLastFrameToVideo.d.ts +1 -1
- package/dist/fal-ai/Veo31FirstLastFrameToVideo.d.ts +2 -2
- package/dist/fal-ai/index.mjs +10 -10
- package/dist/fal-ai/index.mjs.map +4 -4
- package/dist/index.mjs +4 -4
- package/dist/index.mjs.map +4 -4
- package/dist/runware/index.mjs +1 -1
- package/dist/runware/index.mjs.map +4 -4
- package/package.json +4 -8
package/README.md CHANGED
@@ -1,1288 +1,9 @@
-#
+# @imgly/plugin-ai-video-generation-web

-
+AI video generation plugin for the CE.SDK editor

-
-
-The `@imgly/plugin-ai-video-generation-web` package enables users to generate videos using AI directly within CreativeEditor SDK. This shipped provider leverages the [fal.ai](https://fal.ai) platform to provide high-quality video generation from text-to-video and image-to-video transformations.
-
-Features include:
-
-- Text-to-video generation
-- Image-to-video transformations
-- Multiple model options
-- Automatic history tracking
-- Canvas menu quick actions
-- Seamless integration with CreativeEditor SDK
-
-## Installation
-
-```bash
-npm install @imgly/plugin-ai-video-generation-web
-```
-
-## Usage
-
-### Basic Configuration
-
-To use the plugin, import it and configure it with your preferred providers:
-
-#### Single Provider Configuration
-
-```typescript
-import CreativeEditorSDK from '@cesdk/cesdk-js';
-import VideoGeneration from '@imgly/plugin-ai-video-generation-web';
-import FalAiVideo from '@imgly/plugin-ai-video-generation-web/fal-ai';
-// For Runware providers
-import RunwareVideo from '@imgly/plugin-ai-video-generation-web/runware';
-
-// Initialize CreativeEditor SDK
-CreativeEditorSDK.create(domElement, {
-  license: 'your-license-key'
-  // Other configuration options...
-}).then(async (cesdk) => {
-  // Add the video generation plugin
-  cesdk.addPlugin(
-    VideoGeneration({
-      // Text-to-video provider
-      text2video: FalAiVideo.MinimaxVideo01Live({
-        proxyUrl: 'http://your-proxy-server.com/api/proxy',
-        headers: {
-          'x-custom-header': 'value',
-          'x-client-version': '1.0.0'
-        },
-        // Optional: Configure default property values
-        properties: {
-          duration: { default: 5 }, // Default duration in seconds
-          aspect_ratio: { default: '16:9' } // Default aspect ratio
-        }
-      }),
-
-      // Image-to-video provider (optional)
-      image2video: FalAiVideo.MinimaxVideo01LiveImageToVideo({
-        proxyUrl: 'http://your-proxy-server.com/api/proxy',
-        headers: {
-          'x-custom-header': 'value',
-          'x-client-version': '1.0.0'
-        }
-      }),
-
-      // Optional configuration
-      debug: false,
-      dryRun: false
-    })
-  );
-});
-```
-
-#### Multiple Providers Configuration
-
-You can configure multiple providers for each generation type, and users will see a selection box to choose between them:
-
-```typescript
-import CreativeEditorSDK from '@cesdk/cesdk-js';
-import VideoGeneration from '@imgly/plugin-ai-video-generation-web';
-import FalAiVideo from '@imgly/plugin-ai-video-generation-web/fal-ai';
-
-// Initialize CreativeEditor SDK
-CreativeEditorSDK.create(domElement, {
-  license: 'your-license-key'
-  // Other configuration options...
-}).then(async (cesdk) => {
-  // Add the video generation plugin with multiple providers
-  cesdk.addPlugin(
-    VideoGeneration({
-      // Multiple text-to-video providers
-      text2video: [
-        FalAiVideo.MinimaxVideo01Live({
-          proxyUrl: 'http://your-proxy-server.com/api/proxy',
-          headers: {
-            'x-custom-header': 'value',
-            'x-client-version': '1.0.0'
-          }
-        }),
-        FalAiVideo.PixverseV35TextToVideo({
-          proxyUrl: 'http://your-proxy-server.com/api/proxy',
-          headers: {
-            'x-custom-header': 'value',
-            'x-client-version': '1.0.0'
-          }
-        })
-      ],
-
-      // Image-to-video provider (optional)
-      image2video: FalAiVideo.MinimaxVideo01LiveImageToVideo({
-        proxyUrl: 'http://your-proxy-server.com/api/proxy',
-        headers: {
-          'x-custom-header': 'value',
-          'x-client-version': '1.0.0'
-        }
-      }),
-
-      // Optional configuration
-      debug: false,
-      dryRun: false
-    })
-  );
-});
-```
-
-### Providers
-
-The plugin comes with pre-configured providers for fal.ai models:
-
-#### 1. MinimaxVideo01Live (Text-to-Video)
-
-A model that generates videos based on text prompts:
-
-```typescript
-text2video: FalAiVideo.MinimaxVideo01Live({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  headers: {
-    'x-custom-header': 'value',
-    'x-client-version': '1.0.0'
-  },
-  // Optional: Configure default property values
-  properties: {
-    prompt_optimizer: { default: true } // Enable automatic prompt enhancement
-  }
-});
-```
-
-Key features:
-
-- Generate videos from text descriptions
-- Fixed output dimensions (1280×720)
-- 5-second video duration
-- Custom headers support for API requests
-
-**Custom Translations:**
-
-```typescript
-cesdk.i18n.setTranslations({
-  en: {
-    'ly.img.plugin-ai-video-generation-web.fal-ai/minimax/video-01-live.property.prompt': 'Describe your Minimax video'
-  }
-});
-```
-
-#### 2. MinimaxVideo01LiveImageToVideo (Image-to-Video)
-
-A model that transforms still images into videos:
-
-```typescript
-image2video: FalAiVideo.MinimaxVideo01LiveImageToVideo({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  headers: {
-    'x-custom-header': 'value',
-    'x-client-version': '1.0.0'
-  },
-  // Optional: Configure default property values
-  properties: {
-    prompt_optimizer: { default: true } // Enable automatic prompt enhancement
-  }
-});
-```
-
-Key features:
-
-- Transform existing images into videos
-- Available through canvas quick actions
-- Maintains original image aspect ratio
-- Custom headers support for API requests
-
-#### 3. MinimaxHailuo02StandardImageToVideo (Image-to-Video)
-
-An advanced model that transforms still images into videos using Hailuo 02 Standard:
-
-```typescript
-image2video: FalAiVideo.MinimaxHailuo02StandardImageToVideo({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  headers: {
-    'x-custom-header': 'value',
-    'x-client-version': '1.0.0'
-  },
-  // Optional: Configure default property values
-  properties: {
-    resolution: { default: '768P' }, // Options: '512P' (912×512), '768P' (1280×720)
-    duration: { default: 6 } // Duration in seconds (6 or 10)
-  }
-});
-```
-
-Key features:
-
-- Transform existing images into videos
-- Available through canvas quick actions
-- Selectable resolutions (512P: 912×512, 768P: 1280×720)
-- Adjustable durations (6 or 10 seconds)
-- Custom headers support for API requests
-
-#### 4. PixverseV35TextToVideo (Text-to-Video)
-
-An alternative text-to-video model:
-
-```typescript
-text2video: FalAiVideo.PixverseV35TextToVideo({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  headers: {
-    'x-custom-header': 'value',
-    'x-client-version': '1.0.0'
-  },
-  // Optional: Configure default property values
-  properties: {
-    seed: { default: 42 } // Fixed seed for reproducible generation
-  }
-});
-```
-
-Key features:
-
-- Alternative text-to-video generation
-- Custom headers support for API requests
-
-#### 5. KlingVideoV21MasterTextToVideo (Text-to-Video)
-
-A model based on KlingVideo V2.1 that generates videos from text prompts:
-
-```typescript
-text2video: FalAiVideo.KlingVideoV21MasterTextToVideo({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  // Optional: Configure default property values
-  properties: {
-    aspect_ratio: { default: '16:9' }, // Options: '16:9', '9:16', '1:1'
-    duration: { default: '5s' } // Options: '5s', '10s'
-  }
-});
-```
-
-Key features:
-
-- Generate videos from text descriptions
-- Adjustable aspect ratios (16:9, 9:16, 1:1)
-- Selectable durations (5 s or 10 s)
-- Adaptive resolution (height fixed at 720 px, width is calculated)
-
-**Custom Translations:**
-
-```typescript
-cesdk.i18n.setTranslations({
-  en: {
-    'ly.img.plugin-ai-video-generation-web.fal-ai/kling-video/v2.1/master/text-to-video.property.prompt': 'Describe your KlingVideo',
-    'ly.img.plugin-ai-video-generation-web.fal-ai/kling-video/v2.1/master/text-to-video.property.aspect_ratio': 'Video Format',
-    'ly.img.plugin-ai-video-generation-web.fal-ai/kling-video/v2.1/master/text-to-video.property.duration': 'Video Length (seconds)'
-  }
-});
-```
-
-#### 6. KlingVideoV21MasterImageToVideo (Image-to-Video)
-
-A model that converts still images into videos using KlingVideo V2.1:
-
-```typescript
-image2video: FalAiVideo.KlingVideoV21MasterImageToVideo({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  // Optional: Configure default property values
-  properties: {
-    duration: { default: '5s' } // Options: '5s', '10s'
-  }
-});
-```
-
-Key features:
-
-- Transform existing images into videos
-- Maintains original image aspect ratio (fallback to 1280 × 720)
-- Canvas quick-action integration
-- Selectable durations (5 s or 10 s)
-
-#### 6. ByteDanceSeedanceV1ProImageToVideo (Image-to-Video)
-
-A model that transforms images into videos using ByteDance Seedance v1 Pro:
-
-```typescript
-image2video: FalAiVideo.ByteDanceSeedanceV1ProImageToVideo({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  // Optional: Configure default property values
-  properties: {
-    aspect_ratio: { default: 'auto' }, // Options: '21:9', '16:9', '4:3', '1:1', '3:4', '9:16', 'auto'
-    duration: { default: 5 }, // Duration in seconds (3-12)
-    resolution: { default: '720p' } // Options: '480p', '720p', '1080p'
-  }
-});
-```
-
-Key features:
-
-- Transform existing images into dynamic videos
-- Multiple aspect ratio options (21:9, 16:9, 4:3, 1:1, 3:4, 9:16, or auto from image)
-- Adjustable duration (3-12 seconds, default 5)
-- Resolution options (480p, 720p, 1080p)
-- Maintains image quality while adding motion
-
-#### 7. ByteDanceSeedanceV1ProTextToVideo (Text-to-Video)
-
-A model that generates videos from text using ByteDance Seedance v1 Pro:
-
-```typescript
-text2video: FalAiVideo.ByteDanceSeedanceV1ProTextToVideo({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  // Optional: Configure default property values
-  properties: {
-    aspect_ratio: { default: '16:9' }, // Options: '21:9', '16:9', '4:3', '1:1', '3:4', '9:16'
-    duration: { default: 5 }, // Duration in seconds (3-12)
-    resolution: { default: '720p' } // Options: '480p', '720p', '1080p'
-  }
-});
-```
-
-Key features:
-
-- Generate videos from text descriptions
-- Multiple aspect ratio options (21:9, 16:9, 4:3, 1:1, 3:4, 9:16)
-- Adjustable duration (3-12 seconds, default 5)
-- Resolution options (480p, 720p, 1080p)
-- High-quality motion synthesis from text prompts
-
-#### 8. Veo3TextToVideo (Text-to-Video)
-
-An advanced text-to-video model:
-
-```typescript
-text2video: FalAiVideo.Veo3TextToVideo({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  // Optional: Configure default property values
-  properties: {
-    aspect_ratio: { default: '16:9' }, // Options: '16:9', '9:16', '1:1'
-    duration: { default: 8 } // Fixed at 8 seconds for this provider
-  }
-});
-```
-
-Key features:
-
-- Generate videos from text descriptions
-- Supports aspect ratios 16:9, 9:16 and 1:1 (defaults to 16:9)
-- Fixed duration of 8 seconds
-- Optional audio generation via `generate_audio`
-
-#### 9. Veo31TextToVideo (Text-to-Video)
-
-Google's Veo 3.1 text-to-video model with enhanced capabilities:
-
-```typescript
-text2video: FalAiVideo.Veo31TextToVideo({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  // Optional: Configure default property values
-  properties: {
-    aspect_ratio: { default: '16:9' }, // Options: '16:9', '9:16', '1:1'
-    duration: { default: '8s' }, // Options: '4s', '6s', '8s'
-    resolution: { default: '720p' }, // Options: '720p', '1080p'
-    generate_audio: { default: true } // Enable audio generation
-  }
-});
-```
-
-Key features:
-
-- Generate videos from text descriptions
-- Supports aspect ratios 16:9, 9:16 and 1:1 (defaults to 16:9)
-- Variable duration options: 4s, 6s, or 8s
-- Resolution options: 720p (1280×720) or 1080p (1920×1080)
-- Optional audio generation
-
-#### 10. Veo31FastTextToVideo (Text-to-Video)
-
-Faster and more cost-effective version of Google's Veo 3.1 text-to-video model:
-
-```typescript
-text2video: FalAiVideo.Veo31FastTextToVideo({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  // Optional: Configure default property values
-  properties: {
-    aspect_ratio: { default: '16:9' }, // Options: '16:9', '9:16', '1:1'
-    duration: { default: '8s' }, // Options: '4s', '6s', '8s'
-    resolution: { default: '720p' }, // Options: '720p', '1080p'
-    generate_audio: { default: true } // Enable audio generation
-  }
-});
-```
-
-Key features:
-
-- Generate videos from text descriptions with faster processing
-- Supports aspect ratios 16:9, 9:16 and 1:1 (defaults to 16:9)
-- Variable duration options: 4s, 6s, or 8s
-- Resolution options: 720p (1280×720) or 1080p (1920×1080)
-- Optional audio generation
-- More cost-effective than the standard Veo 3.1 model
-
-#### 11. Veo31ImageToVideo (Image-to-Video)
-
-A model that transforms still images into videos using Google's Veo 3.1:
-
-```typescript
-image2video: FalAiVideo.Veo31ImageToVideo({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  // Optional: Configure default property values
-  properties: {
-    aspect_ratio: { default: 'auto' }, // Options: 'auto', '9:16', '16:9', '1:1'
-    resolution: { default: '720p' }, // Options: '720p', '1080p'
-    duration: { default: '8s' }, // Fixed at 8 seconds
-    generate_audio: { default: true } // Enable audio generation
-  }
-});
-```
-
-Key features:
-
-- Transform existing images into videos
-- Multiple aspect ratio options (auto, 9:16, 16:9, 1:1)
-- Resolution options: 720p (1280×720) or 1080p (1920×1080)
-- Fixed duration of 8 seconds
-- Optional audio generation
-- Canvas quick-action integration
-- Auto aspect ratio preserves source image dimensions
-
-#### 12. Veo31FastImageToVideo (Image-to-Video)
-
-Faster and more cost-effective version of Google's Veo 3.1 image-to-video model:
-
-```typescript
-image2video: FalAiVideo.Veo31FastImageToVideo({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  // Optional: Configure default property values
-  properties: {
-    aspect_ratio: { default: 'auto' }, // Options: 'auto', '9:16', '16:9', '1:1'
-    resolution: { default: '720p' }, // Options: '720p', '1080p'
-    duration: { default: '8s' }, // Fixed at 8 seconds
-    generate_audio: { default: true } // Enable audio generation
-  }
-});
-```
-
-Key features:
-
-- Transform existing images into videos with faster processing
-- Multiple aspect ratio options (auto, 9:16, 16:9, 1:1)
-- Resolution options: 720p (1280×720) or 1080p (1920×1080)
-- Fixed duration of 8 seconds
-- Optional audio generation
-- Canvas quick-action integration
-- More cost-effective than the standard Veo 3.1 model
-- Auto aspect ratio preserves source image dimensions
-
-#### 13. Veo31FastFirstLastFrameToVideo (Image-to-Video)
-
-An experimental dual-image transformation model using Veo 3.1 Fast that creates videos by interpolating between two images:
-
-```typescript
-image2video: FalAiVideo.Veo31FastFirstLastFrameToVideo({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  // Optional: Configure default property values
-  properties: {
-    aspect_ratio: { default: 'auto' }, // Options: 'auto', '9:16', '16:9', '1:1'
-    resolution: { default: '720p' }, // Options: '720p', '1080p'
-    duration: { default: '8s' } // Fixed at 8 seconds
-  }
-});
-```
-
-Key features:
-
-- Transform two images (first frame and last frame) into smooth video transitions
-- Multiple aspect ratio options (auto, 9:16, 16:9, 1:1)
-- Resolution options (720p, 1080p)
-- Fixed duration of 8 seconds
-- Custom UI with dual image selectors for first and last frames
-- Optional prompt guidance for transition control
-- Optional audio generation
-
-**Note:** This provider uses a custom UI implementation with two image input fields (first_frame_url and last_frame_url) instead of the standard single image selector. This is a proof-of-concept implementation for handling multiple image inputs in video generation.
-
-### Runware Providers
-
-Runware provides access to multiple AI video models through a unified API. These providers require a Runware proxy URL for authentication.
-
-#### 14. Veo31 (Text-to-Video & Image-to-Video) via Runware
-
-Google's Veo 3.1 model accessed through Runware:
-
-```typescript
-// Text-to-Video
-text2video: RunwareVideo.Veo31.Text2Video({
-  proxyUrl: 'http://your-runware-proxy.com/api/proxy'
-})
-
-// Image-to-Video
-image2video: RunwareVideo.Veo31.Image2Video({
-  proxyUrl: 'http://your-runware-proxy.com/api/proxy'
-})
-```
-
-Key features:
-- Google's latest video generation model via Runware
-- Text-to-video and image-to-video support
-- Aspect ratios: 16:9, 9:16
-- Duration: Fixed at 8 seconds
-- Optional audio generation
-- Async delivery with polling
-
-#### 15. Veo31Fast (Text-to-Video & Image-to-Video) via Runware
-
-Faster version of Google's Veo 3.1 model:
-
-```typescript
-// Text-to-Video
-text2video: RunwareVideo.Veo31Fast.Text2Video({
-  proxyUrl: 'http://your-runware-proxy.com/api/proxy'
-})
-
-// Image-to-Video
-image2video: RunwareVideo.Veo31Fast.Image2Video({
-  proxyUrl: 'http://your-runware-proxy.com/api/proxy'
-})
-```
-
-Key features:
-- Faster generation times
-- Same capabilities as Veo31
-- More cost-effective
-- Aspect ratios: 16:9, 9:16
-- Duration: Fixed at 8 seconds
-
-#### 16. Sora2 (Text-to-Video & Image-to-Video) via Runware
-
-OpenAI's Sora 2 video generation model:
-
-```typescript
-// Text-to-Video
-text2video: RunwareVideo.Sora2.Text2Video({
-  proxyUrl: 'http://your-runware-proxy.com/api/proxy'
-})
-
-// Image-to-Video
-image2video: RunwareVideo.Sora2.Image2Video({
-  proxyUrl: 'http://your-runware-proxy.com/api/proxy'
-})
-```
-
-Key features:
-- OpenAI's advanced video generation model
-- Text-to-video and image-to-video support
-- Accurate physics simulation
-- Synchronized dialogue and high-fidelity visuals
-- Resolutions: 1280×720, 720×1280
-- Durations: 4, 8, or 12 seconds
-
-#### 17. Sora2Pro (Text-to-Video & Image-to-Video) via Runware
-
-Professional version of OpenAI's Sora 2:
-
-```typescript
-// Text-to-Video
-text2video: RunwareVideo.Sora2Pro.Text2Video({
-  proxyUrl: 'http://your-runware-proxy.com/api/proxy'
-})
-
-// Image-to-Video
-image2video: RunwareVideo.Sora2Pro.Image2Video({
-  proxyUrl: 'http://your-runware-proxy.com/api/proxy'
-})
-```
-
-Key features:
-- Enhanced quality over standard Sora 2
-- Text-to-video and image-to-video support
-- Professional-grade output
-- Resolutions: 1280×720, 720×1280, 1792×1024 (7:4), 1024×1792 (4:7)
-- Durations: 4, 8, or 12 seconds
-
-### EachLabs Providers
-
-EachLabs provides access to multiple AI video models through a unified API. These providers require an EachLabs proxy URL for authentication.
-
-#### 18. KlingV26ProTextToVideo (Text-to-Video) via EachLabs
-
-Kling v2.6 Pro text-to-video model accessed through EachLabs:
-
-```typescript
-import EachLabsVideo from '@imgly/plugin-ai-video-generation-web/eachlabs';
-
-text2video: EachLabsVideo.KlingV26ProTextToVideo({
-  proxyUrl: 'http://your-eachlabs-proxy.com/api/proxy',
-  // Optional: Configure default values
-  middlewares: [rateLimitMiddleware, errorMiddleware]
-})
-```
-
-Key features:
-- Kling v2.6 Pro - latest high-quality video generation
-- Aspect ratios: 16:9, 9:16, 1:1
-- Duration: 5 or 10 seconds
-- Native audio generation (Chinese/English)
-- Async delivery with polling
-
-#### 19. KlingV26ProImageToVideo (Image-to-Video) via EachLabs
-
-Kling v2.6 Pro image-to-video model:
-
-```typescript
-import EachLabsVideo from '@imgly/plugin-ai-video-generation-web/eachlabs';
-
-image2video: EachLabsVideo.KlingV26ProImageToVideo({
-  proxyUrl: 'http://your-eachlabs-proxy.com/api/proxy',
-  // Optional: Configure default values
-  middlewares: [rateLimitMiddleware, errorMiddleware]
-})
-```
-
-Key features:
-- Transform existing images into videos
-- Duration: 5 or 10 seconds
-- Native audio generation (Chinese/English)
-- Canvas quick-action integration
-- Maintains image aspect ratio
-
-#### 20. KlingO1ImageToVideo (Image-to-Video) via EachLabs
-
-Kling O1 image-to-video model:
-
-```typescript
-import EachLabsVideo from '@imgly/plugin-ai-video-generation-web/eachlabs';
-
-image2video: EachLabsVideo.KlingO1ImageToVideo({
-  proxyUrl: 'http://your-eachlabs-proxy.com/api/proxy',
-  // Optional: Configure default values
-  middlewares: [rateLimitMiddleware, errorMiddleware]
-})
-```
-
-Key features:
-- Transform existing images into videos
-- Duration: 5 or 10 seconds
-- Canvas quick-action integration
-- Maintains image aspect ratio
-
-#### 22. Veo31TextToVideo (Text-to-Video) via EachLabs
-
-Google's Veo 3.1 text-to-video model accessed through EachLabs:
-
-```typescript
-import EachLabsVideo from '@imgly/plugin-ai-video-generation-web/eachlabs';
-
-text2video: EachLabsVideo.Veo31TextToVideo({
-  proxyUrl: 'http://your-eachlabs-proxy.com/api/proxy',
-  // Optional: Configure default values
-  middlewares: [rateLimitMiddleware, errorMiddleware]
-})
-```
-
-Key features:
-- Google's Veo 3.1 - high-quality text-to-video generation
-- Aspect ratios: 16:9, 9:16
-- Resolution: 720p or 1080p
-- 8-second video duration
-- Optional audio generation
-- Async delivery with polling
-
-#### 23. Veo31ImageToVideo (Image-to-Video) via EachLabs
-
-Google's Veo 3.1 image-to-video model:
-
-```typescript
-import EachLabsVideo from '@imgly/plugin-ai-video-generation-web/eachlabs';
-
-image2video: EachLabsVideo.Veo31ImageToVideo({
-  proxyUrl: 'http://your-eachlabs-proxy.com/api/proxy',
-  // Optional: Configure default values
-  middlewares: [rateLimitMiddleware, errorMiddleware]
-})
-```
-
-Key features:
-- Transform existing images into videos
-- Resolution: 720p or 1080p
-- 8-second video duration
-- Optional audio generation
-- Canvas quick-action integration
-
-### Feature Control
-
-You can control various aspects of the video generation plugin using the Feature API:
-
-```typescript
-// Disable text-to-video generation
-cesdk.feature.enable('ly.img.plugin-ai-video-generation-web.fromText', false);
-
-// Disable image-to-video generation
-cesdk.feature.enable('ly.img.plugin-ai-video-generation-web.fromImage', false);
-
-// Disable provider selection
-cesdk.feature.enable('ly.img.plugin-ai-video-generation-web.providerSelect', false);
-
-// Disable specific quick actions
-cesdk.feature.enable('ly.img.plugin-ai-video-generation-web.quickAction.createVideo', false);
-```
-
-For more information about Feature API and available feature flags, see the [@imgly/plugin-ai-generation-web documentation](https://github.com/imgly/plugins/tree/main/packages/plugin-ai-generation-web#available-feature-flags).
-
-### Customizing Labels and Translations
-
-You can customize all labels and text in the AI video generation interface using the translation system. This allows you to provide better labels for your users in any language.
-
-#### Translation Key Structure
-
-The system checks for translations in this order (highest to lowest priority):
-
-1. **Provider-specific**: `ly.img.plugin-ai-video-generation-web.${provider}.property.${field}` - Override labels for a specific AI provider
-2. **Generic**: `ly.img.plugin-ai-generation-web.property.${field}` - Override labels for all AI plugins
-
-#### Basic Example
-
-```typescript
-// Customize labels for your AI video generation interface
-cesdk.i18n.setTranslations({
-  en: {
-    // Generic labels (applies to ALL AI plugins)
-    'ly.img.plugin-ai-generation-web.property.prompt': 'Describe what you want to create',
-    'ly.img.plugin-ai-generation-web.property.duration': 'Video Duration',
-
-    // Provider-specific for MinimaxVideo01Live
-    'ly.img.plugin-ai-video-generation-web.fal-ai/minimax/video-01-live.property.prompt': 'Describe your video',
-    'ly.img.plugin-ai-video-generation-web.fal-ai/minimax/video-01-live.property.duration': 'Video Length',
-
-    // Provider-specific for KlingVideoV21Master
-    'ly.img.plugin-ai-video-generation-web.fal-ai/kling-video/v2.1/master/text-to-video.property.aspect_ratio': 'Video Aspect Ratio',
-    'ly.img.plugin-ai-video-generation-web.fal-ai/kling-video/v2.1/master/text-to-video.property.duration': 'Video Duration (seconds)'
-  }
-});
-```
-
-#### QuickAction Translations
-
-Video QuickActions (like "Create Video from Image") use their own translation keys with provider-specific overrides:
-
-```typescript
-cesdk.i18n.setTranslations({
-  en: {
-    // Provider-specific translations (highest priority)
-    'ly.img.plugin-ai-video-generation-web.fal-ai/minimax/video-01-live.quickAction.createVideo': 'Generate Minimax Video...',
-    'ly.img.plugin-ai-video-generation-web.fal-ai/minimax/video-01-live.quickAction.createVideo.prompt': 'Minimax Video Prompt',
-
-    // Generic plugin translations
-    'ly.img.plugin-ai-video-generation-web.quickAction.createVideo': 'Create Video...',
-    'ly.img.plugin-ai-video-generation-web.quickAction.createVideo.prompt': 'Video Prompt',
-    'ly.img.plugin-ai-video-generation-web.quickAction.createVideo.prompt.placeholder': 'e.g. "Make the image move slowly"',
-    'ly.img.plugin-ai-video-generation-web.quickAction.createVideo.apply': 'Generate'
-  }
-});
-```
-
-**QuickAction Translation Priority:**
-1. Provider-specific: `ly.img.plugin-ai-video-generation-web.${provider}.quickAction.${action}.${field}`
-2. Generic plugin: `ly.img.plugin-ai-video-generation-web.quickAction.${action}.${field}`
-
-**Translation Structure:**
-- Base key (e.g., `.quickAction.createVideo`): Button text when QuickAction is collapsed
-- `.prompt`: Label for input field when expanded
-- `.prompt.placeholder`: Placeholder text for input field
-- `.apply`: Text for action/submit button
-
-### Configuration Options
-
-The plugin accepts the following configuration options:
-
-| Option | Type | Description | Default |
-| ------------- | -------------------- | ----------------------------------------------- | --------- |
-| `text2video` | Provider \| Provider[] | Provider(s) for text-to-video generation. When multiple providers are provided, users can select between them | undefined |
-| `image2video` | Provider \| Provider[] | Provider(s) for image-to-video transformation. When multiple providers are provided, users can select between them | undefined |
-| `debug` | boolean | Enable debug logging | false |
-| `dryRun` | boolean | Simulate generation without API calls | false |
-| `middleware` | Function[] | Array of middleware functions for the generation | undefined |
-
-### Middleware Configuration
-
-The `middleware` option allows you to add pre-processing and post-processing capabilities to the generation process:
-
-```typescript
-import VideoGeneration from '@imgly/plugin-ai-video-generation-web';
-import FalAiVideo from '@imgly/plugin-ai-video-generation-web/fal-ai';
-import { loggingMiddleware, rateLimitMiddleware } from '@imgly/plugin-ai-generation-web';
-
-// Create middleware functions
-const logging = loggingMiddleware();
-const rateLimit = rateLimitMiddleware({
-  maxRequests: 5,
-  timeWindowMs: 300000, // 5 minutes
-  onRateLimitExceeded: (input, options, info) => {
-    console.log(`Video generation rate limit exceeded: ${info.currentCount}/${info.maxRequests}`);
-    return false; // Reject request
-  }
-});
-
-// Create custom middleware
-const customMiddleware = async (input, options, next) => {
-  console.log('Before generation:', input);
-
-  // Add custom fields or modify the input
-  const modifiedInput = {
-    ...input,
-    customField: 'custom value'
-  };
-
-  // Call the next middleware or generation function
-  const result = await next(modifiedInput, options);
-
-  console.log('After generation:', result);
-
-  // You can also modify the result before returning it
-  return result;
-};
-
-// Apply middleware to plugin
-cesdk.addPlugin(
-  VideoGeneration({
-    text2video: FalAiVideo.MinimaxVideo01Live({
-      proxyUrl: 'http://your-proxy-server.com/api/proxy'
-    }),
-    middleware: [logging, rateLimit, customMiddleware] // Apply middleware in order
-  })
-);
-```
-
-Built-in middleware options:
-
-- **loggingMiddleware**: Logs generation requests and responses
-- **rateLimitMiddleware**: Limits the number of generation requests in a time window
-
-You can also create custom middleware functions to meet your specific needs.
-
-#### Preventing Default Feedback
-
-Middleware can suppress default UI feedback behaviors using `options.preventDefault()`:
-
-```typescript
-const customErrorMiddleware = async (input, options, next) => {
-  try {
-    return await next(input, options);
-  } catch (error) {
-    // Prevent default error notification
-    options.preventDefault();
-
-    // Show custom error notification
-    options.cesdk?.ui.showNotification({
-      type: 'error',
-      message: `Video generation failed: ${error.message}`,
-      action: {
-        label: 'Try Again',
-        onClick: () => {/* retry logic */}
-      }
-    });
-
-    throw error;
-  }
-};
-```
-
-**What gets prevented:**
-- Error/success notifications
-- Block error state
-- Console error logging
-
-**What is NOT prevented:**
-- Pending → Ready transition (loading spinner always stops)
-
-For more details, see the [@imgly/plugin-ai-generation-web documentation](https://github.com/imgly/plugins/tree/main/packages/plugin-ai-generation-web#preventing-default-feedback).
-
-### Using a Proxy
-
-For security reasons, it's recommended to use a proxy server to handle API requests to fal.ai. The proxy URL is required when configuring providers:
-
-```typescript
-text2video: FalAiVideo.MinimaxVideo01Live({
-  proxyUrl: 'http://your-proxy-server.com/api/proxy',
-  headers: {
-    'x-custom-header': 'value',
-    'x-client-version': '1.0.0'
-  }
-});
-```
-
-The `headers` option allows you to include custom HTTP headers in all API requests. This is useful for:
-- Adding custom client identification headers
-- Including version information
-- Passing through metadata required by your API
-- Adding correlation IDs for request tracing
-
-You'll need to implement a proxy server that forwards requests to fal.ai and handles authentication.
-
-## API Reference
-
-### Main Plugin
-
-```typescript
-VideoGeneration(options: PluginConfiguration): EditorPlugin
-```
-
-Creates and returns a plugin that can be added to CreativeEditor SDK.
-
-### Plugin Configuration
-
-```typescript
-interface PluginConfiguration {
-  // Provider(s) for text-to-video generation
-  text2video?: AiVideoProvider | AiVideoProvider[];
-
-  // Provider(s) for image-to-video generation
-  image2video?: AiVideoProvider | AiVideoProvider[];
-
-  // Enable debug logging
-  debug?: boolean;
-
-  // Skip actual API calls for testing
-  dryRun?: boolean;
-
-  // Extend the generation process
-  middleware?: GenerationMiddleware;
-}
-```
-
-### Fal.ai Providers
-
-#### MinimaxVideo01Live
-
-```typescript
-FalAiVideo.MinimaxVideo01Live(config: {
-  proxyUrl: string;
-  headers?: Record<string, string>;
-  debug?: boolean;
-}): AiVideoProvider
-```
-
-#### MinimaxVideo01LiveImageToVideo
-
-```typescript
-FalAiVideo.MinimaxVideo01LiveImageToVideo(config: {
-  proxyUrl: string;
-  headers?: Record<string, string>;
-  debug?: boolean;
-}): AiVideoProvider
-```
-
-#### MinimaxHailuo02StandardImageToVideo
-
-```typescript
-FalAiVideo.MinimaxHailuo02StandardImageToVideo(config: {
-  proxyUrl: string;
-  headers?: Record<string, string>;
-  debug?: boolean;
-}): AiVideoProvider
-```
-
-#### PixverseV35TextToVideo
-
-```typescript
-FalAiVideo.PixverseV35TextToVideo(config: {
-  proxyUrl: string;
-  headers?: Record<string, string>;
-  debug?: boolean;
-}): AiVideoProvider
-```
-
-#### KlingVideoV21MasterTextToVideo
-
-```typescript
-FalAiVideo.KlingVideoV21MasterTextToVideo(config: {
-  proxyUrl: string;
-  debug?: boolean;
-}): AiVideoProvider
-```
-
-#### KlingVideoV21MasterImageToVideo
-
-```typescript
-FalAiVideo.KlingVideoV21MasterImageToVideo(config: {
-  proxyUrl: string;
-  debug?: boolean;
-}): AiVideoProvider
-```
-
-#### ByteDanceSeedanceV1ProImageToVideo
-
-```typescript
-FalAiVideo.ByteDanceSeedanceV1ProImageToVideo(config: {
-  proxyUrl: string;
-  debug?: boolean;
-}): AiVideoProvider
-```
-
-#### ByteDanceSeedanceV1ProTextToVideo
-
-```typescript
-FalAiVideo.ByteDanceSeedanceV1ProTextToVideo(config: {
-  proxyUrl: string;
-  debug?: boolean;
-}): AiVideoProvider
-```
-
-#### Veo3TextToVideo
-
-```typescript
-FalAiVideo.Veo3TextToVideo(config: {
-  proxyUrl: string;
-  debug?: boolean;
-}): AiVideoProvider
-```
-
-#### Veo31TextToVideo
-
-```typescript
-FalAiVideo.Veo31TextToVideo(config: {
-  proxyUrl: string;
-  debug?: boolean;
-}): AiVideoProvider
-```
-
-#### Veo31FastTextToVideo
-
-```typescript
-FalAiVideo.Veo31FastTextToVideo(config: {
-  proxyUrl: string;
-  debug?: boolean;
-}): AiVideoProvider
-```
-
-#### Veo31ImageToVideo
-
-```typescript
-FalAiVideo.Veo31ImageToVideo(config: {
-  proxyUrl: string;
-  debug?: boolean;
-}): AiVideoProvider
-```
-
-#### Veo31FastImageToVideo
-
-```typescript
-FalAiVideo.Veo31FastImageToVideo(config: {
-  proxyUrl: string;
-  debug?: boolean;
-}): AiVideoProvider
-```
-
-#### Veo31FastFirstLastFrameToVideo
-
-```typescript
-FalAiVideo.Veo31FastFirstLastFrameToVideo(config: {
-  proxyUrl: string;
-  debug?: boolean;
-}): AiVideoProvider
-```
-
-### Runware Providers
-
-All Runware video providers use the following configuration:
-
-```typescript
-interface RunwareProviderConfiguration {
-  proxyUrl: string; // HTTP endpoint URL for the Runware proxy
-  debug?: boolean; // Enable debug logging
-  middlewares?: any[]; // Optional middleware functions
-  history?: false | '@imgly/local' | '@imgly/indexedDB' | (string & {});
-}
-```
-
-#### Veo31.Text2Video / Veo31.Image2Video
-
-```typescript
-RunwareVideo.Veo31.Text2Video(config: RunwareProviderConfiguration)
-RunwareVideo.Veo31.Image2Video(config: RunwareProviderConfiguration)
-```
-
-#### Veo31Fast.Text2Video / Veo31Fast.Image2Video
-
-```typescript
-RunwareVideo.Veo31Fast.Text2Video(config: RunwareProviderConfiguration)
-RunwareVideo.Veo31Fast.Image2Video(config: RunwareProviderConfiguration)
-```
-
-#### Sora2.Text2Video / Sora2.Image2Video
-
-```typescript
-RunwareVideo.Sora2.Text2Video(config: RunwareProviderConfiguration)
-RunwareVideo.Sora2.Image2Video(config: RunwareProviderConfiguration)
-```
-
-#### Sora2Pro.Text2Video / Sora2Pro.Image2Video
-
-```typescript
-RunwareVideo.Sora2Pro.Text2Video(config: RunwareProviderConfiguration)
-RunwareVideo.Sora2Pro.Image2Video(config: RunwareProviderConfiguration)
-```
-
-### EachLabs Providers
-
-All EachLabs video providers use the following configuration:
-
-```typescript
-interface EachLabsProviderConfiguration {
-  proxyUrl: string; // HTTP endpoint URL for the EachLabs proxy
-  debug?: boolean; // Enable debug logging
-  middlewares?: any[]; // Optional middleware functions
-  history?: false | '@imgly/local' | '@imgly/indexedDB' | (string & {});
-}
-```
-
-#### KlingV26ProTextToVideo
-
-```typescript
-EachLabsVideo.KlingV26ProTextToVideo(config: EachLabsProviderConfiguration)
-```
-
-#### KlingV26ProImageToVideo
-
-```typescript
-EachLabsVideo.KlingV26ProImageToVideo(config: EachLabsProviderConfiguration)
-```
-
-#### KlingO1ImageToVideo
-
-```typescript
-EachLabsVideo.KlingO1ImageToVideo(config: EachLabsProviderConfiguration)
-```
-
-#### Veo31TextToVideo
-
-```typescript
-EachLabsVideo.Veo31TextToVideo(config: EachLabsProviderConfiguration)
-```
-
-#### Veo31ImageToVideo
-
-```typescript
-EachLabsVideo.Veo31ImageToVideo(config: EachLabsProviderConfiguration)
-```
-
-## UI Integration
-
-The plugin automatically registers the following UI components:
-
-1. **Generation Panel**: A sidebar panel for text-to-video generation
-2. **Quick Actions**: Canvas menu items for image-to-video transformations
-3. **History Library**: Displays previously generated videos
-4. **Dock Component**: A button in the dock area to open the video generation panel
-
-### Panel IDs
-
-- Main panel: `ly.img.ai.video-generation`
-- Canvas quick actions: `ly.img.ai.video.canvasMenu`
-- Provider-specific panels:
-  - MinimaxVideo01Live: `ly.img.ai.fal-ai/minimax/video-01-live`
-  - MinimaxVideo01LiveImageToVideo: `ly.img.ai.fal-ai/minimax/video-01-live/image-to-video`
-  - MinimaxHailuo02StandardImageToVideo: `ly.img.ai.fal-ai/minimax/hailuo-02/standard/image-to-video`
-  - PixverseV35TextToVideo: `ly.img.ai.fal-ai/pixverse/v3.5/text-to-video`
-  - KlingVideoV21MasterTextToVideo: `ly.img.ai.fal-ai/kling-video/v2.1/master/text-to-video`
-  - KlingVideoV21MasterImageToVideo: `ly.img.ai.fal-ai/kling-video/v2.1/master/image-to-video`
-  - ByteDanceSeedanceV1ProImageToVideo: `ly.img.ai.fal-ai/bytedance/seedance/v1/pro/image-to-video`
-  - ByteDanceSeedanceV1ProTextToVideo: `ly.img.ai.fal-ai/bytedance/seedance/v1/pro/text-to-video`
-  - Veo3TextToVideo: `ly.img.ai.fal-ai/veo3`
-  - Veo31TextToVideo: `ly.img.ai.fal-ai/veo3.1`
-  - Veo31FastTextToVideo: `ly.img.ai.fal-ai/veo3.1/fast`
-  - Veo31ImageToVideo: `ly.img.ai.fal-ai/veo3.1/image-to-video`
-  - Veo31FastImageToVideo: `ly.img.ai.fal-ai/veo3.1/fast/image-to-video`
-  - Veo31FastFirstLastFrameToVideo: `ly.img.ai.fal-ai/veo3.1/fast/first-last-frame-to-video`
-  - Runware Veo31.Text2Video: `ly.img.ai.runware/google/veo-3-1`
-  - Runware Veo31.Image2Video: `ly.img.ai.runware/google/veo-3-1/image2video`
-  - Runware Veo31Fast.Text2Video: `ly.img.ai.runware/google/veo-3-1-fast`
-  - Runware Veo31Fast.Image2Video: `ly.img.ai.runware/google/veo-3-1-fast/image2video`
-  - Runware Sora2.Text2Video: `ly.img.ai.runware/openai/sora-2`
-  - Runware Sora2.Image2Video: `ly.img.ai.runware/openai/sora-2/image2video`
-  - Runware Sora2Pro.Text2Video: `ly.img.ai.runware/openai/sora-2-pro`
-  - Runware Sora2Pro.Image2Video: `ly.img.ai.runware/openai/sora-2-pro/image2video`
-  - EachLabs KlingV26ProTextToVideo: `ly.img.ai.eachlabs/kling-v2-6-pro-text-to-video`
-  - EachLabs KlingV26ProImageToVideo: `ly.img.ai.eachlabs/kling-v2-6-pro-image-to-video`
-  - EachLabs KlingO1ImageToVideo: `ly.img.ai.eachlabs/kling-o1-image-to-video`
-  - EachLabs Veo31TextToVideo: `ly.img.ai.eachlabs/veo3-1-text-to-video`
-  - EachLabs Veo31ImageToVideo: `ly.img.ai.eachlabs/veo3-1-image-to-video`
-
-### Asset History
-
-Generated videos are automatically stored in asset sources with the following IDs:
-
-- MinimaxVideo01Live: `fal-ai/minimax/video-01-live.history`
-- MinimaxVideo01LiveImageToVideo: `fal-ai/minimax/video-01-live/image-to-video.history`
-- MinimaxHailuo02StandardImageToVideo: `fal-ai/minimax/hailuo-02/standard/image-to-video.history`
-- PixverseV35TextToVideo: `fal-ai/pixverse/v3.5/text-to-video.history`
-- KlingVideoV21MasterTextToVideo: `fal-ai/kling-video/v2.1/master/text-to-video.history`
-- KlingVideoV21MasterImageToVideo: `fal-ai/kling-video/v2.1/master/image-to-video.history`
-- ByteDanceSeedanceV1ProImageToVideo: `fal-ai/bytedance/seedance/v1/pro/image-to-video.history`
-- ByteDanceSeedanceV1ProTextToVideo: `fal-ai/bytedance/seedance/v1/pro/text-to-video.history`
-- Veo3TextToVideo: `fal-ai/veo3.history`
-- Veo31TextToVideo: `fal-ai/veo3.1.history`
-- Veo31FastTextToVideo: `fal-ai/veo3.1/fast.history`
-- Veo31ImageToVideo: `fal-ai/veo3.1/image-to-video.history`
-- Veo31FastImageToVideo: `fal-ai/veo3.1/fast/image-to-video.history`
-- Veo31FastFirstLastFrameToVideo: `fal-ai/veo3.1/fast/first-last-frame-to-video.history`
-- Runware Veo31.Text2Video: `runware/google/veo-3-1.history`
-- Runware Veo31.Image2Video: `runware/google/veo-3-1/image2video.history`
-- Runware Veo31Fast.Text2Video: `runware/google/veo-3-1-fast.history`
-- Runware Veo31Fast.Image2Video: `runware/google/veo-3-1-fast/image2video.history`
-- Runware Sora2.Text2Video: `runware/openai/sora-2.history`
-- Runware Sora2.Image2Video: `runware/openai/sora-2/image2video.history`
-- Runware Sora2Pro.Text2Video: `runware/openai/sora-2-pro.history`
-- Runware Sora2Pro.Image2Video: `runware/openai/sora-2-pro/image2video.history`
-- EachLabs KlingV26ProTextToVideo: `eachlabs/kling-v2-6-pro-text-to-video.history`
-- EachLabs KlingV26ProImageToVideo: `eachlabs/kling-v2-6-pro-image-to-video.history`
-- EachLabs KlingO1ImageToVideo: `eachlabs/kling-o1-image-to-video.history`
-- EachLabs Veo31TextToVideo: `eachlabs/veo3-1-text-to-video.history`
-- EachLabs Veo31ImageToVideo: `eachlabs/veo3-1-image-to-video.history`
-
-### Dock Integration
-
-The plugin automatically registers a dock component with a sparkle icon that opens the video generation panel. To customize the component's position in the dock, use the `setDockOrder` method:
-
-```typescript
-// Add the AI Video component to the beginning of the dock
-cesdk.ui.setDockOrder([
-  'ly.img.ai.video-generation.dock',
-  ...cesdk.ui.getDockOrder()
-]);
-
-// Or add it at a specific position
-const currentOrder = cesdk.ui.getDockOrder();
-currentOrder.splice(2, 0, 'ly.img.ai.video-generation.dock');
-cesdk.ui.setDockOrder(currentOrder);
-```
-
-## Internationalization (i18n)
-
-The Video Generation plugin supports full internationalization. To customize translations, set them **before** adding the plugin:
-
-```typescript
-cesdk.i18n.setTranslations({
-  en: {
-    '@imgly/plugin-ai-video-generation-web.action.label': 'Create Video'
-  },
-  de: {
-    '@imgly/plugin-ai-video-generation-web.action.label': 'Video erstellen'
-  }
-});
-
-// Then add the plugins - they won't override your custom translations
-await cesdk.addPlugin(AiApps({ providers: { /* ... */ } }));
-```
-
-For detailed documentation on the translation system, see the [Internationalization section](https://github.com/imgly/plugins/tree/main/packages/plugin-ai-generation-web#internationalization-i18n) in the core AI generation package.
-
-For all available translation keys, see the [translations.json](https://github.com/imgly/plugins/tree/main/packages/plugin-ai-video-generation-web/translations.json) file.
-
-## Related Packages
-
-- [@imgly/plugin-ai-generation-web](https://github.com/imgly/plugins/tree/main/packages/plugin-ai-generation-web) - Core utilities for AI generation
-- [@imgly/plugin-ai-image-generation-web](https://github.com/imgly/plugins/tree/main/packages/plugin-ai-image-generation-web) - AI image generation
-- [@imgly/plugin-ai-audio-generation-web](https://github.com/imgly/plugins/tree/main/packages/plugin-ai-audio-generation-web) - AI audio generation
+For documentation, visit: https://img.ly/docs/cesdk

 ## License

-This plugin is part of the IMG.LY plugin ecosystem for CreativeEditor SDK.
+This plugin is part of the IMG.LY plugin ecosystem for CreativeEditor SDK.