@openpets/fal 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,73 @@
+ # FAL AI Image & Video Generation
+
+ FAL AI image and video generation plugin for OpenCode - text-to-image, image-to-image, text-to-video, and image-to-video using Flux, Stable Diffusion, Kling, and more.
+
+ ## Features
+
+ - **Text-to-Image**: Generate images from text descriptions using Flux, Stable Diffusion, and other models
+ - **Image-to-Image**: Transform existing images with style transfer, editing, and variations
+ - **Text-to-Video**: Create videos from text prompts using Kling, Runway Gen-3, and more
+ - **Image-to-Video**: Animate static images with camera movements and effects
+
+ ## Installation
+
+ ```bash
+ npm install @openpets/fal
+ ```
+
+ ## Setup
+
+ 1. Get your FAL AI API key from [https://fal.ai](https://fal.ai)
+ 2. Create a `.env` file in the plugin directory:
+
+ ```bash
+ FAL_KEY=your-api-key-here
+ ```
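+
+ The bundled client also falls back to `process.env.FAL_KEY` when no key is passed explicitly (see `client.ts` below), so the key can be supplied through the environment instead. A minimal sketch; the import path is illustrative, not confirmed:
+
+ ```typescript
+ import { createFalClient } from "@openpets/fal/client" // illustrative path
+
+ const client = createFalClient()           // falls back to process.env.FAL_KEY
+ const keyed = createFalClient("your-key")  // or pass the key explicitly
+ ```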
+
+ ## Usage
+
+ ### Generate an image
+
+ ```bash
+ opencode run "generate an image of a sunset over the ocean"
+ ```
+
+ ### Transform an image
+
+ ```bash
+ opencode run "transform this image into watercolor style"
+ ```
+
+ ### Generate a video
+
+ ```bash
+ opencode run "generate a video of clouds drifting across the sky"
+ ```
+
+ ### Animate an image
+
+ ```bash
+ opencode run "animate this image with a camera pan"
+ ```
+
+ ## Available Tools
+
+ - `fal-generate-image`: Generate images using FAL AI text-to-image models
+ - `fal-transform-image`: Transform images using FAL AI image-to-image models
+ - `fal-generate-video`: Generate videos using FAL AI text-to-video models
+ - `fal-animate-image`: Animate static images using FAL AI image-to-video models
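+
+ Each tool wraps the `FalClient` defined in `client.ts`. A minimal sketch of the underlying call, assuming `FAL_KEY` is set (the import path is illustrative, not confirmed):
+
+ ```typescript
+ import { createFalClient } from "@openpets/fal/client" // illustrative path
+
+ async function main() {
+   const client = createFalClient() // falls back to process.env.FAL_KEY
+   const result = await client.generateImage({
+     prompt: "a sunset over the ocean",
+     image_size: "landscape_16_9",
+     num_images: 1,
+   })
+
+   if (result.success) {
+     console.log(result.images?.[0]?.url)
+   } else {
+     console.error(result.error)
+   }
+ }
+
+ main()
+ ```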
+
+ ## Testing
+
+ Run individual test scripts:
+
+ ```bash
+ npm run test:generate-image
+ npm run test:transform-image
+ npm run test:generate-video
+ npm run test:animate-image
+ ```
+
+ ## License
+
+ MIT
package/client.ts ADDED
@@ -0,0 +1,397 @@
+ import { fal } from "@fal-ai/client"
+ import { BaseAIClient, AIImageResult, AIVideoResult } from "@/core/ai-client-base"
+ import { createLogger } from "@/core/logger"
+ import * as fs from "fs"
+ import * as path from "path"
+
+ const logger = createLogger("fal-client")
+
+ export interface FalImageGenerationOptions {
+   prompt: string
+   model?: string
+   seed?: number
+   image_size?: "square_hd" | "square" | "portrait_4_3" | "portrait_16_9" | "landscape_4_3" | "landscape_16_9"
+   num_images?: number
+   num_inference_steps?: number
+   guidance_scale?: number
+   negative_prompt?: string
+   enable_safety_checker?: boolean
+   input_image?: string
+ }
+
+ export interface FalVideoGenerationOptions {
+   prompt: string
+   model?: string
+   seed?: number
+   duration?: number
+   fps?: number
+   aspect_ratio?: "16:9" | "9:16" | "1:1"
+   num_videos?: number
+   input_image?: string
+   enable_safety_checker?: boolean
+ }
+
+ export class FalClient extends BaseAIClient {
+   private configured: boolean = false
+
+   constructor(apiKey?: string) {
+     super({
+       apiKey: apiKey || process.env.FAL_KEY || "",
+       baseUrl: "https://fal.run",
+       debug: false
+     })
+   }
+
+   get providerName(): string {
+     return "fal"
+   }
+
+   private ensureConfigured() {
+     this.ensureApiKey("FAL_KEY")
+
+     if (!this.configured) {
+       fal.config({
+         credentials: this.apiKey
+       })
+       this.configured = true
+     }
+   }
+
+   private async uploadFileIfNeeded(filePath: string): Promise<string> {
+     if (filePath.startsWith("http://") || filePath.startsWith("https://")) {
+       logger.info("🌐 Using existing URL (no upload needed)", { url: filePath })
+       return filePath
+     }
+
+     if (filePath.startsWith("data:")) {
+       logger.info("📊 Using data URI (no upload needed)")
+       return filePath
+     }
+
+     this.ensureConfigured()
+
+     const resolvedPath = path.resolve(filePath)
+
+     if (!fs.existsSync(resolvedPath)) {
+       logger.error("❌ FILE NOT FOUND", {
+         original_path: filePath,
+         resolved_path: resolvedPath
+       })
+       throw new Error(`File not found: ${resolvedPath}`)
+     }
+
+     const fileStats = fs.statSync(resolvedPath)
+     logger.info("📤 UPLOADING FILE TO FAL STORAGE", {
+       original_path: filePath,
+       resolved_path: resolvedPath,
+       file_size_bytes: fileStats.size,
+       file_size_kb: Math.round(fileStats.size / 1024)
+     })
+
+     const fileBuffer = fs.readFileSync(resolvedPath)
+     const fileName = path.basename(resolvedPath)
+     const mimeType = this.getMimeType(fileName)
+
+     logger.debug("Creating file blob", {
+       fileName,
+       mimeType,
+       buffer_size: fileBuffer.length
+     })
+
+     const blob = new Blob([fileBuffer], {
+       type: mimeType
+     })
+
+     const file = new File([blob], fileName, {
+       type: mimeType
+     })
+
+     const url = await fal.storage.upload(file)
+
+     logger.info("✅ FILE UPLOADED SUCCESSFULLY TO FAL", {
+       uploaded_url: url,
+       url_length: url.length,
+       original_filename: fileName
+     })
+
+     return url
+   }
+
+   private getMimeType(fileName: string): string {
+     const ext = path.extname(fileName).toLowerCase()
+     const mimeTypes: Record<string, string> = {
+       '.jpg': 'image/jpeg',
+       '.jpeg': 'image/jpeg',
+       '.png': 'image/png',
+       '.gif': 'image/gif',
+       '.webp': 'image/webp',
+       '.mp4': 'video/mp4',
+       '.mov': 'video/quicktime',
+       '.avi': 'video/x-msvideo'
+     }
+     return mimeTypes[ext] || 'application/octet-stream'
+   }
+
+   async generateImage(options: FalImageGenerationOptions): Promise<AIImageResult> {
+     try {
+       this.ensureConfigured()
+
+       const model = options.model || "fal-ai/flux/dev"
+
+       const input: any = {
+         prompt: options.prompt,
+       }
+
+       if (options.seed !== undefined) input.seed = options.seed
+       if (options.image_size) input.image_size = options.image_size
+       if (options.num_images) input.num_images = options.num_images
+       if (options.num_inference_steps) input.num_inference_steps = options.num_inference_steps
+       if (options.guidance_scale) input.guidance_scale = options.guidance_scale
+       if (options.negative_prompt) input.negative_prompt = options.negative_prompt
+       if (options.enable_safety_checker !== undefined) input.enable_safety_checker = options.enable_safety_checker
+
+       if (options.input_image) {
+         logger.info("🖼️ PROCESSING INPUT IMAGE", {
+           input_image_path: options.input_image,
+           model: model
+         })
+         const imageUrl = await this.uploadFileIfNeeded(options.input_image)
+
+         if (model.includes('nano-banana')) {
+           input.image_urls = [imageUrl]
+           logger.info("✅ INPUT IMAGE READY (nano-banana mode)", {
+             image_urls: [imageUrl],
+             total_images: 1
+           })
+         } else {
+           input.image_url = imageUrl
+           logger.info("✅ INPUT IMAGE READY (standard mode)", {
+             image_url: imageUrl,
+             url_length: imageUrl.length
+           })
+         }
+       } else {
+         logger.warn("⚠️ NO INPUT IMAGE PROVIDED - generating from text only")
+       }
+
+       logger.info("📤 SENDING REQUEST TO FAL API", {
+         model,
+         has_input_image: !!(input.image_url || input.image_urls),
+         input_params: {
+           has_prompt: !!input.prompt,
+           image_size: input.image_size,
+           num_images: input.num_images,
+           has_image_url: !!input.image_url,
+           has_image_urls: !!input.image_urls,
+           seed: input.seed
+         },
+         full_input: JSON.stringify(input, null, 2)
+       })
+
+       const result: any = await fal.subscribe(model, {
+         input,
+         logs: false,
+       })
+
+       logger.info("📥 RAW FAL RESPONSE", {
+         has_data: !!result.data,
+         data_keys: result.data ? Object.keys(result.data) : [],
+         has_images: result.data?.images ? true : false,
+         images_count: result.data?.images?.length || 0,
+         requestId: result.requestId,
+         full_response: JSON.stringify(result, null, 2).substring(0, 1000)
+       })
+
+       const credits = this.extractCreditsFromResponse(result)
+
+       if (result.data && result.data.images) {
+         const generatedImages = result.data.images.map((img: any) => ({
+           url: img.url,
+           width: img.width,
+           height: img.height,
+           contentType: img.content_type
+         }))
+
+         logger.info("✅ FAL API RESPONSE RECEIVED", {
+           num_images_generated: generatedImages.length,
+           dimensions: generatedImages.map(img => `${img.width}x${img.height}`),
+           request_id: result.requestId,
+           inference_time: result.data.timings?.inference || 0,
+           seed_used: result.data.seed
+         })
+
+         if (options.input_image) {
+           logger.info("📊 IMAGE-TO-IMAGE TRANSFORMATION COMPLETE", {
+             original_image: options.input_image,
+             requested_size: options.image_size,
+             output_dimensions: generatedImages.map(img => `${img.width}x${img.height}`).join(", ")
+           })
+         }
+
+         return {
+           success: true,
+           images: generatedImages,
+           requestId: result.requestId,
+           metadata: {
+             model,
+             provider: this.providerName,
+             prompt: options.prompt,
+             seed: result.data.seed || options.seed || 0,
+             inferenceTime: result.data.timings?.inference || 0
+           },
+           credits
+         }
+       } else {
+         logger.warn("⚠️ NO IMAGES IN FAL RESPONSE", {
+           has_data: !!result.data,
+           data_keys: result.data ? Object.keys(result.data) : [],
+           has_images_field: 'images' in (result.data || {}),
+           result_keys: Object.keys(result || {}),
+           full_response: JSON.stringify(result, null, 2).substring(0, 2000)
+         })
+         return {
+           success: false,
+           error: "No images returned from FAL API",
+           requestId: result.requestId
+         }
+       }
+     } catch (error: any) {
+       const errorDetails = {
+         message: error.message || "Unknown error occurred",
+         name: error.name,
+         statusCode: error.status || error.statusCode,
+         body: error.body,
+         requestId: error.request_id,
+         model: options.model || "fal-ai/flux/dev",
+         promptLength: options.prompt?.length || 0,
+         stack: error.stack
+       }
+
+       const verboseError = `FAL API Error: ${errorDetails.message}\n` +
+         ` Model: ${errorDetails.model}\n` +
+         ` Prompt Length: ${errorDetails.promptLength} characters\n` +
+         (errorDetails.statusCode ? ` Status Code: ${errorDetails.statusCode}\n` : '') +
+         (errorDetails.requestId ? ` Request ID: ${errorDetails.requestId}\n` : '') +
+         (errorDetails.body ? ` Response Body: ${JSON.stringify(errorDetails.body)}\n` : '') +
+         (errorDetails.stack ? ` Stack: ${errorDetails.stack.split('\n').slice(0, 3).join('\n')}` : '')
+
+       this.log(verboseError, "error")
+
+       return {
+         success: false,
+         error: verboseError
+       }
+     }
+   }
+
+   async generateAndOpenImage(options: FalImageGenerationOptions, openInViewer: boolean = true): Promise<AIImageResult & { localPath?: string }> {
+     const result = await this.generateImage(options)
+     return this.downloadAndOpen(result, "image", "png", openInViewer)
+   }
+
+   async generateVideo(options: FalVideoGenerationOptions): Promise<AIVideoResult> {
+     try {
+       this.ensureConfigured()
+
+       const model = options.model || "fal-ai/veo3.1/image-to-video"
+
+       const input: any = {
+         prompt: options.prompt,
+       }
+
+       if (options.seed !== undefined) input.seed = options.seed
+       if (options.duration) input.duration = options.duration
+       if (options.fps) input.fps = options.fps
+       if (options.aspect_ratio) input.aspect_ratio = options.aspect_ratio
+       if (options.num_videos) input.num_videos = options.num_videos
+       if (options.enable_safety_checker !== undefined) input.enable_safety_checker = options.enable_safety_checker
+
+       if (options.input_image) {
+         const imageUrl = await this.uploadFileIfNeeded(options.input_image)
+         input.image_url = imageUrl
+         this.log(`Using input image for video: ${imageUrl}`, "debug")
+       }
+
+       this.log(`Generating video with model: ${model}`, "debug")
+
+       const result: any = await fal.subscribe(model, {
+         input,
+         logs: false,
+       })
+
+       const credits = this.extractCreditsFromResponse(result)
+
+       if (result.data && result.data.video) {
+         return {
+           success: true,
+           videos: [{
+             url: result.data.video.url,
+             width: result.data.video.width || 720,
+             height: result.data.video.height || 1280,
+             duration: result.data.video.duration || options.duration || 5,
+             contentType: result.data.video.content_type || "video/mp4"
+           }],
+           requestId: result.requestId,
+           metadata: {
+             model,
+             provider: this.providerName,
+             prompt: options.prompt,
+             seed: result.data.seed || options.seed || 0,
+             inferenceTime: result.data.timings?.inference || 0
+           },
+           credits
+         }
+       } else {
+         return {
+           success: false,
+           error: "No video returned from FAL API",
+           requestId: result.requestId
+         }
+       }
+     } catch (error: any) {
+       const errorDetails = {
+         message: error.message || "Unknown error occurred",
+         name: error.name,
+         statusCode: error.status || error.statusCode,
+         body: error.body,
+         requestId: error.request_id,
+         model: options.model || "fal-ai/veo3.1/image-to-video",
+         promptLength: options.prompt?.length || 0,
+         duration: options.duration,
+         fps: options.fps,
+         aspectRatio: options.aspect_ratio,
+         hasInputImage: !!options.input_image,
+         inputImageUrl: options.input_image ? options.input_image.substring(0, 50) + '...' : undefined,
+         stack: error.stack
+       }
+
+       const verboseError = `FAL Video API Error: ${errorDetails.message}\n` +
+         ` Model: ${errorDetails.model}\n` +
+         ` Prompt Length: ${errorDetails.promptLength} characters\n` +
+         ` Duration: ${errorDetails.duration || 'default'}s\n` +
+         ` FPS: ${errorDetails.fps || 'default'}\n` +
+         ` Aspect Ratio: ${errorDetails.aspectRatio || 'default'}\n` +
+         (errorDetails.hasInputImage ? ` Input Image: ${errorDetails.inputImageUrl}\n` : '') +
+         (errorDetails.statusCode ? ` Status Code: ${errorDetails.statusCode}\n` : '') +
+         (errorDetails.requestId ? ` Request ID: ${errorDetails.requestId}\n` : '') +
+         (errorDetails.body ? ` Response Body: ${JSON.stringify(errorDetails.body)}\n` : '') +
+         (errorDetails.stack ? ` Stack: ${errorDetails.stack.split('\n').slice(0, 3).join('\n')}` : '')
+
+       this.log(verboseError, "error")
+
+       return {
+         success: false,
+         error: verboseError
+       }
+     }
+   }
+
+   async generateAndOpenVideo(options: FalVideoGenerationOptions, openInViewer: boolean = true): Promise<AIVideoResult & { localPath?: string }> {
+     const result = await this.generateVideo(options)
+     return this.downloadAndOpen(result, "video", "mp4", openInViewer)
+   }
+ }
+
+ export function createFalClient(apiKey?: string): FalClient {
+   return new FalClient(apiKey)
+ }
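+
+ // Usage sketch (illustrative only, not executed at import): generating a clip
+ // with the default model and logging the resulting URL.
+ //   const client = createFalClient()
+ //   const clip = await client.generateVideo({
+ //     prompt: "clouds drifting across the sky",
+ //     aspect_ratio: "16:9",
+ //     duration: 5,
+ //   })
+ //   if (clip.success) console.log(clip.videos?.[0]?.url)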
package/commands.json ADDED
@@ -0,0 +1,219 @@
+ {
+   "name": "@openpets/fal",
+   "version": "1.0.1",
+   "description": "FAL AI image and video generation plugin for OpenCode - text-to-image, image-to-image, text-to-video, and image-to-video using Flux, Stable Diffusion, Kling, and more",
+   "tools": [
+     {
+       "name": "fal-generate-image",
+       "description": "Generate an image using FAL AI text-to-image models (Flux, Stable Diffusion, etc.)",
+       "schema": {
+         "type": "object",
+         "properties": {
+           "prompt": {
+             "type": "string",
+             "description": "Text description of the image to generate"
+           },
+           "model": {
+             "type": "string",
+             "optional": true,
+             "description": "Model to use (default: fal-ai/flux/dev). Options: fal-ai/flux/dev, fal-ai/flux/schnell, fal-ai/stable-diffusion-v3"
+           },
+           "image_size": {
+             "type": "string",
+             "enum": [
+               "square_hd",
+               "square",
+               "portrait_4_3",
+               "portrait_16_9",
+               "landscape_4_3",
+               "landscape_16_9"
+             ],
+             "optional": true,
+             "description": "Image aspect ratio (default: square)"
+           },
+           "num_images": {
+             "type": "number",
+             "optional": true,
+             "description": "Number of images to generate (default: 1)"
+           },
+           "seed": {
+             "type": "number",
+             "optional": true,
+             "description": "Random seed for reproducibility"
+           },
+           "num_inference_steps": {
+             "type": "number",
+             "optional": true,
+             "description": "Number of inference steps (more = higher quality, slower)"
+           },
+           "guidance_scale": {
+             "type": "number",
+             "optional": true,
+             "description": "How closely to follow the prompt (7.5 is typical)"
+           },
+           "negative_prompt": {
+             "type": "string",
+             "optional": true,
+             "description": "What to avoid in the image"
+           },
+           "enable_safety_checker": {
+             "type": "boolean",
+             "optional": true,
+             "description": "Enable content safety checker (default: true)"
+           }
+         }
+       }
+     },
+     {
+       "name": "fal-transform-image",
+       "description": "Transform an existing image using FAL AI image-to-image models (style transfer, editing, variations)",
+       "schema": {
+         "type": "object",
+         "properties": {
+           "input_image": {
+             "type": "string",
+             "description": "Path or URL to input image to transform"
+           },
+           "prompt": {
+             "type": "string",
+             "description": "Description of how to transform the image"
+           },
+           "model": {
+             "type": "string",
+             "optional": true,
+             "description": "Model to use (default: fal-ai/flux/dev)"
+           },
+           "image_size": {
+             "type": "string",
+             "enum": [
+               "square_hd",
+               "square",
+               "portrait_4_3",
+               "portrait_16_9",
+               "landscape_4_3",
+               "landscape_16_9"
+             ],
+             "optional": true,
+             "description": "Output image aspect ratio"
+           },
+           "num_images": {
+             "type": "number",
+             "optional": true,
+             "description": "Number of variations to generate (default: 1)"
+           },
+           "seed": {
+             "type": "number",
+             "optional": true,
+             "description": "Random seed for reproducibility"
+           },
+           "guidance_scale": {
+             "type": "number",
+             "optional": true,
+             "description": "How closely to follow the prompt"
+           },
+           "negative_prompt": {
+             "type": "string",
+             "optional": true,
+             "description": "What to avoid in the transformation"
+           }
+         }
+       }
+     },
+     {
+       "name": "fal-generate-video",
+       "description": "Generate a video using FAL AI text-to-video models (Kling, Runway Gen-3, etc.)",
+       "schema": {
+         "type": "object",
+         "properties": {
+           "prompt": {
+             "type": "string",
+             "description": "Text description of the video to generate"
+           },
+           "model": {
+             "type": "string",
+             "optional": true,
+             "description": "Model to use (default: fal-ai/kling-video-v1). Options: fal-ai/kling-video-v1, fal-ai/runway-gen3"
+           },
+           "duration": {
+             "type": "number",
+             "optional": true,
+             "description": "Video duration in seconds (default: 5)"
+           },
+           "fps": {
+             "type": "number",
+             "optional": true,
+             "description": "Frames per second (default: 24)"
+           },
+           "aspect_ratio": {
+             "type": "string",
+             "enum": [
+               "16:9",
+               "9:16",
+               "1:1"
+             ],
+             "optional": true,
+             "description": "Video aspect ratio (default: 16:9)"
+           },
+           "num_videos": {
+             "type": "number",
+             "optional": true,
+             "description": "Number of videos to generate (default: 1)"
+           },
+           "seed": {
+             "type": "number",
+             "optional": true,
+             "description": "Random seed for reproducibility"
+           }
+         }
+       }
+     },
+     {
+       "name": "fal-animate-image",
+       "description": "Animate a static image using FAL AI image-to-video models",
+       "schema": {
+         "type": "object",
+         "properties": {
+           "input_image": {
+             "type": "string",
+             "description": "Path or URL to input image to animate"
+           },
+           "prompt": {
+             "type": "string",
+             "description": "Description of how the image should move/animate"
+           },
+           "model": {
+             "type": "string",
+             "optional": true,
+             "description": "Model to use (default: fal-ai/veo3.1/image-to-video)"
+           },
+           "duration": {
+             "type": "number",
+             "optional": true,
+             "description": "Video duration in seconds (default: 5)"
+           },
+           "fps": {
+             "type": "number",
+             "optional": true,
+             "description": "Frames per second (default: 24)"
+           },
+           "aspect_ratio": {
+             "type": "string",
+             "enum": [
+               "16:9",
+               "9:16",
+               "1:1"
+             ],
+             "optional": true,
+             "description": "Video aspect ratio"
+           },
+           "seed": {
+             "type": "number",
+             "optional": true,
+             "description": "Random seed for reproducibility"
+           }
+         }
+       }
+     }
+   ],
+   "generatedAt": "2026-02-03T18:38:06.215Z"
+ }
package/index.ts ADDED
@@ -0,0 +1,177 @@
+ import { z, createPlugin, type ToolDefinition, loadEnv, createLogger } from "openpets-sdk"
+ import { createFalClient, type FalImageGenerationOptions, type FalVideoGenerationOptions } from "./client"
+
+ export const FalPlugin = async () => {
+   const logger = createLogger("fal-plugin")
+
+   const env = loadEnv('fal')
+   const FAL_KEY = env.FAL_KEY
+
+   if (!FAL_KEY) {
+     logger.error("Missing required FAL environment variable")
+     throw new Error("FAL_KEY environment variable is required. Set it in .env file or pet configuration.")
+   }
+
+   logger.info("Initializing FAL plugin", { hasApiKey: !!FAL_KEY })
+
+   const falClient = createFalClient(FAL_KEY)
+
+   const tools: ToolDefinition[] = [
+     {
+       name: "fal-generate-image",
+       description: "Generate an image using FAL AI text-to-image models (Flux, Stable Diffusion, etc.)",
+       schema: z.object({
+         prompt: z.string().describe("Text description of the image to generate"),
+         model: z.string().optional().describe("Model to use (default: fal-ai/flux/dev). Options: fal-ai/flux/dev, fal-ai/flux/schnell, fal-ai/stable-diffusion-v3"),
+         image_size: z.enum(["square_hd", "square", "portrait_4_3", "portrait_16_9", "landscape_4_3", "landscape_16_9"]).optional().describe("Image aspect ratio (default: square)"),
+         num_images: z.number().optional().describe("Number of images to generate (default: 1)"),
+         seed: z.number().optional().describe("Random seed for reproducibility"),
+         num_inference_steps: z.number().optional().describe("Number of inference steps (more = higher quality, slower)"),
+         guidance_scale: z.number().optional().describe("How closely to follow the prompt (7.5 is typical)"),
+         negative_prompt: z.string().optional().describe("What to avoid in the image"),
+         enable_safety_checker: z.boolean().optional().describe("Enable content safety checker (default: true)")
+       }),
+       async execute(args) {
+         logger.info("Generating image", { prompt: args.prompt.substring(0, 50), model: args.model })
+
+         const result = await falClient.generateAndOpenImage({
+           prompt: args.prompt,
+           model: args.model,
+           image_size: args.image_size,
+           num_images: args.num_images,
+           seed: args.seed,
+           num_inference_steps: args.num_inference_steps,
+           guidance_scale: args.guidance_scale,
+           negative_prompt: args.negative_prompt,
+           enable_safety_checker: args.enable_safety_checker
+         })
+
+         return JSON.stringify({
+           success: result.success,
+           message: result.success ? "Image generated successfully" : "Image generation failed",
+           images: result.images?.map(img => img.url) || [],
+           localPath: result.localPath,
+           metadata: result.metadata,
+           requestId: result.requestId,
+           credits: result.credits,
+           error: result.error
+         }, null, 2)
+       }
+     },
+     {
+       name: "fal-transform-image",
+       description: "Transform an existing image using FAL AI image-to-image models (style transfer, editing, variations)",
+       schema: z.object({
+         input_image: z.string().describe("Path or URL to input image to transform"),
+         prompt: z.string().describe("Description of how to transform the image"),
+         model: z.string().optional().describe("Model to use (default: fal-ai/flux/dev)"),
+         image_size: z.enum(["square_hd", "square", "portrait_4_3", "portrait_16_9", "landscape_4_3", "landscape_16_9"]).optional().describe("Output image aspect ratio"),
+         num_images: z.number().optional().describe("Number of variations to generate (default: 1)"),
+         seed: z.number().optional().describe("Random seed for reproducibility"),
+         guidance_scale: z.number().optional().describe("How closely to follow the prompt"),
+         negative_prompt: z.string().optional().describe("What to avoid in the transformation")
+       }),
+       async execute(args) {
+         logger.info("Transforming image", { inputImage: args.input_image, prompt: args.prompt.substring(0, 50) })
+
+         const result = await falClient.generateAndOpenImage({
+           input_image: args.input_image,
+           prompt: args.prompt,
+           model: args.model,
+           image_size: args.image_size,
+           num_images: args.num_images,
+           seed: args.seed,
+           guidance_scale: args.guidance_scale,
+           negative_prompt: args.negative_prompt
+         })
+
+         return JSON.stringify({
+           success: result.success,
+           message: result.success ? "Image transformed successfully" : "Image transformation failed",
+           images: result.images?.map(img => img.url) || [],
+           localPath: result.localPath,
+           metadata: result.metadata,
+           requestId: result.requestId,
+           credits: result.credits,
+           error: result.error
+         }, null, 2)
+       }
+     },
+     {
+       name: "fal-generate-video",
+       description: "Generate a video using FAL AI text-to-video models (Kling, Runway Gen-3, etc.)",
+       schema: z.object({
+         prompt: z.string().describe("Text description of the video to generate"),
+         model: z.string().optional().describe("Model to use (default: fal-ai/kling-video-v1). Options: fal-ai/kling-video-v1, fal-ai/runway-gen3"),
+         duration: z.number().optional().describe("Video duration in seconds (default: 5)"),
+         fps: z.number().optional().describe("Frames per second (default: 24)"),
+         aspect_ratio: z.enum(["16:9", "9:16", "1:1"]).optional().describe("Video aspect ratio (default: 16:9)"),
+         num_videos: z.number().optional().describe("Number of videos to generate (default: 1)"),
+         seed: z.number().optional().describe("Random seed for reproducibility")
+       }),
+       async execute(args) {
+         logger.info("Generating video", { prompt: args.prompt.substring(0, 50), duration: args.duration })
+
+         const result = await falClient.generateAndOpenVideo({
+           prompt: args.prompt,
+           model: args.model,
+           duration: args.duration,
+           fps: args.fps,
+           aspect_ratio: args.aspect_ratio,
+           num_videos: args.num_videos,
+           seed: args.seed
+         })
+
+         return JSON.stringify({
+           success: result.success,
+           message: result.success ? "Video generated successfully" : "Video generation failed",
+           videos: result.videos?.map(vid => vid.url) || [],
+           localPath: result.localPath,
+           metadata: result.metadata,
+           requestId: result.requestId,
+           credits: result.credits,
+           error: result.error
+         }, null, 2)
+       }
+     },
+     {
+       name: "fal-animate-image",
+       description: "Animate a static image using FAL AI image-to-video models",
+       schema: z.object({
+         input_image: z.string().describe("Path or URL to input image to animate"),
+         prompt: z.string().describe("Description of how the image should move/animate"),
+         model: z.string().optional().describe("Model to use (default: fal-ai/veo3.1/image-to-video)"),
+         duration: z.number().optional().describe("Video duration in seconds (default: 5)"),
+         fps: z.number().optional().describe("Frames per second (default: 24)"),
+         aspect_ratio: z.enum(["16:9", "9:16", "1:1"]).optional().describe("Video aspect ratio"),
+         seed: z.number().optional().describe("Random seed for reproducibility")
+       }),
+       async execute(args) {
+         logger.info("Animating image", { inputImage: args.input_image, prompt: args.prompt.substring(0, 50) })
+
+         const result = await falClient.generateAndOpenVideo({
+           input_image: args.input_image,
+           prompt: args.prompt,
+           model: args.model,
+           duration: args.duration,
+           fps: args.fps,
+           aspect_ratio: args.aspect_ratio,
+           seed: args.seed
+         })
+
+         return JSON.stringify({
+           success: result.success,
+           message: result.success ? "Image animated successfully" : "Image animation failed",
+           videos: result.videos?.map(vid => vid.url) || [],
+           localPath: result.localPath,
+           metadata: result.metadata,
+           requestId: result.requestId,
+           credits: result.credits,
+           error: result.error
+         }, null, 2)
+       }
+     }
+   ]
+
+   return createPlugin(tools)
+ }
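+
+ // Usage sketch (illustrative only): the async factory is assumed to be awaited
+ // by the OpenCode host, which receives the plugin built via createPlugin(tools).
+ //   const plugin = await FalPlugin()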
package/package.json ADDED
@@ -0,0 +1,125 @@
+ {
+   "$schema": "https://pets.studio/config.json",
+   "name": "@openpets/fal",
+   "version": "1.0.1",
+   "title": "FAL",
+   "subtitle": "Flux, Stable Diffusion, Kling Video & More",
+   "description": "FAL AI image and video generation plugin for OpenCode - text-to-image, image-to-image, text-to-video, and image-to-video using Flux, Stable Diffusion, Kling, and more",
+   "categories": [
+     "ai",
+     "image-generation",
+     "video-generation",
+     "creative"
+   ],
+   "homepage": "https://fal.ai",
+   "faq": [
+     {
+       "question": "How do I get a FAL AI API key?",
+       "answer": "Visit https://fal.ai, create an account, and generate an API key from your dashboard."
+     },
+     {
+       "question": "What image models are supported?",
+       "answer": "The plugin supports Flux, Stable Diffusion, and other popular text-to-image and image-to-image models available on FAL AI."
+     },
+     {
+       "question": "What video models are supported?",
+       "answer": "The plugin supports Kling, Runway Gen-3, and other text-to-video and image-to-video models available on FAL AI."
+     },
+     {
+       "question": "How much does it cost to generate images and videos?",
+       "answer": "Pricing varies by model. Check https://fal.ai/pricing for current rates. You'll need credits in your FAL AI account."
+     }
+   ],
+   "main": "index.ts",
+   "types": "index.ts",
+   "keywords": [
+     "opencode",
+     "plugin",
+     "fal",
+     "ai",
+     "image-generation",
+     "video-generation",
+     "flux",
+     "stable-diffusion",
+     "kling-video",
+     "text-to-image",
+     "image-to-image",
+     "text-to-video",
+     "image-to-video"
+   ],
+   "envVariables": {
+     "required": [
+       {
+         "name": "FAL_KEY",
+         "description": "FAL AI API key for image and video generation",
+         "provider": "FAL AI",
+         "priority": 1,
+         "example": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+       }
+     ]
+   },
+   "queries": [
+     "generate an image of a sunset over the ocean",
+     "transform this image into watercolor style",
+     "generate a video of clouds drifting across the sky",
+     "animate this image with a camera pan"
+   ],
+   "scenarios": {
+     "text-to-image-basic": [
+       "use fal-generate-image to create a cat sitting on a windowsill",
+       "use fal-generate-image with square_hd size for a landscape with mountains",
+       "use fal-generate-image to make a portrait of a robot"
+     ],
+     "image-transformation": [
+       "use fal-transform-image to convert photo to watercolor style",
+       "use fal-transform-image to add cyberpunk effects to image",
+       "use fal-transform-image to change day photo to night"
+     ],
+     "video-generation": [
+       "use fal-generate-video to create ocean waves crashing on beach",
+       "use fal-generate-video with 16:9 aspect for rotating product",
+       "use fal-generate-video at 30 fps for smooth fireworks animation"
+     ],
+     "image-animation": [
+       "use fal-animate-image to add camera pan to landscape photo",
+       "use fal-animate-image with zoom motion for city skyline",
+       "use fal-animate-image to create parallax effect in forest scene"
+     ]
+   },
+   "scripts": {
+     "build": "pets build",
+     "build:tsc": "tsc",
+     "quickstart": "opencode run \"list available fal commands\"",
+     "test:generate-image": "opencode run \"Generate an image using FAL AI text-to-image models (Flux, Stable Diffusion, etc.)\"",
+     "test:transform-image": "opencode run \"Transform an existing image using FAL AI image-to-image models (style transfer, editing, variations)\"",
+     "test:generate-video": "opencode run \"Generate a video using FAL AI text-to-video models (Kling, Runway Gen-3, etc.)\"",
+     "test:animate-image": "opencode run \"Animate a static image using FAL AI image-to-video models\"",
+     "test:all": "echo \"Run individual test scripts to test specific tools\""
+   },
+   "type": "module",
+   "dependencies": {
+     "openpets-sdk": "^1.0.0",
+     "@fal-ai/client": "^1.7.2",
+     "dotenv": "^16.5.0"
+   },
+   "publishConfig": {
+     "access": "public"
+   },
+   "files": [
+     "index.ts",
+     "index.js",
+     "client.ts",
+     "client.js",
+     "*.d.ts",
+     "lib/**/*",
+     "src/**/*",
+     "commands.json",
+     "public/**/*",
+     "README.md",
+     "prompts/**/*"
+   ],
+   "repository": {
+     "type": "git",
+     "url": "https://github.com/raggle-ai/pets/pets/fal"
+   }
+ }
package/src/custom.ts ADDED
@@ -0,0 +1,162 @@
+
+ // const falClient = createFalClient(FAL_KEY)
+
+ // // Define write operation patterns for read-only mode filtering
+ // const WRITE_PATTERNS = ['generate', 'transform', 'animate']
+
+ // const apiTools: ToolDefinition[] = [
+ //   {
+ //     name: "fal-generate-image",
+ //     description: "Generate an image using FAL AI text-to-image models (Flux, Stable Diffusion, etc.)",
+ //     schema: z.object({
+ //       prompt: z.string().describe("Text description of the image to generate"),
+ //       model: z.string().optional().describe("Model to use (default: fal-ai/flux/dev). Options: fal-ai/flux/dev, fal-ai/flux/schnell, fal-ai/stable-diffusion-v3"),
+ //       image_size: z.enum(["square_hd", "square", "portrait_4_3", "portrait_16_9", "landscape_4_3", "landscape_16_9"]).optional().describe("Image aspect ratio (default: square)"),
+ //       num_images: z.number().optional().describe("Number of images to generate (default: 1)"),
+ //       seed: z.number().optional().describe("Random seed for reproducibility"),
+ //       num_inference_steps: z.number().optional().describe("Number of inference steps (more = higher quality, slower)"),
+ //       guidance_scale: z.number().optional().describe("How closely to follow the prompt (7.5 is typical)"),
+ //       negative_prompt: z.string().optional().describe("What to avoid in the image"),
+ //       enable_safety_checker: z.boolean().optional().describe("Enable content safety checker (default: true)")
+ //     }),
+ //     async execute(args) {
+ //       logger.info("Generating image", { prompt: args.prompt.substring(0, 50), model: args.model })
+
+ //       const result = await falClient.generateAndOpenImage({
+ //         prompt: args.prompt,
+ //         model: args.model,
+ //         image_size: args.image_size,
+ //         num_images: args.num_images,
+ //         seed: args.seed,
+ //         num_inference_steps: args.num_inference_steps,
+ //         guidance_scale: args.guidance_scale,
+ //         negative_prompt: args.negative_prompt,
+ //         enable_safety_checker: args.enable_safety_checker
+ //       })
+
+ //       return JSON.stringify({
+ //         success: result.success,
+ //         message: result.success ? "Image generated successfully" : "Image generation failed",
+ //         images: result.images?.map(img => img.url) || [],
+ //         localPath: result.localPath,
+ //         metadata: result.metadata,
+ //         requestId: result.requestId,
+ //         credits: result.credits,
+ //         error: result.error
+ //       }, null, 2)
+ //     }
+ //   },
+ //   {
+ //     name: "fal-transform-image",
+ //     description: "Transform an existing image using FAL AI image-to-image models (style transfer, editing, variations)",
+ //     schema: z.object({
+ //       input_image: z.string().describe("Path or URL to input image to transform"),
+ //       prompt: z.string().describe("Description of how to transform the image"),
+ //       model: z.string().optional().describe("Model to use (default: fal-ai/flux/dev)"),
+ //       image_size: z.enum(["square_hd", "square", "portrait_4_3", "portrait_16_9", "landscape_4_3", "landscape_16_9"]).optional().describe("Output image aspect ratio"),
+ //       num_images: z.number().optional().describe("Number of variations to generate (default: 1)"),
+ //       seed: z.number().optional().describe("Random seed for reproducibility"),
+ //       guidance_scale: z.number().optional().describe("How closely to follow the prompt"),
+ //       negative_prompt: z.string().optional().describe("What to avoid in the transformation")
+ //     }),
+ //     async execute(args) {
+ //       logger.info("Transforming image", { inputImage: args.input_image, prompt: args.prompt.substring(0, 50) })
+
+ //       const result = await falClient.generateAndOpenImage({
+ //         input_image: args.input_image,
+ //         prompt: args.prompt,
+ //         model: args.model,
+ //         image_size: args.image_size,
+ //         num_images: args.num_images,
+ //         seed: args.seed,
+ //         guidance_scale: args.guidance_scale,
+ //         negative_prompt: args.negative_prompt
+ //       })
+
+ //       return JSON.stringify({
+ //         success: result.success,
+ //         message: result.success ? "Image transformed successfully" : "Image transformation failed",
+ //         images: result.images?.map(img => img.url) || [],
+ //         localPath: result.localPath,
+ //         metadata: result.metadata,
+ //         requestId: result.requestId,
+ //         credits: result.credits,
+ //         error: result.error
+ //       }, null, 2)
+ //     }
+ //   },
+ //   {
+ //     name: "fal-generate-video",
+ //     description: "Generate a video using FAL AI text-to-video models (Kling, Runway Gen-3, etc.)",
+ //     schema: z.object({
+ //       prompt: z.string().describe("Text description of the video to generate"),
+ //       model: z.string().optional().describe("Model to use (default: fal-ai/kling-video-v1). Options: fal-ai/kling-video-v1, fal-ai/runway-gen3"),
+ //       duration: z.number().optional().describe("Video duration in seconds (default: 5)"),
+ //       fps: z.number().optional().describe("Frames per second (default: 24)"),
+ //       aspect_ratio: z.enum(["16:9", "9:16", "1:1"]).optional().describe("Video aspect ratio (default: 16:9)"),
+ //       num_videos: z.number().optional().describe("Number of videos to generate (default: 1)"),
+ //       seed: z.number().optional().describe("Random seed for reproducibility")
+ //     }),
+ //     async execute(args) {
+ //       logger.info("Generating video", { prompt: args.prompt.substring(0, 50), duration: args.duration })
+
+ //       const result = await falClient.generateAndOpenVideo({
+ //         prompt: args.prompt,
+ //         model: args.model,
+ //         duration: args.duration,
+ //         fps: args.fps,
+ //         aspect_ratio: args.aspect_ratio,
+ //         num_videos: args.num_videos,
+ //         seed: args.seed
+ //       })
+
+ //       return JSON.stringify({
+ //         success: result.success,
+ //         message: result.success ? "Video generated successfully" : "Video generation failed",
+ //         videos: result.videos?.map(vid => vid.url) || [],
+ //         localPath: result.localPath,
+ //         metadata: result.metadata,
+ //         requestId: result.requestId,
+ //         credits: result.credits,
+ //         error: result.error
+ //       }, null, 2)
+ //     }
+ //   },
+ //   {
+ //     name: "fal-animate-image",
+ //     description: "Animate a static image using FAL AI image-to-video models",
+ //     schema: z.object({
+ //       input_image: z.string().describe("Path or URL to input image to animate"),
+ //       prompt: z.string().describe("Description of how the image should move/animate"),
+ //       model: z.string().optional().describe("Model to use (default: fal-ai/veo3.1/image-to-video)"),
+ //       duration: z.number().optional().describe("Video duration in seconds (default: 5)"),
+ //       fps: z.number().optional().describe("Frames per second (default: 24)"),
+ //       aspect_ratio: z.enum(["16:9", "9:16", "1:1"]).optional().describe("Video aspect ratio"),
+ //       seed: z.number().optional().describe("Random seed for reproducibility")
+ //     }),
+ //     async execute(args) {
+ //       logger.info("Animating image", { inputImage: args.input_image, prompt: args.prompt.substring(0, 50) })
+
+ //       const result = await falClient.generateAndOpenVideo({
+ //         input_image: args.input_image,
+ //         prompt: args.prompt,
+ //         model: args.model,
+ //         duration: args.duration,
+ //         fps: args.fps,
+ //         aspect_ratio: args.aspect_ratio,
+ //         seed: args.seed
+ //       })
+
+ //       return JSON.stringify({
+ //         success: result.success,
+ //         message: result.success ? "Image animated successfully" : "Image animation failed",
+ //         videos: result.videos?.map(vid => vid.url) || [],
+ //         localPath: result.localPath,
+ //         metadata: result.metadata,
+ //         requestId: result.requestId,
+ //         credits: result.credits,
+ //         error: result.error
+ //       }, null, 2)
+ //     }
+ //   }
+ // ]