@genfeedai/types 0.2.0 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,45 +1,29 @@
1
1
  # @genfeedai/types
2
2
 
3
- Type definitions for the Genfeed workflow engine.
3
+ Type definitions for the Genfeed workflow engine and node ecosystem.
4
4
 
5
- ## Installation
5
+ ## Install
6
6
 
7
7
  ```bash
8
- npm install @genfeedai/types
9
- # or
10
- bun add @genfeedai/types
8
+ npm i @genfeedai/types
11
9
  ```
12
10
 
13
11
  ## Usage
14
12
 
15
- ```typescript
13
+ ```ts
16
14
  import type { WorkflowFile, WorkflowNode, WorkflowEdge } from '@genfeedai/types';
17
- import type { NodeType, HandleType } from '@genfeedai/types';
15
+ import type { ImageNodeData } from '@genfeedai/types/nodes';
16
+ import replicateSchemas from '@genfeedai/types/replicate/schemas.json';
18
17
  ```
19
18
 
20
- ### Subpath Exports
19
+ ## Related Packages
21
20
 
22
- ```typescript
23
- // Node type definitions
24
- import type { ImageNodeData, VideoNodeData } from '@genfeedai/types/nodes';
21
+ - `@genfeedai/core`
22
+ - `@genfeedai/workflow-ui`
25
23
 
26
- // Workflow file schema
27
- import type { WorkflowFile } from '@genfeedai/types/workflow';
24
+ ## Build Faster with Genfeed
28
25
 
29
- // ComfyUI integration types
30
- import type { ComfyUIWorkflow } from '@genfeedai/types/comfyui';
31
-
32
- // Replicate model schemas
33
- import type { ReplicateModel } from '@genfeedai/types/replicate';
34
- ```
35
-
36
- ## Key Exports
37
-
38
- - **Workflow types**: `WorkflowFile`, `WorkflowNode`, `WorkflowEdge`, `NodeGroup`
39
- - **Node types**: Data interfaces for all 36 node types (image, video, audio, text)
40
- - **Enums**: `NodeCategory`, `HandleType`, `EdgeStyle`
41
- - **ComfyUI types**: ComfyUI workflow and prompt schemas
42
- - **Replicate types**: Replicate model input/output schemas
26
+ Use strongly typed workflow contracts in your own apps, or use Genfeed at [https://genfeed.ai](https://genfeed.ai).
43
27
 
44
28
  ## License
45
29
 
@@ -1,7 +1,7 @@
1
1
  /**
2
2
  * Auto-generated Replicate model types
3
3
  * DO NOT EDIT - Run `bun run sync:replicate` to regenerate
4
- * Generated at: 2026-01-29T02:24:16.582Z
4
+ * Generated at: 2026-03-06T02:26:55.223Z
5
5
  */
6
6
  /**
7
7
  * Input parameters for google/nano-banana
@@ -29,6 +29,47 @@ interface NanoBananaInput {
29
29
  }
30
30
  /** Output type for google/nano-banana */
31
31
  type NanoBananaOutput = string;
32
+ /**
33
+ * Input parameters for google/nano-banana-2
34
+ */
35
+ interface NanoBanana2Input {
36
+ /**
37
+ * A text description of the image you want to generate
38
+ */
39
+ prompt: string;
40
+ /**
41
+ * Input images to transform or use as reference (supports up to 14 images)
42
+ * @default []
43
+ */
44
+ image_input?: string[];
45
+ /**
46
+ * Aspect ratio of the generated image
47
+ * @default "match_input_image"
48
+ */
49
+ aspect_ratio?: unknown;
50
+ /**
51
+ * Resolution of the generated image. Higher resolutions take longer to generate.
52
+ * @default "1K"
53
+ */
54
+ resolution?: unknown;
55
+ /**
56
+ * Use Google Web Search grounding to generate images based on real-time information (e.g. weather, sports scores, recent events).
57
+ * @default false
58
+ */
59
+ google_search?: boolean;
60
+ /**
61
+ * Use Google Image Search grounding to find web images as visual context for generation. When enabled, web search is also used automatically.
62
+ * @default false
63
+ */
64
+ image_search?: boolean;
65
+ /**
66
+ * Format of the output image
67
+ * @default "jpg"
68
+ */
69
+ output_format?: unknown;
70
+ }
71
+ /** Output type for google/nano-banana-2 */
72
+ type NanoBanana2Output = string;
32
73
  /**
33
74
  * Input parameters for google/nano-banana-pro
34
75
  */
@@ -62,6 +103,11 @@ interface NanoBananaProInput {
62
103
  * @default "block_only_high"
63
104
  */
64
105
  safety_filter_level?: unknown;
106
+ /**
107
+ * Fallback to another model (currently bytedance/seedream-5) if Nano Banana Pro is at capacity.
108
+ * @default false
109
+ */
110
+ allow_fallback_model?: boolean;
65
111
  }
66
112
  /** Output type for google/nano-banana-pro */
67
113
  type NanoBananaProOutput = string;
@@ -308,171 +354,6 @@ interface Flux11ProInput {
308
354
  }
309
355
  /** Output type for black-forest-labs/flux-1.1-pro */
310
356
  type Flux11ProOutput = string;
311
- /**
312
- * Input parameters for stability-ai/sdxl
313
- */
314
- interface SDXLInput {
315
- /**
316
- * Input prompt
317
- * @default "An astronaut riding a rainbow unicorn"
318
- */
319
- prompt?: string;
320
- /**
321
- * Input Negative Prompt
322
- * @default ""
323
- */
324
- negative_prompt?: string;
325
- /**
326
- * Input image for img2img or inpaint mode
327
- */
328
- image?: string;
329
- /**
330
- * Input mask for inpaint mode. Black areas will be preserved, white areas will be inpainted.
331
- */
332
- mask?: string;
333
- /**
334
- * Width of output image
335
- * @default 1024
336
- */
337
- width?: number;
338
- /**
339
- * Height of output image
340
- * @default 1024
341
- */
342
- height?: number;
343
- /**
344
- * Number of images to output.
345
- * @default 1
346
- * @range min: 1, max: 4
347
- */
348
- num_outputs?: number;
349
- /**
350
- * scheduler
351
- * @default "K_EULER"
352
- */
353
- scheduler?: unknown;
354
- /**
355
- * Number of denoising steps
356
- * @default 50
357
- * @range min: 1, max: 500
358
- */
359
- num_inference_steps?: number;
360
- /**
361
- * Scale for classifier-free guidance
362
- * @default 7.5
363
- * @range min: 1, max: 50
364
- */
365
- guidance_scale?: number;
366
- /**
367
- * Prompt strength when using img2img / inpaint. 1.0 corresponds to full destruction of information in image
368
- * @default 0.8
369
- * @range min: 0, max: 1
370
- */
371
- prompt_strength?: number;
372
- /**
373
- * Random seed. Leave blank to randomize the seed
374
- */
375
- seed?: number;
376
- /**
377
- * Which refine style to use
378
- * @default "no_refiner"
379
- */
380
- refine?: unknown;
381
- /**
382
- * For expert_ensemble_refiner, the fraction of noise to use
383
- * @default 0.8
384
- * @range min: 0, max: 1
385
- */
386
- high_noise_frac?: number;
387
- /**
388
- * For base_image_refiner, the number of steps to refine, defaults to num_inference_steps
389
- */
390
- refine_steps?: number;
391
- /**
392
- * Applies a watermark to enable determining if an image is generated in downstream applications. If you have other provisions for generating or deploying images safely, you can use this to disable watermarking.
393
- * @default true
394
- */
395
- apply_watermark?: boolean;
396
- /**
397
- * LoRA additive scale. Only applicable on trained models.
398
- * @default 0.6
399
- * @range min: 0, max: 1
400
- */
401
- lora_scale?: number;
402
- /**
403
- * Replicate LoRA weights to use. Leave blank to use the default weights.
404
- */
405
- replicate_weights?: string;
406
- /**
407
- * Disable safety checker for generated images. This feature is only available through the API. See [https://replicate.com/docs/how-does-replicate-work#safety](https://replicate.com/docs/how-does-replicate-work#safety)
408
- * @default false
409
- */
410
- disable_safety_checker?: boolean;
411
- }
412
- /** Output type for stability-ai/sdxl */
413
- type SDXLOutput = string[];
414
- /**
415
- * Input parameters for bytedance/sdxl-lightning-4step
416
- */
417
- interface SDXLLightningInput {
418
- /**
419
- * Input prompt
420
- * @default "self-portrait of a woman, lightning in the background"
421
- */
422
- prompt?: string;
423
- /**
424
- * Negative Input prompt
425
- * @default "worst quality, low quality"
426
- */
427
- negative_prompt?: string;
428
- /**
429
- * Width of output image. Recommended 1024 or 1280
430
- * @default 1024
431
- * @range min: 256, max: 1280
432
- */
433
- width?: number;
434
- /**
435
- * Height of output image. Recommended 1024 or 1280
436
- * @default 1024
437
- * @range min: 256, max: 1280
438
- */
439
- height?: number;
440
- /**
441
- * Number of images to output.
442
- * @default 1
443
- * @range min: 1, max: 4
444
- */
445
- num_outputs?: number;
446
- /**
447
- * scheduler
448
- * @default "K_EULER"
449
- */
450
- scheduler?: unknown;
451
- /**
452
- * Number of denoising steps. 4 for best results
453
- * @default 4
454
- * @range min: 1, max: 10
455
- */
456
- num_inference_steps?: number;
457
- /**
458
- * Scale for classifier-free guidance
459
- * @default 0
460
- * @range min: 0, max: 50
461
- */
462
- guidance_scale?: number;
463
- /**
464
- * Random seed. Leave blank to randomize the seed
465
- * @default 0
466
- */
467
- seed?: number;
468
- /**
469
- * Disable safety checker for generated images
470
- * @default false
471
- */
472
- disable_safety_checker?: boolean;
473
- }
474
- /** Output type for bytedance/sdxl-lightning-4step */
475
- type SDXLLightningOutput = string[];
476
357
  /**
477
358
  * Input parameters for black-forest-labs/flux-kontext-dev
478
359
  */
@@ -991,32 +872,6 @@ interface Lipsync2ProInput {
991
872
  }
992
873
  /** Output type for sync/lipsync-2-pro */
993
874
  type Lipsync2ProOutput = string;
994
- /**
995
- * Input parameters for bytedance/latentsync
996
- */
997
- interface LatentSyncInput {
998
- /**
999
- * Input video
1000
- */
1001
- video?: string;
1002
- /**
1003
 - * Input audio
1004
- */
1005
- audio?: string;
1006
- /**
1007
- * Guidance scale
1008
- * @default 1
1009
- * @range min: 0, max: 10
1010
- */
1011
- guidance_scale?: number;
1012
- /**
1013
- * Set to 0 for Random seed
1014
- * @default 0
1015
- */
1016
- seed?: number;
1017
- }
1018
- /** Output type for bytedance/latentsync */
1019
- type LatentSyncOutput = string;
1020
875
  /**
1021
876
  * Input parameters for pixverse/lipsync
1022
877
  */
@@ -1033,17 +888,16 @@ interface PixverseLipsyncInput {
1033
888
  /** Output type for pixverse/lipsync */
1034
889
  type PixverseLipsyncOutput = string;
1035
890
  /** All supported Replicate model IDs */
1036
- type ReplicateModelId = 'google/nano-banana' | 'google/nano-banana-pro' | 'prunaai/z-image-turbo' | 'black-forest-labs/flux-schnell' | 'black-forest-labs/flux-dev' | 'black-forest-labs/flux-1.1-pro' | 'stability-ai/sdxl' | 'bytedance/sdxl-lightning-4step' | 'black-forest-labs/flux-kontext-dev' | 'google/veo-3.1-fast' | 'google/veo-3.1' | 'kwaivgi/kling-v2.5-turbo-pro' | 'kwaivgi/kling-v2.6-motion-control' | 'minimax/video-01' | 'luma/ray' | 'meta/meta-llama-3.1-405b-instruct' | 'luma/reframe-image' | 'luma/reframe-video' | 'sync/lipsync-2' | 'sync/lipsync-2-pro' | 'bytedance/latentsync' | 'pixverse/lipsync';
891
+ type ReplicateModelId = 'google/nano-banana' | 'google/nano-banana-2' | 'google/nano-banana-pro' | 'prunaai/z-image-turbo' | 'black-forest-labs/flux-schnell' | 'black-forest-labs/flux-dev' | 'black-forest-labs/flux-1.1-pro' | 'black-forest-labs/flux-kontext-dev' | 'google/veo-3.1-fast' | 'google/veo-3.1' | 'kwaivgi/kling-v2.5-turbo-pro' | 'kwaivgi/kling-v2.6-motion-control' | 'minimax/video-01' | 'luma/ray' | 'meta/meta-llama-3.1-405b-instruct' | 'luma/reframe-image' | 'luma/reframe-video' | 'sync/lipsync-2' | 'sync/lipsync-2-pro' | 'pixverse/lipsync';
1037
892
  /** Map from model ID to input type */
1038
893
  interface ReplicateModelInputMap {
1039
894
  'google/nano-banana': NanoBananaInput;
895
+ 'google/nano-banana-2': NanoBanana2Input;
1040
896
  'google/nano-banana-pro': NanoBananaProInput;
1041
897
  'prunaai/z-image-turbo': ZImageTurboInput;
1042
898
  'black-forest-labs/flux-schnell': FluxSchnellInput;
1043
899
  'black-forest-labs/flux-dev': FluxDevInput;
1044
900
  'black-forest-labs/flux-1.1-pro': Flux11ProInput;
1045
- 'stability-ai/sdxl': SDXLInput;
1046
- 'bytedance/sdxl-lightning-4step': SDXLLightningInput;
1047
901
  'black-forest-labs/flux-kontext-dev': FluxKontextDevInput;
1048
902
  'google/veo-3.1-fast': Veo31FastInput;
1049
903
  'google/veo-3.1': Veo31Input;
@@ -1056,19 +910,17 @@ interface ReplicateModelInputMap {
1056
910
  'luma/reframe-video': LumaReframeVideoInput;
1057
911
  'sync/lipsync-2': Lipsync2Input;
1058
912
  'sync/lipsync-2-pro': Lipsync2ProInput;
1059
- 'bytedance/latentsync': LatentSyncInput;
1060
913
  'pixverse/lipsync': PixverseLipsyncInput;
1061
914
  }
1062
915
  /** Map from model ID to output type */
1063
916
  interface ReplicateModelOutputMap {
1064
917
  'google/nano-banana': NanoBananaOutput;
918
+ 'google/nano-banana-2': NanoBanana2Output;
1065
919
  'google/nano-banana-pro': NanoBananaProOutput;
1066
920
  'prunaai/z-image-turbo': ZImageTurboOutput;
1067
921
  'black-forest-labs/flux-schnell': FluxSchnellOutput;
1068
922
  'black-forest-labs/flux-dev': FluxDevOutput;
1069
923
  'black-forest-labs/flux-1.1-pro': Flux11ProOutput;
1070
- 'stability-ai/sdxl': SDXLOutput;
1071
- 'bytedance/sdxl-lightning-4step': SDXLLightningOutput;
1072
924
  'black-forest-labs/flux-kontext-dev': FluxKontextDevOutput;
1073
925
  'google/veo-3.1-fast': Veo31FastOutput;
1074
926
  'google/veo-3.1': Veo31Output;
@@ -1081,7 +933,6 @@ interface ReplicateModelOutputMap {
1081
933
  'luma/reframe-video': LumaReframeVideoOutput;
1082
934
  'sync/lipsync-2': Lipsync2Output;
1083
935
  'sync/lipsync-2-pro': Lipsync2ProOutput;
1084
- 'bytedance/latentsync': LatentSyncOutput;
1085
936
  'pixverse/lipsync': PixverseLipsyncOutput;
1086
937
  }
1087
938
  /** Get input type for a model */
@@ -1089,4 +940,4 @@ type ModelInput<T extends ReplicateModelId> = ReplicateModelInputMap[T];
1089
940
  /** Get output type for a model */
1090
941
  type ModelOutput<T extends ReplicateModelId> = ReplicateModelOutputMap[T];
1091
942
 
1092
- export type { Flux11ProInput, Flux11ProOutput, FluxDevInput, FluxDevOutput, FluxKontextDevInput, FluxKontextDevOutput, FluxSchnellInput, FluxSchnellOutput, KlingV25TurboProInput, KlingV25TurboProOutput, KlingV26MotionControlInput, KlingV26MotionControlOutput, LatentSyncInput, LatentSyncOutput, Lipsync2Input, Lipsync2Output, Lipsync2ProInput, Lipsync2ProOutput, LumaRayInput, LumaRayOutput, LumaReframeImageInput, LumaReframeImageOutput, LumaReframeVideoInput, LumaReframeVideoOutput, MetaLlama31Input, MetaLlama31Output, MinimaxVideo01Input, MinimaxVideo01Output, ModelInput, ModelOutput, NanoBananaInput, NanoBananaOutput, NanoBananaProInput, NanoBananaProOutput, PixverseLipsyncInput, PixverseLipsyncOutput, ReplicateModelId, ReplicateModelInputMap, ReplicateModelOutputMap, SDXLInput, SDXLLightningInput, SDXLLightningOutput, SDXLOutput, Veo31FastInput, Veo31FastOutput, Veo31Input, Veo31Output, ZImageTurboInput, ZImageTurboOutput };
943
+ export type { Flux11ProInput, Flux11ProOutput, FluxDevInput, FluxDevOutput, FluxKontextDevInput, FluxKontextDevOutput, FluxSchnellInput, FluxSchnellOutput, KlingV25TurboProInput, KlingV25TurboProOutput, KlingV26MotionControlInput, KlingV26MotionControlOutput, Lipsync2Input, Lipsync2Output, Lipsync2ProInput, Lipsync2ProOutput, LumaRayInput, LumaRayOutput, LumaReframeImageInput, LumaReframeImageOutput, LumaReframeVideoInput, LumaReframeVideoOutput, MetaLlama31Input, MetaLlama31Output, MinimaxVideo01Input, MinimaxVideo01Output, ModelInput, ModelOutput, NanoBanana2Input, NanoBanana2Output, NanoBananaInput, NanoBananaOutput, NanoBananaProInput, NanoBananaProOutput, PixverseLipsyncInput, PixverseLipsyncOutput, ReplicateModelId, ReplicateModelInputMap, ReplicateModelOutputMap, Veo31FastInput, Veo31FastOutput, Veo31Input, Veo31Output, ZImageTurboInput, ZImageTurboOutput };