comfyui-workflow-templates 0.1.94__py3-none-any.whl → 0.1.96__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of comfyui-workflow-templates might be problematic.
- comfyui_workflow_templates/templates/2_pass_pose_worship.json +551 -139
- comfyui_workflow_templates/templates/3d_hunyuan3d-v2.1.json +8 -8
- comfyui_workflow_templates/templates/3d_hunyuan3d_image_to_model.json +187 -295
- comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model.json +158 -160
- comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model_turbo.json +54 -56
- comfyui_workflow_templates/templates/ByteDance-Seedance_00003_.json +210 -0
- comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_max_image.json +124 -89
- comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_multiple_images_input.json +138 -99
- comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_pro_image.json +191 -156
- comfyui_workflow_templates/templates/api_bfl_flux_pro_t2i.json +81 -79
- comfyui_workflow_templates/templates/api_bytedance_flf2v.json +90 -90
- comfyui_workflow_templates/templates/api_bytedance_image_to_video.json +2 -2
- comfyui_workflow_templates/templates/api_bytedance_seedream4.json +227 -222
- comfyui_workflow_templates/templates/api_bytedance_text_to_video.json +39 -40
- comfyui_workflow_templates/templates/api_google_gemini.json +6 -7
- comfyui_workflow_templates/templates/api_google_gemini_image.json +113 -47
- comfyui_workflow_templates/templates/api_hailuo_minimax_i2v.json +59 -40
- comfyui_workflow_templates/templates/api_hailuo_minimax_t2v.json +28 -28
- comfyui_workflow_templates/templates/api_hailuo_minimax_video.json +37 -31
- comfyui_workflow_templates/templates/api_ideogram_v3_t2i.json +59 -43
- comfyui_workflow_templates/templates/api_kling_effects.json +58 -31
- comfyui_workflow_templates/templates/api_kling_flf.json +74 -43
- comfyui_workflow_templates/templates/api_kling_i2v.json +58 -32
- comfyui_workflow_templates/templates/api_luma_i2v.json +115 -118
- comfyui_workflow_templates/templates/api_luma_photon_i2i.json +102 -54
- comfyui_workflow_templates/templates/api_luma_photon_style_ref.json +320 -163
- comfyui_workflow_templates/templates/api_luma_t2v.json +59 -50
- comfyui_workflow_templates/templates/api_moonvalley_image_to_video.json +40 -41
- comfyui_workflow_templates/templates/api_moonvalley_video_to_video_motion_transfer.json +10 -11
- comfyui_workflow_templates/templates/api_moonvalley_video_to_video_pose_control.json +36 -37
- comfyui_workflow_templates/templates/api_openai_dall_e_2_inpaint.json +47 -11
- comfyui_workflow_templates/templates/api_openai_dall_e_2_t2i.json +8 -29
- comfyui_workflow_templates/templates/api_openai_image_1_i2i.json +47 -39
- comfyui_workflow_templates/templates/api_openai_image_1_inpaint.json +11 -32
- comfyui_workflow_templates/templates/api_openai_image_1_multi_inputs.json +63 -43
- comfyui_workflow_templates/templates/api_openai_image_1_t2i.json +46 -67
- comfyui_workflow_templates/templates/api_openai_sora_video.json +202 -162
- comfyui_workflow_templates/templates/api_pika_i2v.json +31 -31
- comfyui_workflow_templates/templates/api_pika_scene.json +16 -7
- comfyui_workflow_templates/templates/api_pixverse_i2v.json +72 -77
- comfyui_workflow_templates/templates/api_pixverse_t2v.json +20 -16
- comfyui_workflow_templates/templates/api_pixverse_template_i2v.json +40 -36
- comfyui_workflow_templates/templates/api_recraft_image_gen_with_color_control.json +170 -75
- comfyui_workflow_templates/templates/api_recraft_image_gen_with_style_control.json +212 -199
- comfyui_workflow_templates/templates/api_recraft_vector_gen.json +78 -69
- comfyui_workflow_templates/templates/api_rodin_gen2.json +214 -208
- comfyui_workflow_templates/templates/api_rodin_image_to_model.json +479 -439
- comfyui_workflow_templates/templates/api_rodin_multiview_to_model.json +191 -135
- comfyui_workflow_templates/templates/api_runway_first_last_frame.json +45 -45
- comfyui_workflow_templates/templates/api_runway_gen3a_turbo_image_to_video.json +31 -32
- comfyui_workflow_templates/templates/api_runway_gen4_turo_image_to_video.json +65 -66
- comfyui_workflow_templates/templates/api_runway_reference_to_image.json +109 -71
- comfyui_workflow_templates/templates/api_runway_text_to_image.json +55 -55
- comfyui_workflow_templates/templates/api_stability_ai_audio_inpaint.json +58 -82
- comfyui_workflow_templates/templates/api_stability_ai_audio_to_audio.json +51 -54
- comfyui_workflow_templates/templates/api_stability_ai_i2i.json +66 -41
- comfyui_workflow_templates/templates/api_stability_ai_sd3.5_i2i.json +74 -26
- comfyui_workflow_templates/templates/api_stability_ai_sd3.5_t2i.json +70 -70
- comfyui_workflow_templates/templates/api_stability_ai_stable_image_ultra_t2i.json +33 -31
- comfyui_workflow_templates/templates/api_tripo_image_to_model.json +90 -92
- comfyui_workflow_templates/templates/api_tripo_multiview_to_model.json +283 -283
- comfyui_workflow_templates/templates/api_tripo_text_to_model.json +102 -102
- comfyui_workflow_templates/templates/api_veo2_i2v.json +32 -29
- comfyui_workflow_templates/templates/api_veo3.json +78 -72
- comfyui_workflow_templates/templates/api_vidu_image_to_video.json +37 -9
- comfyui_workflow_templates/templates/api_vidu_reference_to_video.json +86 -19
- comfyui_workflow_templates/templates/api_vidu_start_end_to_video.json +14 -6
- comfyui_workflow_templates/templates/api_vidu_text_to_video.json +2 -2
- comfyui_workflow_templates/templates/api_wan_image_to_video.json +52 -53
- comfyui_workflow_templates/templates/api_wan_text_to_image .json +140 -0
- comfyui_workflow_templates/templates/api_wan_text_to_video.json +38 -45
- comfyui_workflow_templates/templates/audio_ace_step_1_m2m_editing.json +174 -351
- comfyui_workflow_templates/templates/audio_ace_step_1_t2a_instrumentals.json +214 -405
- comfyui_workflow_templates/templates/audio_ace_step_1_t2a_song.json +179 -390
- comfyui_workflow_templates/templates/audio_stable_audio_example.json +156 -118
- comfyui_workflow_templates/templates/controlnet_example.json +110 -122
- comfyui_workflow_templates/templates/default.json +329 -139
- comfyui_workflow_templates/templates/depth_controlnet.json +463 -206
- comfyui_workflow_templates/templates/depth_t2i_adapter.json +1522 -236
- comfyui_workflow_templates/templates/esrgan_example.json +24 -30
- comfyui_workflow_templates/templates/flux1_dev_uso_reference_image_gen.json +227 -193
- comfyui_workflow_templates/templates/flux1_krea_dev.json +3 -3
- comfyui_workflow_templates/templates/flux_canny_model_example.json +576 -268
- comfyui_workflow_templates/templates/flux_depth_lora_example.json +1234 -213
- comfyui_workflow_templates/templates/flux_fill_inpaint_example.json +553 -250
- comfyui_workflow_templates/templates/flux_fill_outpaint_example.json +556 -228
- comfyui_workflow_templates/templates/flux_kontext_dev_basic.json +149 -234
- comfyui_workflow_templates/templates/flux_redux_model_example-1.webp +0 -0
- comfyui_workflow_templates/templates/flux_redux_model_example.json +600 -560
- comfyui_workflow_templates/templates/flux_schnell_full_text_to_image.json +21 -29
- comfyui_workflow_templates/templates/hidream_e1_1.json +180 -210
- comfyui_workflow_templates/templates/hidream_e1_full.json +34 -40
- comfyui_workflow_templates/templates/hidream_i1_dev.json +15 -15
- comfyui_workflow_templates/templates/hidream_i1_fast.json +15 -15
- comfyui_workflow_templates/templates/hidream_i1_full.json +17 -16
- comfyui_workflow_templates/templates/hiresfix_esrgan_workflow.json +31 -37
- comfyui_workflow_templates/templates/hiresfix_latent_workflow.json +84 -88
- comfyui_workflow_templates/templates/image2image-1.webp +0 -0
- comfyui_workflow_templates/templates/image2image-2.webp +0 -0
- comfyui_workflow_templates/templates/image2image.json +198 -196
- comfyui_workflow_templates/templates/image_chroma1_radiance_text_to_image.json +60 -60
- comfyui_workflow_templates/templates/image_flux.1_fill_dev_OneReward.json +178 -162
- comfyui_workflow_templates/templates/image_lotus_depth_v1_1.json +26 -32
- comfyui_workflow_templates/templates/image_netayume_lumina_t2i-1.webp +0 -0
- comfyui_workflow_templates/templates/image_netayume_lumina_t2i.json +597 -0
- comfyui_workflow_templates/templates/image_omnigen2_image_edit.json +322 -323
- comfyui_workflow_templates/templates/image_omnigen2_t2i.json +26 -33
- comfyui_workflow_templates/templates/image_qwen_image.json +40 -40
- comfyui_workflow_templates/templates/image_qwen_image_controlnet_patch.json +47 -46
- comfyui_workflow_templates/templates/image_qwen_image_edit.json +216 -216
- comfyui_workflow_templates/templates/image_qwen_image_edit_2509.json +361 -361
- comfyui_workflow_templates/templates/image_qwen_image_instantx_controlnet.json +75 -73
- comfyui_workflow_templates/templates/image_qwen_image_instantx_inpainting_controlnet.json +186 -181
- comfyui_workflow_templates/templates/image_qwen_image_union_control_lora.json +191 -190
- comfyui_workflow_templates/templates/image_to_video.json +64 -64
- comfyui_workflow_templates/templates/image_to_video_wan.json +163 -140
- comfyui_workflow_templates/templates/index.es.json +26 -25
- comfyui_workflow_templates/templates/index.fr.json +26 -25
- comfyui_workflow_templates/templates/index.ja.json +26 -25
- comfyui_workflow_templates/templates/index.json +12 -24
- comfyui_workflow_templates/templates/index.ko.json +26 -25
- comfyui_workflow_templates/templates/index.ru.json +26 -25
- comfyui_workflow_templates/templates/index.zh-TW.json +26 -25
- comfyui_workflow_templates/templates/index.zh.json +26 -25
- comfyui_workflow_templates/templates/inpaint_example.json +87 -86
- comfyui_workflow_templates/templates/inpaint_model_outpainting.json +24 -21
- comfyui_workflow_templates/templates/latent_upscale_different_prompt_model.json +179 -185
- comfyui_workflow_templates/templates/ltxv_image_to_video-1.webp +0 -0
- comfyui_workflow_templates/templates/ltxv_image_to_video.json +367 -337
- comfyui_workflow_templates/templates/mixing_controlnets.json +422 -373
- comfyui_workflow_templates/templates/sd3.5_large_blur.json +14 -14
- comfyui_workflow_templates/templates/sd3.5_large_canny_controlnet_example.json +615 -258
- comfyui_workflow_templates/templates/sd3.5_large_depth.json +1317 -210
- comfyui_workflow_templates/templates/sdxl_revision_text_prompts.json +619 -256
- comfyui_workflow_templates/templates/sdxlturbo_example.json +308 -162
- comfyui_workflow_templates/templates/video_humo.json +194 -194
- comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_1.3B.json +100 -73
- comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_14B.json +102 -153
- comfyui_workflow_templates/templates/video_wan2_2_14B_animate.json +48 -46
- comfyui_workflow_templates/templates/video_wan2_2_14B_flf2v.json +151 -151
- comfyui_workflow_templates/templates/video_wan2_2_14B_fun_camera.json +118 -114
- comfyui_workflow_templates/templates/video_wan2_2_14B_fun_control.json +394 -372
- comfyui_workflow_templates/templates/video_wan2_2_14B_fun_inpaint.json +125 -125
- comfyui_workflow_templates/templates/video_wan2_2_14B_i2v.json +60 -60
- comfyui_workflow_templates/templates/video_wan2_2_14B_s2v.json +244 -240
- comfyui_workflow_templates/templates/video_wan2_2_14B_t2v (2).json +1954 -0
- comfyui_workflow_templates/templates/video_wan2_2_5B_fun_control.json +242 -222
- comfyui_workflow_templates/templates/video_wan2_2_5B_fun_inpaint.json +6 -6
- comfyui_workflow_templates/templates/video_wan2_2_5B_ti2v.json +49 -49
- comfyui_workflow_templates/templates/video_wan_ati.json +51 -51
- comfyui_workflow_templates/templates/video_wan_vace_14B_ref2v.json +138 -152
- comfyui_workflow_templates/templates/video_wan_vace_14B_t2v.json +2 -2
- comfyui_workflow_templates/templates/video_wan_vace_14B_v2v.json +135 -153
- comfyui_workflow_templates/templates/video_wan_vace_flf2v.json +178 -194
- comfyui_workflow_templates/templates/video_wan_vace_inpainting.json +228 -236
- comfyui_workflow_templates/templates/video_wan_vace_outpainting.json +257 -340
- comfyui_workflow_templates/templates/wan2.1_flf2v_720_f16.json +65 -73
- comfyui_workflow_templates/templates/wan2.1_fun_control.json +202 -133
- comfyui_workflow_templates/templates/wan2.1_fun_inp.json +78 -52
- {comfyui_workflow_templates-0.1.94.dist-info → comfyui_workflow_templates-0.1.96.dist-info}/METADATA +1 -1
- {comfyui_workflow_templates-0.1.94.dist-info → comfyui_workflow_templates-0.1.96.dist-info}/RECORD +164 -162
- comfyui_workflow_templates/templates/sdxl_revision_zero_positive-1.webp +0 -0
- comfyui_workflow_templates/templates/sdxl_revision_zero_positive.json +0 -496
- comfyui_workflow_templates/templates/stable_zero123_example-1.webp +0 -0
- comfyui_workflow_templates/templates/stable_zero123_example.json +0 -273
- {comfyui_workflow_templates-0.1.94.dist-info → comfyui_workflow_templates-0.1.96.dist-info}/WHEEL +0 -0
- {comfyui_workflow_templates-0.1.94.dist-info → comfyui_workflow_templates-0.1.96.dist-info}/licenses/LICENSE +0 -0
- {comfyui_workflow_templates-0.1.94.dist-info → comfyui_workflow_templates-0.1.96.dist-info}/top_level.txt +0 -0
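
The listing above is the wheel's data payload: one workflow JSON per template plus matching .webp thumbnails under comfyui_workflow_templates/templates/, alongside the per-locale index files. A minimal sketch of enumerating that payload from an installed copy, using only importlib.resources (no package-specific API is assumed here):

# Sketch: list the bundled workflow JSONs and thumbnails of an installed
# comfyui-workflow-templates wheel. Assumes only the templates/ layout shown
# in the listing above.
from importlib import resources

templates_dir = resources.files("comfyui_workflow_templates") / "templates"
workflows = sorted(entry.name for entry in templates_dir.iterdir() if entry.name.endswith(".json"))
thumbnails = sorted(entry.name for entry in templates_dir.iterdir() if entry.name.endswith(".webp"))
print(f"{len(workflows)} workflow JSON files, {len(thumbnails)} thumbnails")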
comfyui_workflow_templates/templates/index.fr.json

@@ -29,7 +29,8 @@
         "models": ["SD1.5", "Stability"],
         "date": "2025-03-01",
         "size": 1.99,
-        "vram": 2.88
+        "vram": 2.88,
+        "thumbnailVariant": "hoverDissolve"
       },
       {
         "name": "lora",
@@ -386,6 +387,17 @@
         "size": 22.0,
         "vram": 22.0
       },
+      {
+        "name": "image_netayume_lumina_t2i",
+        "title": "NetaYume Lumina Text to Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "High-quality anime-style image generation with enhanced character understanding and detailed textures. Fine-tuned from Neta Lumina on Danbooru dataset.",
+        "tags": ["Text to Image", "Image", "Anime"],
+        "models": ["NetaYume Lumina"],
+        "date": "2025-10-10",
+        "size": 9.89
+      },
       {
         "name": "image_chroma_text_to_image",
         "title": "Chroma texte vers image",
@@ -727,18 +739,6 @@
         "date": "2025-03-01",
         "size": 9.9
       },
-      {
-        "name": "sdxl_revision_zero_positive",
-        "title": "SDXL Revision Zero Positive",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Générer des images en utilisant à la fois des prompts textuels et des images de référence avec SDXL Revision.",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
-        "tags": ["Texte vers Image", "Image"],
-        "models": ["SDXL", "Stability"],
-        "date": "2025-03-01",
-        "size": 9.9
-      },
       {
         "name": "sdxlturbo_example",
         "title": "SDXL Turbo",
@@ -1287,18 +1287,6 @@
         "tutorialUrl": "",
         "thumbnailVariant": "hoverDissolve",
         "size": 4.59
-      },
-      {
-        "name": "stable_zero123_example",
-        "title": "Stable Zero123",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Générer des vues 3D à partir d'images simples en utilisant Stable Zero123.",
-        "tags": ["Image vers 3D", "3D"],
-        "models": ["Stable Zero123"],
-        "date": "2025-03-01",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/",
-        "size": 7.99
       }
     ]
   },
@@ -1679,6 +1667,19 @@
     "icon": "icon-[lucide--film]",
     "title": "API Vidéo",
     "templates": [
+      {
+        "name": "api_openai_sora_video",
+        "title": "Sora 2: Text & Image to Video",
+        "description": "OpenAI's Sora-2 and Sora-2 Pro video generation with synchronized audio.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Image to Video", "Text to Video", "API"],
+        "models": ["OpenAI"],
+        "date": "2025-10-08",
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      },
       {
         "name": "api_wan_text_to_video",
         "title": "Wan2.5: Text to Video",
comfyui_workflow_templates/templates/index.ja.json

@@ -29,7 +29,8 @@
         "models": ["SD1.5", "Stability"],
         "date": "2025-03-01",
         "size": 1.99,
-        "vram": 2.88
+        "vram": 2.88,
+        "thumbnailVariant": "hoverDissolve"
       },
       {
         "name": "lora",
@@ -386,6 +387,17 @@
         "size": 22.0,
         "vram": 22.0
       },
+      {
+        "name": "image_netayume_lumina_t2i",
+        "title": "NetaYume Lumina Text to Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "High-quality anime-style image generation with enhanced character understanding and detailed textures. Fine-tuned from Neta Lumina on Danbooru dataset.",
+        "tags": ["Text to Image", "Image", "Anime"],
+        "models": ["NetaYume Lumina"],
+        "date": "2025-10-10",
+        "size": 9.89
+      },
       {
         "name": "image_chroma_text_to_image",
         "title": "Chromaテキストから画像",
@@ -727,18 +739,6 @@
         "date": "2025-03-01",
         "size": 9.9
       },
-      {
-        "name": "sdxl_revision_zero_positive",
-        "title": "SDXLリビジョンゼロポジティブ",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "SDXLリビジョンを使用して、テキストプロンプトと参照画像の両方を使用して画像を生成します。",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
-        "tags": ["テキストから画像", "画像"],
-        "models": ["SDXL", "Stability"],
-        "date": "2025-03-01",
-        "size": 9.9
-      },
       {
         "name": "sdxlturbo_example",
         "title": "SDXLターボ",
@@ -1286,18 +1286,6 @@
         "tutorialUrl": "",
         "thumbnailVariant": "hoverDissolve",
         "size": 4.59
-      },
-      {
-        "name": "stable_zero123_example",
-        "title": "Stable Zero123",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Stable Zero123を使用して、単一の画像から3Dビューを生成します。",
-        "tags": ["画像から3D", "3D"],
-        "models": ["Stable Zero123"],
-        "date": "2025-03-01",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/",
-        "size": 7.99
       }
     ]
   },
@@ -1678,6 +1666,19 @@
     "icon": "icon-[lucide--film]",
     "title": "ビデオAPI",
     "templates": [
+      {
+        "name": "api_openai_sora_video",
+        "title": "Sora 2: Text & Image to Video",
+        "description": "OpenAI's Sora-2 and Sora-2 Pro video generation with synchronized audio.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Image to Video", "Text to Video", "API"],
+        "models": ["OpenAI"],
+        "date": "2025-10-08",
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      },
       {
         "name": "api_wan_text_to_video",
         "title": "Wan2.5: Text to Video",
comfyui_workflow_templates/templates/index.json

@@ -23,6 +23,7 @@
         "title": "Image to Image",
         "mediaType": "image",
         "mediaSubtype": "webp",
+        "thumbnailVariant": "hoverDissolve",
         "description": "Transform existing images using text prompts.",
         "tutorialUrl": "https://docs.comfy.org/tutorials/basic/image-to-image",
         "tags": ["Image to Image", "Image"],
@@ -386,6 +387,17 @@
         "size": 22.0,
         "vram": 22.0
       },
+      {
+        "name": "image_netayume_lumina_t2i",
+        "title": "NetaYume Lumina Text to Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "High-quality anime-style image generation with enhanced character understanding and detailed textures. Fine-tuned from Neta Lumina on Danbooru dataset.",
+        "tags": ["Text to Image", "Image", "Anime"],
+        "models": ["NetaYume Lumina"],
+        "date": "2025-10-10",
+        "size": 9.89
+      },
       {
         "name": "image_chroma_text_to_image",
         "title": "Chroma text to image",
@@ -727,18 +739,6 @@
         "date": "2025-03-01",
         "size": 9.9
       },
-      {
-        "name": "sdxl_revision_zero_positive",
-        "title": "SDXL Revision Zero Positive",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Generate images using both text prompts and reference images with SDXL Revision.",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
-        "tags": ["Text to Image", "Image"],
-        "models": ["SDXL", "Stability"],
-        "date": "2025-03-01",
-        "size": 9.9
-      },
       {
         "name": "sdxlturbo_example",
         "title": "SDXL Turbo",
@@ -1285,18 +1285,6 @@
         "tutorialUrl": "",
         "thumbnailVariant": "hoverDissolve",
         "size": 4.59
-      },
-      {
-        "name": "stable_zero123_example",
-        "title": "Stable Zero123",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Generate 3D views from single images using Stable Zero123.",
-        "tags": ["Image to 3D", "3D"],
-        "models": ["Stable Zero123"],
-        "date": "2025-03-01",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/",
-        "size": 7.99
       }
     ]
   },
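
The index.json hunks above show the shape of a template entry: a flat object with name, title, mediaType, mediaSubtype, description, tags, models, date and size (plus optional vram, tutorialUrl, thumbnailVariant and OpenSource), grouped under categories that carry an icon, a title and a templates list. A short sketch of reading the bundled index and pulling out the entries this release adds; the assumption that the file's root is an array of those category objects is inferred from the hunks, not confirmed:

# Sketch: list recently added template entries from the bundled index.json.
# Assumption (inferred from the hunks above): the root is a list of category
# objects, each holding a "templates" list of entry objects.
import json
from importlib import resources

index_path = resources.files("comfyui_workflow_templates") / "templates" / "index.json"
categories = json.loads(index_path.read_text(encoding="utf-8"))

for category in categories:
    for entry in category.get("templates", []):
        if entry.get("date", "") >= "2025-10-01":  # entries added in this release window
            print(f'{category.get("title")}: {entry["name"]} (size {entry.get("size")})')

If the structure assumption holds, running this against 0.1.96 should surface image_netayume_lumina_t2i and api_openai_sora_video, the two additions visible above.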
comfyui_workflow_templates/templates/index.ko.json

@@ -29,7 +29,8 @@
         "models": ["SD1.5", "Stability"],
         "date": "2025-03-01",
         "size": 1.99,
-        "vram": 2.88
+        "vram": 2.88,
+        "thumbnailVariant": "hoverDissolve"
       },
       {
         "name": "lora",
@@ -386,6 +387,17 @@
         "size": 22.0,
         "vram": 22.0
       },
+      {
+        "name": "image_netayume_lumina_t2i",
+        "title": "NetaYume Lumina Text to Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "High-quality anime-style image generation with enhanced character understanding and detailed textures. Fine-tuned from Neta Lumina on Danbooru dataset.",
+        "tags": ["Text to Image", "Image", "Anime"],
+        "models": ["NetaYume Lumina"],
+        "date": "2025-10-10",
+        "size": 9.89
+      },
       {
         "name": "image_chroma_text_to_image",
         "title": "Chroma 텍스트에서 이미지",
@@ -727,18 +739,6 @@
         "date": "2025-03-01",
         "size": 9.9
       },
-      {
-        "name": "sdxl_revision_zero_positive",
-        "title": "SDXL 리비전 제로 포지티브",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "SDXL 리비전을 사용하여 텍스트 프롬프트와 참조 이미지를 모두 사용하여 이미지를 생성합니다.",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
-        "tags": ["텍스트에서 이미지", "이미지"],
-        "models": ["SDXL", "Stability"],
-        "date": "2025-03-01",
-        "size": 9.9
-      },
       {
         "name": "sdxlturbo_example",
         "title": "SDXL 터보",
@@ -1286,18 +1286,6 @@
         "tutorialUrl": "",
         "thumbnailVariant": "hoverDissolve",
         "size": 4.59
-      },
-      {
-        "name": "stable_zero123_example",
-        "title": "Stable Zero123",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Stable Zero123을 사용하여 단일 이미지에서 3D 뷰를 생성합니다.",
-        "tags": ["이미지에서 3D", "3D"],
-        "models": ["Stable Zero123"],
-        "date": "2025-03-01",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/",
-        "size": 7.99
       }
     ]
   },
@@ -1678,6 +1666,19 @@
     "icon": "icon-[lucide--film]",
     "title": "비디오 API",
     "templates": [
+      {
+        "name": "api_openai_sora_video",
+        "title": "Sora 2: Text & Image to Video",
+        "description": "OpenAI's Sora-2 and Sora-2 Pro video generation with synchronized audio.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Image to Video", "Text to Video", "API"],
+        "models": ["OpenAI"],
+        "date": "2025-10-08",
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      },
       {
         "name": "api_wan_text_to_video",
         "title": "Wan2.5: Text to Video",
comfyui_workflow_templates/templates/index.ru.json

@@ -29,7 +29,8 @@
         "models": ["SD1.5", "Stability"],
         "date": "2025-03-01",
         "size": 1.99,
-        "vram": 2.88
+        "vram": 2.88,
+        "thumbnailVariant": "hoverDissolve"
       },
       {
         "name": "lora",
@@ -386,6 +387,17 @@
         "size": 22.0,
         "vram": 22.0
       },
+      {
+        "name": "image_netayume_lumina_t2i",
+        "title": "NetaYume Lumina Text to Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "High-quality anime-style image generation with enhanced character understanding and detailed textures. Fine-tuned from Neta Lumina on Danbooru dataset.",
+        "tags": ["Text to Image", "Image", "Anime"],
+        "models": ["NetaYume Lumina"],
+        "date": "2025-10-10",
+        "size": 9.89
+      },
       {
         "name": "image_chroma_text_to_image",
         "title": "Chroma текст в изображение",
@@ -727,18 +739,6 @@
         "date": "2025-03-01",
         "size": 9.9
       },
-      {
-        "name": "sdxl_revision_zero_positive",
-        "title": "SDXL Revision Zero Positive",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Генерация изображений с использованием как текстовых запросов, так и опорных изображений с SDXL Revision.",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
-        "tags": ["Текст в изображение", "Изображение"],
-        "models": ["SDXL", "Stability"],
-        "date": "2025-03-01",
-        "size": 9.9
-      },
       {
         "name": "sdxlturbo_example",
         "title": "SDXL Turbo",
@@ -1286,18 +1286,6 @@
         "tutorialUrl": "",
         "thumbnailVariant": "hoverDissolve",
         "size": 4.59
-      },
-      {
-        "name": "stable_zero123_example",
-        "title": "Stable Zero123",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Генерация 3D-видов из отдельных изображений с использованием Stable Zero123.",
-        "tags": ["Изображение в 3D", "3D"],
-        "models": ["Stable Zero123"],
-        "date": "2025-03-01",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/",
-        "size": 7.99
       }
     ]
   },
@@ -1678,6 +1666,19 @@
     "icon": "icon-[lucide--film]",
     "title": "API видео",
     "templates": [
+      {
+        "name": "api_openai_sora_video",
+        "title": "Sora 2: Text & Image to Video",
+        "description": "OpenAI's Sora-2 and Sora-2 Pro video generation with synchronized audio.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Image to Video", "Text to Video", "API"],
+        "models": ["OpenAI"],
+        "date": "2025-10-08",
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      },
       {
         "name": "api_wan_text_to_video",
         "title": "Wan2.5: Text to Video",
comfyui_workflow_templates/templates/index.zh-TW.json

@@ -29,7 +29,8 @@
         "models": ["SD1.5", "Stability"],
         "date": "2025-03-01",
         "size": 1.99,
-        "vram": 2.88
+        "vram": 2.88,
+        "thumbnailVariant": "hoverDissolve"
       },
       {
         "name": "lora",
@@ -386,6 +387,17 @@
         "size": 22.0,
         "vram": 22.0
       },
+      {
+        "name": "image_netayume_lumina_t2i",
+        "title": "NetaYume Lumina Text to Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "High-quality anime-style image generation with enhanced character understanding and detailed textures. Fine-tuned from Neta Lumina on Danbooru dataset.",
+        "tags": ["Text to Image", "Image", "Anime"],
+        "models": ["NetaYume Lumina"],
+        "date": "2025-10-10",
+        "size": 9.89
+      },
       {
         "name": "image_chroma_text_to_image",
         "title": "Chroma 文字到影像",
@@ -727,18 +739,6 @@
         "date": "2025-03-01",
         "size": 9.9
       },
-      {
-        "name": "sdxl_revision_zero_positive",
-        "title": "SDXL 修訂零正向",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "使用 SDXL 修訂版結合文字提示與參考影像生成影像。",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
-        "tags": ["文字到影像", "影像"],
-        "models": ["SDXL", "Stability"],
-        "date": "2025-03-01",
-        "size": 9.9
-      },
       {
         "name": "sdxlturbo_example",
         "title": "SDXL Turbo",
@@ -1286,18 +1286,6 @@
         "tutorialUrl": "",
         "thumbnailVariant": "hoverDissolve",
         "size": 4.59
-      },
-      {
-        "name": "stable_zero123_example",
-        "title": "Stable Zero123",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "使用 Stable Zero123 從單張影像生成 3D 視圖。",
-        "tags": ["影像到 3D", "3D"],
-        "models": ["Stable Zero123"],
-        "date": "2025-03-01",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/",
-        "size": 7.99
       }
     ]
   },
@@ -1678,6 +1666,19 @@
     "icon": "icon-[lucide--film]",
     "title": "影片 API",
     "templates": [
+      {
+        "name": "api_openai_sora_video",
+        "title": "Sora 2: Text & Image to Video",
+        "description": "OpenAI's Sora-2 and Sora-2 Pro video generation with synchronized audio.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Image to Video", "Text to Video", "API"],
+        "models": ["OpenAI"],
+        "date": "2025-10-08",
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      },
       {
         "name": "api_wan_text_to_video",
         "title": "Wan2.5: Text to Video",
comfyui_workflow_templates/templates/index.zh.json

@@ -29,7 +29,8 @@
         "models": ["SD1.5", "Stability"],
         "date": "2025-03-01",
         "size": 1.99,
-        "vram": 2.88
+        "vram": 2.88,
+        "thumbnailVariant": "hoverDissolve"
       },
       {
         "name": "lora",
@@ -386,6 +387,17 @@
         "size": 22.0,
         "vram": 22.0
       },
+      {
+        "name": "image_netayume_lumina_t2i",
+        "title": "NetaYume Lumina Text to Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "High-quality anime-style image generation with enhanced character understanding and detailed textures. Fine-tuned from Neta Lumina on Danbooru dataset.",
+        "tags": ["Text to Image", "Image", "Anime"],
+        "models": ["NetaYume Lumina"],
+        "date": "2025-10-10",
+        "size": 9.89
+      },
       {
         "name": "image_chroma_text_to_image",
         "title": "Chroma文生图",
@@ -727,18 +739,6 @@
         "date": "2025-03-01",
         "size": 9.9
       },
-      {
-        "name": "sdxl_revision_zero_positive",
-        "title": "SDXL修订零正向",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "使用SDXL修订版结合文本提示和参考图像生成图像。",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
-        "tags": ["文生图", "图像"],
-        "models": ["SDXL", "Stability"],
-        "date": "2025-03-01",
-        "size": 9.9
-      },
       {
         "name": "sdxlturbo_example",
         "title": "SDXL Turbo",
@@ -1286,18 +1286,6 @@
         "tutorialUrl": "",
         "thumbnailVariant": "hoverDissolve",
         "size": 4.59
-      },
-      {
-        "name": "stable_zero123_example",
-        "title": "Stable Zero123",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "使用Stable Zero123从单张图像生成3D视图。",
-        "tags": ["图像到3D", "3D"],
-        "models": ["Stable Zero123"],
-        "date": "2025-03-01",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/",
-        "size": 7.99
       }
     ]
   },
@@ -1678,6 +1666,19 @@
     "icon": "icon-[lucide--film]",
     "title": "视频API",
     "templates": [
+      {
+        "name": "api_openai_sora_video",
+        "title": "Sora 2: Text & Image to Video",
+        "description": "OpenAI's Sora-2 and Sora-2 Pro video generation with synchronized audio.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Image to Video", "Text to Video", "API"],
+        "models": ["OpenAI"],
+        "date": "2025-10-08",
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      },
       {
         "name": "api_wan_text_to_video",
         "title": "Wan2.5: Text to Video",
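
Every localized index receives the same edits as index.json in this release (the NetaYume Lumina and Sora 2 entries added, sdxl_revision_zero_positive and stable_zero123_example removed), so the template names across locale files should stay in lockstep. A small consistency check sketch, under the same assumed root-is-a-list-of-categories structure; the locale suffixes are taken from the file listing at the top:

# Sketch: verify each localized index lists the same template names as index.json.
# Assumes the same list-of-categories structure as the earlier sketch.
import json
from importlib import resources

templates_dir = resources.files("comfyui_workflow_templates") / "templates"

def template_names(index_file):
    data = json.loads((templates_dir / index_file).read_text(encoding="utf-8"))
    return {entry["name"] for category in data for entry in category.get("templates", [])}

reference = template_names("index.json")
for locale in ("es", "fr", "ja", "ko", "ru", "zh", "zh-TW"):
    localized = template_names(f"index.{locale}.json")
    if localized != reference:
        print(f"index.{locale}.json out of sync: {sorted(localized ^ reference)}")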