@mindstudio-ai/local-model-tunnel 0.3.1 → 0.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -47,10 +47,20 @@ function getProviderBaseUrl(name, defaultUrl) {
47
47
  const urls = config.get("providerBaseUrls");
48
48
  return urls[name] ?? defaultUrl;
49
49
  }
50
+ function setProviderBaseUrl(name, url) {
51
+ const urls = config.get("providerBaseUrls");
52
+ urls[name] = url;
53
+ config.set("providerBaseUrls", urls);
54
+ }
50
55
  function getProviderInstallPath(name) {
51
56
  const paths = config.get("providerInstallPaths");
52
57
  return paths[name];
53
58
  }
59
+ function setProviderInstallPath(name, installPath) {
60
+ const paths = config.get("providerInstallPaths");
61
+ paths[name] = installPath;
62
+ config.set("providerInstallPaths", paths);
63
+ }
54
64
 
55
65
  // src/api.ts
56
66
  function getHeaders() {
@@ -83,39 +93,20 @@ async function pollForRequest(models) {
83
93
  const data = await response.json();
84
94
  return data.request;
85
95
  }
86
- async function submitProgress(requestId, content) {
96
+ async function submitProgress(requestId, content, type = "chunk") {
87
97
  const baseUrl = getApiBaseUrl();
88
98
  const response = await fetch(
89
99
  `${baseUrl}/v1/local-models/requests/${requestId}/progress`,
90
100
  {
91
101
  method: "POST",
92
102
  headers: getHeaders(),
93
- body: JSON.stringify({ content })
103
+ body: JSON.stringify({ type, content })
94
104
  }
95
105
  );
96
106
  if (!response.ok) {
97
107
  console.warn(`Progress update failed: ${response.status}`);
98
108
  }
99
109
  }
100
- async function submitGenerationProgress(requestId, step, totalSteps, preview) {
101
- const baseUrl = getApiBaseUrl();
102
- const response = await fetch(
103
- `${baseUrl}/v1/local-models/requests/${requestId}/progress`,
104
- {
105
- method: "POST",
106
- headers: getHeaders(),
107
- body: JSON.stringify({
108
- type: "generation",
109
- step,
110
- totalSteps,
111
- preview
112
- })
113
- }
114
- );
115
- if (!response.ok) {
116
- console.warn(`Generation progress update failed: ${response.status}`);
117
- }
118
- }
119
110
  async function submitResult(requestId, success, result, error) {
120
111
  const baseUrl = getApiBaseUrl();
121
112
  const response = await fetch(
@@ -145,7 +136,7 @@ async function verifyApiKey() {
145
136
  return false;
146
137
  }
147
138
  }
148
- async function registerLocalModel(modelNameOrOptions, provider = "ollama", modelType = "llm_chat") {
139
+ async function syncLocalModel(modelNameOrOptions, provider = "ollama", modelType = "llm_chat") {
149
140
  const baseUrl = getApiBaseUrl();
150
141
  let payload;
151
142
  if (typeof modelNameOrOptions === "string") {
@@ -169,10 +160,29 @@ async function registerLocalModel(modelNameOrOptions, provider = "ollama", model
169
160
  });
170
161
  if (!response.ok) {
171
162
  const errorText = await response.text();
172
- throw new Error(`Register failed: ${response.status} ${errorText}`);
163
+ throw new Error(`Sync failed: ${response.status} ${errorText}`);
173
164
  }
174
165
  }
175
- async function getRegisteredModels() {
166
+ async function updateLocalModel(options) {
167
+ const baseUrl = getApiBaseUrl();
168
+ const payload = {
169
+ modelId: options.modelId,
170
+ modelName: options.modelName,
171
+ provider: options.provider,
172
+ modelType: options.modelType || "llm_chat",
173
+ parameters: options.parameters
174
+ };
175
+ const response = await fetch(`${baseUrl}/v1/local-models/models/update`, {
176
+ method: "POST",
177
+ headers: getHeaders(),
178
+ body: JSON.stringify(payload)
179
+ });
180
+ if (!response.ok) {
181
+ const errorText = await response.text();
182
+ throw new Error(`Update failed: ${response.status} ${errorText}`);
183
+ }
184
+ }
185
+ async function getSyncedModels() {
176
186
  const baseUrl = getApiBaseUrl();
177
187
  const response = await fetch(`${baseUrl}/v1/local-models/models`, {
178
188
  method: "GET",
@@ -181,7 +191,7 @@ async function getRegisteredModels() {
181
191
  if (!response.ok) {
182
192
  const errorText = await response.text();
183
193
  throw new Error(
184
- `Failed to fetch registered models: ${response.status} ${errorText}`
194
+ `Failed to fetch synced models: ${response.status} ${errorText}`
185
195
  );
186
196
  }
187
197
  const data = await response.json();
@@ -567,7 +577,7 @@ var StableDiffusionProvider = class {
567
577
  throw new Error(`Failed to switch model: ${error}`);
568
578
  }
569
579
  }
570
- async generateImage(model, prompt, options) {
580
+ async generateImage(model, prompt, options, onProgress) {
571
581
  const currentModel = await this.getCurrentModel();
572
582
  if (currentModel && !currentModel.includes(model)) {
573
583
  await this.setModel(model);
@@ -582,44 +592,20 @@ var StableDiffusionProvider = class {
582
592
  seed: options?.seed ?? -1,
583
593
  sampler_name: options?.sampler || "Euler a"
584
594
  };
585
- const response = await fetch(`${this.getBaseUrl()}/sdapi/v1/txt2img`, {
595
+ const generatePromise = fetch(`${this.getBaseUrl()}/sdapi/v1/txt2img`, {
586
596
  method: "POST",
587
597
  headers: { "Content-Type": "application/json" },
588
598
  body: JSON.stringify(payload)
589
599
  });
590
- if (!response.ok) {
591
- const error = await response.text();
592
- throw new Error(`Image generation failed: ${response.status} ${error}`);
593
- }
594
- const result = await response.json();
595
- if (!result.images || result.images.length === 0) {
596
- throw new Error("No images returned from Stable Diffusion");
597
- }
598
- let info = {};
599
- let seed;
600
- try {
601
- info = JSON.parse(result.info);
602
- seed = typeof info.seed === "number" ? info.seed : void 0;
603
- } catch {
604
- }
605
- return {
606
- imageBase64: result.images[0],
607
- mimeType: "image/png",
608
- seed,
609
- info
610
- };
611
- }
612
- async generateImageWithProgress(model, prompt, options, onProgress) {
613
- const generatePromise = this.generateImage(model, prompt, options);
614
600
  if (onProgress) {
615
601
  const pollProgress = async () => {
616
602
  while (true) {
617
603
  try {
618
- const response = await fetch(
604
+ const response2 = await fetch(
619
605
  `${this.getBaseUrl()}/sdapi/v1/progress`
620
606
  );
621
- if (!response.ok) break;
622
- const progress = await response.json();
607
+ if (!response2.ok) break;
608
+ const progress = await response2.json();
623
609
  onProgress({
624
610
  step: progress.state.sampling_step,
625
611
  totalSteps: progress.state.sampling_steps,
@@ -635,7 +621,28 @@ var StableDiffusionProvider = class {
635
621
  pollProgress().catch(() => {
636
622
  });
637
623
  }
638
- return generatePromise;
624
+ const response = await generatePromise;
625
+ if (!response.ok) {
626
+ const error = await response.text();
627
+ throw new Error(`Image generation failed: ${response.status} ${error}`);
628
+ }
629
+ const result = await response.json();
630
+ if (!result.images || result.images.length === 0) {
631
+ throw new Error("No images returned from Stable Diffusion");
632
+ }
633
+ let info = {};
634
+ let seed;
635
+ try {
636
+ info = JSON.parse(result.info);
637
+ seed = typeof info.seed === "number" ? info.seed : void 0;
638
+ } catch {
639
+ }
640
+ return {
641
+ imageBase64: result.images[0],
642
+ mimeType: "image/png",
643
+ seed,
644
+ info
645
+ };
639
646
  }
640
647
  /**
641
648
  * Fetch available samplers from the backend
@@ -718,7 +725,7 @@ var StableDiffusionProvider = class {
718
725
  label: "Steps",
719
726
  variable: "steps",
720
727
  helpText: "Number of denoising steps. More steps = higher quality but slower.",
721
- defaultValue: 20,
728
+ defaultValue: "20",
722
729
  numberOptions: {
723
730
  min: 1,
724
731
  max: 150,
@@ -730,7 +737,7 @@ var StableDiffusionProvider = class {
730
737
  label: "CFG Scale",
731
738
  variable: "cfgScale",
732
739
  helpText: "How strongly the image should follow the prompt. Higher = more literal.",
733
- defaultValue: 7,
740
+ defaultValue: "7",
734
741
  numberOptions: {
735
742
  min: 1,
736
743
  max: 30,
@@ -738,15 +745,11 @@ var StableDiffusionProvider = class {
738
745
  }
739
746
  },
740
747
  {
741
- type: "number",
748
+ type: "seed",
742
749
  label: "Seed",
743
750
  variable: "seed",
744
751
  helpText: "A specific value used to guide the 'randomness' of generation. Use -1 for random.",
745
- defaultValue: -1,
746
- numberOptions: {
747
- min: -1,
748
- max: 2147483647
749
- }
752
+ defaultValue: "-1"
750
753
  },
751
754
  {
752
755
  type: "text",
@@ -761,665 +764,537 @@ var StableDiffusionProvider = class {
761
764
  var stable_diffusion_default = new StableDiffusionProvider();
762
765
 
763
766
  // src/providers/comfyui/index.ts
767
+ import * as path7 from "path";
768
+
769
+ // src/providers/comfyui/workflow-discovery.ts
770
+ import * as fs4 from "fs";
771
+ import * as path5 from "path";
772
+
773
+ // src/providers/comfyui/converter-install.ts
764
774
  import * as fs3 from "fs";
765
775
  import * as path4 from "path";
766
- import * as os4 from "os";
767
-
768
- // src/providers/comfyui/workflows/ltx-video.ts
769
- var LTX_VIDEO_DEFAULTS = {
770
- model: "ltx-video-2b-v0.9.5.safetensors",
771
- textEncoder: "t5xxl_fp16.safetensors",
772
- prompt: "",
773
- negativePrompt: "worst quality, blurry, distorted, disfigured, motion smear, motion artifacts",
774
- width: 512,
775
- height: 320,
776
- numFrames: 41,
777
- fps: 8,
778
- steps: 20,
779
- cfgScale: 3,
780
- seed: -1
781
- };
782
- function buildLtxVideoWorkflow(params) {
783
- const p = { ...LTX_VIDEO_DEFAULTS, ...params };
784
- const seed = p.seed === -1 ? Math.floor(Math.random() * 2 ** 32) : p.seed;
785
- return {
786
- // Node 1: Load checkpoint (MODEL + VAE, CLIP output unused)
787
- "1": {
788
- class_type: "CheckpointLoaderSimple",
789
- inputs: {
790
- ckpt_name: p.model
791
- }
792
- },
793
- // Node 2: Load text encoder (T5-XXL) separately
794
- "2": {
795
- class_type: "CLIPLoader",
796
- inputs: {
797
- clip_name: p.textEncoder,
798
- type: "ltxv"
799
- }
800
- },
801
- // Node 3: Positive prompt encoding (CLIP from CLIPLoader, NOT from checkpoint)
802
- "3": {
803
- class_type: "CLIPTextEncode",
804
- inputs: {
805
- text: p.prompt,
806
- clip: ["2", 0]
807
- }
808
- },
809
- // Node 4: Negative prompt encoding
810
- "4": {
811
- class_type: "CLIPTextEncode",
812
- inputs: {
813
- text: p.negativePrompt,
814
- clip: ["2", 0]
815
- }
816
- },
817
- // Node 5: Empty latent video
818
- "5": {
819
- class_type: "EmptyLTXVLatentVideo",
820
- inputs: {
821
- width: p.width,
822
- height: p.height,
823
- length: p.numFrames,
824
- batch_size: 1
825
- }
826
- },
827
- // Node 6: KSampler
828
- "6": {
829
- class_type: "KSampler",
830
- inputs: {
831
- model: ["1", 0],
832
- positive: ["3", 0],
833
- negative: ["4", 0],
834
- latent_image: ["5", 0],
835
- seed,
836
- steps: p.steps,
837
- cfg: p.cfgScale,
838
- sampler_name: "euler",
839
- scheduler: "normal",
840
- denoise: 1
841
- }
842
- },
843
- // Node 7: VAE Decode (VAE from checkpoint, slot 2)
844
- "7": {
845
- class_type: "VAEDecode",
846
- inputs: {
847
- samples: ["6", 0],
848
- vae: ["1", 2]
849
- }
850
- },
851
- // Node 8: Save as MP4 via VideoHelperSuite
852
- "8": {
853
- class_type: "VHS_VideoCombine",
854
- inputs: {
855
- images: ["7", 0],
856
- frame_rate: p.fps,
857
- loop_count: 0,
858
- filename_prefix: "ltxv_output",
859
- format: "video/h264-mp4",
860
- pingpong: false,
861
- save_output: true
776
+ var CONVERTER_DIR = "comfyui-workflow-to-api-converter-endpoint";
777
+ var GITHUB_RAW_BASE = "https://raw.githubusercontent.com/SethRobinson/comfyui-workflow-to-api-converter-endpoint/main";
778
+ var FILES_TO_DOWNLOAD = ["__init__.py", "workflow_converter.py"];
779
+ async function ensureConverterInstalled(installPath) {
780
+ const customNodesDir = path4.join(installPath, "custom_nodes");
781
+ const converterDir = path4.join(customNodesDir, CONVERTER_DIR);
782
+ const allFilesExist = FILES_TO_DOWNLOAD.every(
783
+ (f) => fs3.existsSync(path4.join(converterDir, f))
784
+ );
785
+ if (allFilesExist) {
786
+ return true;
787
+ }
788
+ if (!fs3.existsSync(customNodesDir)) {
789
+ return false;
790
+ }
791
+ try {
792
+ if (!fs3.existsSync(converterDir)) {
793
+ fs3.mkdirSync(converterDir, { recursive: true });
794
+ }
795
+ for (const filename of FILES_TO_DOWNLOAD) {
796
+ const url = `${GITHUB_RAW_BASE}/${filename}`;
797
+ const response = await fetch(url, {
798
+ signal: AbortSignal.timeout(15e3)
799
+ });
800
+ if (!response.ok) {
801
+ throw new Error(`Failed to download ${filename}: ${response.status}`);
862
802
  }
803
+ const content = await response.text();
804
+ fs3.writeFileSync(path4.join(converterDir, filename), content, "utf-8");
863
805
  }
864
- };
806
+ return true;
807
+ } catch {
808
+ return false;
809
+ }
810
+ }
811
+ var converterAvailableCache = null;
812
+ async function isConverterEndpointAvailable(baseUrl) {
813
+ if (converterAvailableCache !== null) {
814
+ return converterAvailableCache;
815
+ }
816
+ try {
817
+ const response = await fetch(`${baseUrl}/workflow/convert`, {
818
+ method: "POST",
819
+ headers: { "Content-Type": "application/json" },
820
+ body: JSON.stringify({ nodes: [], links: [] }),
821
+ signal: AbortSignal.timeout(5e3)
822
+ });
823
+ converterAvailableCache = response.ok;
824
+ return converterAvailableCache;
825
+ } catch {
826
+ converterAvailableCache = false;
827
+ return false;
828
+ }
829
+ }
830
+ function resetConverterCache() {
831
+ converterAvailableCache = null;
832
+ }
833
+ async function convertWorkflow(baseUrl, uiWorkflow) {
834
+ const response = await fetch(`${baseUrl}/workflow/convert`, {
835
+ method: "POST",
836
+ headers: { "Content-Type": "application/json" },
837
+ body: JSON.stringify(uiWorkflow),
838
+ signal: AbortSignal.timeout(1e4)
839
+ });
840
+ if (!response.ok) {
841
+ const errorText = await response.text();
842
+ throw new Error(`Workflow conversion failed: ${response.status} ${errorText}`);
843
+ }
844
+ return await response.json();
865
845
  }
866
- var LTX_VIDEO_OUTPUT_NODE = "8";
867
846
 
868
- // src/providers/comfyui/workflows/wan2.1.ts
869
- var WAN21_DEFAULTS = {
870
- model: "wan2.1_t2v_1.3B_fp16.safetensors",
871
- textEncoder: "umt5_xxl_fp8_e4m3fn_scaled.safetensors",
872
- vae: "wan_2.1_vae.safetensors",
873
- prompt: "",
874
- negativePrompt: "worst quality, blurry, distorted",
875
- width: 480,
876
- height: 320,
877
- numFrames: 25,
878
- fps: 8,
879
- steps: 20,
880
- cfgScale: 5,
881
- seed: -1
882
- };
883
- function buildWan21Workflow(params) {
884
- const p = { ...WAN21_DEFAULTS, ...params };
885
- const seed = p.seed === -1 ? Math.floor(Math.random() * 2 ** 32) : p.seed;
886
- return {
887
- // Node 1: Load diffusion model (UNET)
888
- "1": {
889
- class_type: "UNETLoader",
890
- inputs: {
891
- unet_name: p.model,
892
- weight_dtype: "default"
893
- }
894
- },
895
- // Node 2: Load text encoder (UMT5-XXL)
896
- "2": {
897
- class_type: "CLIPLoader",
898
- inputs: {
899
- clip_name: p.textEncoder,
900
- type: "wan"
901
- }
902
- },
903
- // Node 3: Load VAE
904
- "3": {
905
- class_type: "VAELoader",
906
- inputs: {
907
- vae_name: p.vae
908
- }
909
- },
910
- // Node 4: Positive prompt encoding
911
- "4": {
912
- class_type: "CLIPTextEncode",
913
- inputs: {
914
- text: p.prompt,
915
- clip: ["2", 0]
916
- }
917
- },
918
- // Node 5: Negative prompt encoding
919
- "5": {
920
- class_type: "CLIPTextEncode",
921
- inputs: {
922
- text: p.negativePrompt,
923
- clip: ["2", 0]
924
- }
925
- },
926
- // Node 6: Empty latent image (for video frames)
927
- "6": {
928
- class_type: "EmptySD3LatentImage",
929
- inputs: {
930
- width: p.width,
931
- height: p.height,
932
- batch_size: p.numFrames
933
- }
934
- },
935
- // Node 7: KSampler
936
- "7": {
937
- class_type: "KSampler",
938
- inputs: {
939
- model: ["1", 0],
940
- positive: ["4", 0],
941
- negative: ["5", 0],
942
- latent_image: ["6", 0],
943
- seed,
944
- steps: p.steps,
945
- cfg: p.cfgScale,
946
- sampler_name: "euler",
947
- scheduler: "normal",
948
- denoise: 1
949
- }
950
- },
951
- // Node 8: VAE Decode
952
- "8": {
953
- class_type: "VAEDecode",
954
- inputs: {
955
- samples: ["7", 0],
956
- vae: ["3", 0]
847
+ // src/providers/comfyui/workflow-discovery.ts
848
+ var VIDEO_OUTPUT_NODES = ["VHS_VideoCombine", "SaveVideo"];
849
+ var IMAGE_OUTPUT_NODES = ["SaveImage", "PreviewImage"];
850
+ async function discoverWorkflows(baseUrl, installPath) {
851
+ resetConverterCache();
852
+ let converterJustInstalled = false;
853
+ if (installPath) {
854
+ const wasInstalled = await ensureConverterInstalled(installPath);
855
+ if (wasInstalled) {
856
+ converterJustInstalled = true;
857
+ }
858
+ }
859
+ const converterAvailable = await isConverterEndpointAvailable(baseUrl);
860
+ const needsRestart = converterJustInstalled && !converterAvailable;
861
+ const workflowFiles = await listWorkflowFiles(baseUrl, installPath);
862
+ const converted = { image: [], video: [] };
863
+ const unconvertedCapabilities = /* @__PURE__ */ new Set();
864
+ for (const file of workflowFiles) {
865
+ try {
866
+ const workflowJson = await fetchWorkflowJson(baseUrl, installPath, file);
867
+ if (!workflowJson) continue;
868
+ let apiWorkflow;
869
+ if (isApiFormat(workflowJson)) {
870
+ apiWorkflow = workflowJson;
871
+ } else if (converterAvailable) {
872
+ try {
873
+ apiWorkflow = await convertWorkflow(baseUrl, workflowJson);
874
+ } catch {
875
+ continue;
876
+ }
877
+ } else {
878
+ unconvertedCapabilities.add("image");
879
+ continue;
957
880
  }
958
- },
959
- // Node 9: Save as MP4 via VideoHelperSuite
960
- "9": {
961
- class_type: "VHS_VideoCombine",
962
- inputs: {
963
- images: ["8", 0],
964
- frame_rate: p.fps,
965
- loop_count: 0,
966
- filename_prefix: "wan21_output",
967
- format: "video/h264-mp4",
968
- pingpong: false,
969
- save_output: true
881
+ const capability = detectCapability(apiWorkflow);
882
+ const name = path5.basename(file, path5.extname(file));
883
+ converted[capability].push({ name, workflow: apiWorkflow });
884
+ } catch {
885
+ }
886
+ }
887
+ const models = [];
888
+ for (const capability of ["image", "video"]) {
889
+ if (converted[capability].length > 0) {
890
+ const displayName = capability === "image" ? "ComfyUI Image Generation" : "ComfyUI Video Generation";
891
+ const workflowParam = {
892
+ type: "comfyWorkflow",
893
+ variable: "workflow",
894
+ label: "Workflow",
895
+ comfyWorkflowOptions: { availableWorkflows: converted[capability] }
896
+ };
897
+ models.push({
898
+ name: displayName,
899
+ provider: "comfyui",
900
+ capability,
901
+ parameters: [workflowParam]
902
+ });
903
+ }
904
+ }
905
+ for (const capability of unconvertedCapabilities) {
906
+ if (converted[capability].length > 0) continue;
907
+ const displayName = capability === "image" ? "ComfyUI Image Generation" : "ComfyUI Video Generation";
908
+ models.push({
909
+ name: displayName,
910
+ provider: "comfyui",
911
+ capability,
912
+ statusHint: needsRestart ? "Restart ComfyUI to enable" : "Workflow converter not available"
913
+ });
914
+ }
915
+ return models;
916
+ }
917
+ async function listWorkflowFiles(baseUrl, installPath) {
918
+ try {
919
+ const response = await fetch(
920
+ `${baseUrl}/userdata?dir=workflows/&recurse=true&full_info=true`,
921
+ { signal: AbortSignal.timeout(5e3) }
922
+ );
923
+ if (response.ok) {
924
+ const data = await response.json();
925
+ return data.map((entry) => typeof entry === "string" ? entry : entry.path).filter((p) => p.endsWith(".json"));
926
+ }
927
+ } catch {
928
+ }
929
+ if (installPath) {
930
+ const workflowsDir = path5.join(installPath, "user", "default", "workflows");
931
+ return scanDirectory(workflowsDir);
932
+ }
933
+ return [];
934
+ }
935
+ function scanDirectory(dir) {
936
+ if (!fs4.existsSync(dir)) return [];
937
+ const results = [];
938
+ try {
939
+ const entries = fs4.readdirSync(dir, { withFileTypes: true });
940
+ for (const entry of entries) {
941
+ const fullPath = path5.join(dir, entry.name);
942
+ if (entry.isDirectory()) {
943
+ results.push(
944
+ ...scanDirectory(fullPath).map((f) => path5.join(entry.name, f))
945
+ );
946
+ } else if (entry.name.endsWith(".json")) {
947
+ results.push(entry.name);
970
948
  }
971
949
  }
972
- };
950
+ } catch {
951
+ }
952
+ return results;
973
953
  }
974
- var WAN21_OUTPUT_NODE = "9";
975
-
976
- // src/providers/comfyui/workflows/index.ts
977
- var MODEL_REGISTRY = [
978
- // LTX-Video models
979
- {
980
- pattern: /ltx[_-]?video/i,
981
- config: {
982
- family: "ltx-video",
983
- displayName: "LTX-Video",
984
- buildWorkflow: (params) => buildLtxVideoWorkflow({
985
- model: params.model,
986
- prompt: params.prompt,
987
- negativePrompt: params.negativePrompt,
988
- width: params.width,
989
- height: params.height,
990
- numFrames: params.numFrames,
991
- fps: params.fps,
992
- steps: params.steps,
993
- cfgScale: params.cfgScale,
994
- seed: params.seed
995
- }),
996
- outputNodeId: LTX_VIDEO_OUTPUT_NODE,
997
- defaults: {
998
- width: LTX_VIDEO_DEFAULTS.width,
999
- height: LTX_VIDEO_DEFAULTS.height,
1000
- numFrames: LTX_VIDEO_DEFAULTS.numFrames,
1001
- fps: LTX_VIDEO_DEFAULTS.fps,
1002
- steps: LTX_VIDEO_DEFAULTS.steps,
1003
- cfgScale: LTX_VIDEO_DEFAULTS.cfgScale
954
+ async function fetchWorkflowJson(baseUrl, installPath, filePath) {
955
+ try {
956
+ const userdataPath = `workflows/${filePath}`;
957
+ const response = await fetch(
958
+ `${baseUrl}/userdata/${encodeURIComponent(userdataPath)}`,
959
+ { signal: AbortSignal.timeout(5e3) }
960
+ );
961
+ if (response.ok) {
962
+ return await response.json();
963
+ }
964
+ } catch {
965
+ }
966
+ if (installPath) {
967
+ const fullPath = path5.join(
968
+ installPath,
969
+ "user",
970
+ "default",
971
+ "workflows",
972
+ filePath
973
+ );
974
+ try {
975
+ const content = fs4.readFileSync(fullPath, "utf-8");
976
+ return JSON.parse(content);
977
+ } catch {
978
+ return null;
979
+ }
980
+ }
981
+ return null;
982
+ }
983
+ function isApiFormat(json) {
984
+ const keys = Object.keys(json);
985
+ if (keys.length === 0) return false;
986
+ return keys.some((key) => {
987
+ const node = json[key];
988
+ return /^\d+$/.test(key) && typeof node === "object" && node !== null && "class_type" in node;
989
+ });
990
+ }
991
+ function detectCapability(apiWorkflow) {
992
+ for (const node of Object.values(apiWorkflow)) {
993
+ if (typeof node === "object" && node !== null && "class_type" in node) {
994
+ const classType = node.class_type;
995
+ if (VIDEO_OUTPUT_NODES.includes(classType)) {
996
+ return "video";
1004
997
  }
1005
998
  }
1006
- },
1007
- // Wan 2.1 models
1008
- {
1009
- pattern: /wan2[\._]?1/i,
1010
- config: {
1011
- family: "wan2.1",
1012
- displayName: "Wan 2.1",
1013
- buildWorkflow: (params) => buildWan21Workflow({
1014
- model: params.model,
1015
- prompt: params.prompt,
1016
- negativePrompt: params.negativePrompt,
1017
- width: params.width,
1018
- height: params.height,
1019
- numFrames: params.numFrames,
1020
- fps: params.fps,
1021
- steps: params.steps,
1022
- cfgScale: params.cfgScale,
1023
- seed: params.seed
1024
- }),
1025
- outputNodeId: WAN21_OUTPUT_NODE,
1026
- defaults: {
1027
- width: WAN21_DEFAULTS.width,
1028
- height: WAN21_DEFAULTS.height,
1029
- numFrames: WAN21_DEFAULTS.numFrames,
1030
- fps: WAN21_DEFAULTS.fps,
1031
- steps: WAN21_DEFAULTS.steps,
1032
- cfgScale: WAN21_DEFAULTS.cfgScale
999
+ }
1000
+ for (const node of Object.values(apiWorkflow)) {
1001
+ if (typeof node === "object" && node !== null && "class_type" in node) {
1002
+ const classType = node.class_type;
1003
+ if (IMAGE_OUTPUT_NODES.includes(classType)) {
1004
+ return "image";
1033
1005
  }
1034
1006
  }
1035
1007
  }
1036
- ];
1037
- function getWorkflowForModel(modelFilename) {
1038
- for (const entry of MODEL_REGISTRY) {
1039
- if (entry.pattern.test(modelFilename)) {
1040
- return entry.config;
1008
+ return "image";
1009
+ }
1010
+
1011
+ // src/providers/comfyui/workflow-executor.ts
1012
+ import * as path6 from "path";
1013
+ async function executeWorkflow(options) {
1014
+ const { baseUrl, workflow, onProgress } = options;
1015
+ const clientId = `mindstudio_${Date.now()}_${Math.random().toString(36).slice(2)}`;
1016
+ const wsUrl = baseUrl.replace(/^http/, "ws") + `/ws?clientId=${clientId}`;
1017
+ const submitResponse = await fetch(`${baseUrl}/prompt`, {
1018
+ method: "POST",
1019
+ headers: { "Content-Type": "application/json" },
1020
+ body: JSON.stringify({
1021
+ prompt: workflow,
1022
+ client_id: clientId
1023
+ })
1024
+ });
1025
+ if (!submitResponse.ok) {
1026
+ const errorText = await submitResponse.text();
1027
+ throw new Error(
1028
+ `ComfyUI prompt submission failed: ${submitResponse.status} ${errorText}`
1029
+ );
1030
+ }
1031
+ const submitResult2 = await submitResponse.json();
1032
+ if (submitResult2.node_errors && Object.keys(submitResult2.node_errors).length > 0) {
1033
+ throw new Error(
1034
+ `ComfyUI workflow validation failed: ${JSON.stringify(submitResult2.node_errors)}`
1035
+ );
1036
+ }
1037
+ const promptId = submitResult2.prompt_id;
1038
+ await waitForCompletion(wsUrl, promptId, onProgress);
1039
+ const historyResponse = await fetch(`${baseUrl}/history/${promptId}`, {
1040
+ signal: AbortSignal.timeout(3e4)
1041
+ });
1042
+ if (!historyResponse.ok) {
1043
+ throw new Error(
1044
+ `Failed to fetch result history: ${historyResponse.status}`
1045
+ );
1046
+ }
1047
+ const history = await historyResponse.json();
1048
+ const promptHistory = history[promptId];
1049
+ if (!promptHistory) {
1050
+ throw new Error("No result found in ComfyUI history");
1051
+ }
1052
+ let outputFile = null;
1053
+ for (const nodeOutputs of Object.values(promptHistory.outputs)) {
1054
+ if (nodeOutputs.gifs && nodeOutputs.gifs.length > 0) {
1055
+ outputFile = nodeOutputs.gifs[0];
1056
+ break;
1057
+ }
1058
+ if (!outputFile && nodeOutputs.images && nodeOutputs.images.length > 0) {
1059
+ outputFile = nodeOutputs.images[0];
1041
1060
  }
1042
1061
  }
1043
- return null;
1062
+ if (!outputFile) {
1063
+ throw new Error("No output files found in ComfyUI result");
1064
+ }
1065
+ const fileUrl = new URL(`${baseUrl}/view`);
1066
+ fileUrl.searchParams.set("filename", outputFile.filename);
1067
+ fileUrl.searchParams.set("subfolder", outputFile.subfolder || "");
1068
+ fileUrl.searchParams.set("type", outputFile.type || "output");
1069
+ const fileResponse = await fetch(fileUrl.toString(), {
1070
+ signal: AbortSignal.timeout(6e4)
1071
+ });
1072
+ if (!fileResponse.ok) {
1073
+ throw new Error(`Failed to download output file: ${fileResponse.status}`);
1074
+ }
1075
+ const fileBuffer = await fileResponse.arrayBuffer();
1076
+ const dataBase64 = Buffer.from(fileBuffer).toString("base64");
1077
+ const ext = path6.extname(outputFile.filename).toLowerCase();
1078
+ const mimeType = getMimeType(ext);
1079
+ return { dataBase64, mimeType, filename: outputFile.filename };
1044
1080
  }
1045
- function isKnownVideoModel(modelFilename) {
1046
- return getWorkflowForModel(modelFilename) !== null;
1081
+ function getMimeType(ext) {
1082
+ switch (ext) {
1083
+ case ".mp4":
1084
+ return "video/mp4";
1085
+ case ".webm":
1086
+ return "video/webm";
1087
+ case ".webp":
1088
+ return "image/webp";
1089
+ case ".gif":
1090
+ return "image/gif";
1091
+ case ".png":
1092
+ return "image/png";
1093
+ case ".jpg":
1094
+ case ".jpeg":
1095
+ return "image/jpeg";
1096
+ default:
1097
+ return "application/octet-stream";
1098
+ }
1099
+ }
1100
+ function waitForCompletion(wsUrl, promptId, onProgress) {
1101
+ return new Promise((resolve, reject) => {
1102
+ const timeoutMs = 30 * 60 * 1e3;
1103
+ let ws;
1104
+ const timeout = setTimeout(() => {
1105
+ try {
1106
+ ws?.close();
1107
+ } catch {
1108
+ }
1109
+ reject(new Error("Workflow execution timed out after 30 minutes"));
1110
+ }, timeoutMs);
1111
+ try {
1112
+ ws = new WebSocket(wsUrl);
1113
+ } catch (err) {
1114
+ clearTimeout(timeout);
1115
+ reject(
1116
+ new Error(
1117
+ `Failed to connect to ComfyUI WebSocket: ${err instanceof Error ? err.message : err}`
1118
+ )
1119
+ );
1120
+ return;
1121
+ }
1122
+ ws.onmessage = (event) => {
1123
+ try {
1124
+ const message = JSON.parse(
1125
+ typeof event.data === "string" ? event.data : ""
1126
+ );
1127
+ if (message.type === "progress") {
1128
+ const data = message.data;
1129
+ if (!data.prompt_id || data.prompt_id === promptId) {
1130
+ onProgress?.({
1131
+ step: data.value,
1132
+ totalSteps: data.max,
1133
+ currentNode: data.node
1134
+ });
1135
+ }
1136
+ }
1137
+ if (message.type === "execution_success") {
1138
+ const data = message.data;
1139
+ if (data.prompt_id === promptId) {
1140
+ clearTimeout(timeout);
1141
+ ws.close();
1142
+ resolve();
1143
+ }
1144
+ }
1145
+ if (message.type === "execution_error") {
1146
+ const data = message.data;
1147
+ if (data.prompt_id === promptId) {
1148
+ clearTimeout(timeout);
1149
+ ws.close();
1150
+ reject(
1151
+ new Error(
1152
+ `ComfyUI execution error${data.node_type ? ` in ${data.node_type}` : ""}: ${data.exception_message || "Unknown error"}`
1153
+ )
1154
+ );
1155
+ }
1156
+ }
1157
+ } catch {
1158
+ }
1159
+ };
1160
+ ws.onerror = () => {
1161
+ clearTimeout(timeout);
1162
+ reject(new Error("ComfyUI WebSocket error: connection failed"));
1163
+ };
1164
+ ws.onclose = (event) => {
1165
+ if (!event.wasClean) {
1166
+ clearTimeout(timeout);
1167
+ reject(new Error("ComfyUI WebSocket connection closed unexpectedly"));
1168
+ }
1169
+ };
1170
+ });
1047
1171
  }
1048
1172
 
1049
1173
  // src/providers/comfyui/readme.md
1050
- var readme_default4 = '# ComfyUI\n\nComfyUI runs video generation models (LTX-Video, Wan2.1) locally. MindStudio handles all the workflow complexity for you -- you just need to install ComfyUI and download a model.\n\n**Default port:** 8188\n**Website:** https://www.comfy.org\n**GitHub:** https://github.com/comfyanonymous/ComfyUI\n\n## What You\'ll Need\n\n- **Python 3.10 or newer** -- Check by opening a terminal and typing `python3 --version`. If you don\'t have it, download from https://www.python.org/downloads/\n\n- **Git** -- Check by typing `git --version`. If you don\'t have it, download from https://git-scm.com/downloads\n\n- **A GPU with 8+ GB of VRAM** -- Video generation is demanding. Without enough GPU memory, generation will fail or be extremely slow.\n\n## Step 1: Install ComfyUI\n\nOpen a terminal and run these commands one at a time, waiting for each to finish before running the next.\n\nDownload ComfyUI:\n\n```\ngit clone https://github.com/comfyanonymous/ComfyUI.git ~/ComfyUI\n```\n\nGo into the folder:\n\n```\ncd ~/ComfyUI\n```\n\nCreate an isolated Python environment:\n\n```\npython3 -m venv venv\n```\n\nActivate the environment:\n\n```\nsource venv/bin/activate\n```\n\n**Windows users:** use `venv\\Scripts\\activate` instead.\n\nInstall dependencies (may take a few minutes):\n\n```\npip install -r requirements.txt\n```\n\n## Step 2: Download a Video Model\n\nYou need at least one video model for MindStudio to use.\n\n### LTX-Video (recommended to start)\n\nFastest option, good for getting up and running quickly.\n\n1. Go to https://huggingface.co/Lightricks/LTX-Video\n2. Download `ltx-video-2b-v0.9.5.safetensors`\n3. Move the file into:\n\n```\n~/ComfyUI/models/checkpoints/\n```\n\n### Wan2.1\n\nHigher quality but slower and needs more VRAM. Requires multiple files -- make sure you download all of them or it won\'t work.\n\n1. Go to https://huggingface.co/Comfy-Org/Wan2.1_ComfyUI_repackaged\n2. 
Place UNET files in `~/ComfyUI/models/diffusion_models/`\n3. Place text encoder files in `~/ComfyUI/models/text_encoders/`\n4. Place VAE files in `~/ComfyUI/models/vae/`\n\n## Step 3: Start the Server\n\nEvery time you want to use ComfyUI with MindStudio, open a terminal and run:\n\n```\ncd ~/ComfyUI && source venv/bin/activate && python main.py --listen\n```\n\n**Windows users:**\n\n```\ncd %USERPROFILE%\\ComfyUI && venv\\Scripts\\activate && python main.py --listen\n```\n\n**Important:** The `--listen` flag is required. Without it, MindStudio cannot connect to the server.\n\nLeave this terminal window open. When you see "To see the GUI go to: http://0.0.0.0:8188", the server is ready. Go back to the tunnel and select **Refresh Providers** -- your models should appear.\n\n## Troubleshooting\n\n- **MindStudio says ComfyUI is "not running"** -- Make sure you started with the `--listen` flag. Without it, the server won\'t accept connections from MindStudio.\n\n- **Server is running but no models show up** -- Check that your model files are in the right folders under `~/ComfyUI/models/`. Checkpoint files go in `checkpoints/`, UNET files go in `diffusion_models/`.\n\n- **Generation fails with workflow errors** -- For Wan2.1, you need all three files (UNET, text encoder, VAE). If any are missing, generation will fail.\n\n- **"CUDA out of memory"** -- Video generation needs a lot of GPU memory. Try reducing the resolution or number of frames in your generation settings, or use LTX-Video which is lighter.\n\n- **Server crashes mid-generation** -- Press Ctrl+C in the terminal and run the start command again.\n';
1174
+ var readme_default4 = "# ComfyUI\n\nComfyUI is a node-based workflow tool for running image and video generation models locally. MindStudio automatically discovers your saved workflows and makes them available as models -- any workflow you build or download in ComfyUI can be used through MindStudio.\n\n**Default port:** 8188\n**Website:** https://www.comfy.org\n\n## What You'll Need\n\n- **A GPU with 8+ GB of VRAM** -- Image and video generation is demanding. Without enough GPU memory, generation will fail or be extremely slow.\n\n## Step 1: Install ComfyUI\n\nDownload and install ComfyUI Desktop from the official website:\n\nhttps://www.comfy.org/download\n\nThe installer handles Python, dependencies, and everything else automatically. Follow the on-screen prompts to complete setup.\n\n## Step 2: Save a Workflow\n\nMindStudio discovers workflows you've saved in ComfyUI. If you don't have any saved workflows yet, open the ComfyUI interface in your browser (http://127.0.0.1:8188), build or load a workflow, and save it using the menu. Downloaded workflow files placed in ComfyUI's workflows folder will also be discovered.\n\nAny workflow that produces image or video output will work. MindStudio detects the output type automatically based on the nodes in your workflow.\n\n## Step 3: Start the Server\n\nOpen ComfyUI Desktop. Once it's running, go back to the tunnel and select **Refresh Providers** -- your saved workflows should appear as models.\n\nIf you're running ComfyUI from the command line instead, start it with:\n\n```\ncd ~/ComfyUI && python main.py --listen\n```\n\n**Important:** The `--listen` flag is required when running from the command line. Without it, MindStudio cannot connect to the server.\n\n## Tip: Workflow Converter\n\nMindStudio automatically installs a custom node called `comfyui-workflow-to-api-converter-endpoint` into ComfyUI's `custom_nodes/` folder. 
This converts workflows saved in ComfyUI's UI format into the API format needed for execution. **After the first run, you'll need to restart ComfyUI once** so it picks up the new node \u2014 after that, it works automatically. If the auto-install doesn't work (e.g. permissions issues), you can install it manually by cloning https://github.com/SethRobinson/comfyui-workflow-to-api-converter-endpoint into your ComfyUI `custom_nodes/` directory and restarting ComfyUI. Without this node, only workflows already saved in API format will be discovered.\n\n## Troubleshooting\n\n- **MindStudio says ComfyUI is \"not running\"** -- Make sure ComfyUI Desktop is open, or if running from the terminal, that you started with the `--listen` flag.\n\n- **Server is running but no workflows show up** -- Make sure you have at least one saved workflow in ComfyUI. Open the ComfyUI interface, load or build a workflow, and save it.\n\n- **\"CUDA out of memory\"** -- Your GPU doesn't have enough memory for the workflow you're running. Try a lighter model or reduce resolution in your workflow.\n\n- **Server crashes mid-generation** -- Restart ComfyUI Desktop, or press Ctrl+C in the terminal and run the start command again.\n";
1051
1175
 
1052
1176
  // src/providers/comfyui/index.ts
1177
+ var COMFYUI_PORTS = [8e3, 8188];
1053
1178
  var ComfyUIProvider = class {
1054
1179
  name = "comfyui";
1055
1180
  displayName = "ComfyUI";
1056
- description = "Generate videos locally using node-based workflows. Supports LTX-Video and Wan2.1.";
1057
- capabilities = ["video"];
1181
+ description = "Run any saved ComfyUI workflow \u2014 images, video, and more.";
1182
+ capabilities = ["image", "video"];
1058
1183
  readme = readme_default4;
1059
- defaultBaseUrl = "http://127.0.0.1:8188";
1184
+ defaultBaseUrl = "http://127.0.0.1:8000";
1060
1185
  get baseUrl() {
1061
1186
  return getProviderBaseUrl(this.name, this.defaultBaseUrl);
1062
1187
  }
1063
1188
  getBaseUrl() {
1064
1189
  return this.baseUrl;
1065
1190
  }
1066
- async isRunning() {
1067
- try {
1068
- const response = await fetch(`${this.getBaseUrl()}/system_stats`, {
1069
- method: "GET",
1070
- signal: AbortSignal.timeout(5e3)
1071
- });
1072
- return response.ok;
1073
- } catch {
1074
- return false;
1075
- }
1076
- }
1077
- async detect() {
1078
- const savedPath = getProviderInstallPath(this.name);
1079
- const possiblePaths = [
1080
- ...savedPath ? [savedPath] : [],
1081
- path4.join(os4.homedir(), "ComfyUI"),
1082
- path4.join(os4.homedir(), "comfyui"),
1083
- path4.join(os4.homedir(), "Projects", "ComfyUI"),
1084
- path4.join(os4.homedir(), "Code", "ComfyUI")
1085
- ];
1086
- let installed = false;
1087
- for (const p of possiblePaths) {
1088
- if (fs3.existsSync(path4.join(p, "main.py")) && fs3.existsSync(path4.join(p, "requirements.txt"))) {
1089
- installed = true;
1090
- break;
1191
+ /**
1192
+ * Try to reach ComfyUI on the configured URL, then fall back to known ports.
1193
+ * Persists whichever URL responds so future calls go direct.
1194
+ */
1195
+ async findRunningUrl() {
1196
+ const configured = this.getBaseUrl();
1197
+ if (await this.checkUrl(configured)) return configured;
1198
+ for (const port of COMFYUI_PORTS) {
1199
+ const url = `http://127.0.0.1:${port}`;
1200
+ if (url === configured) continue;
1201
+ if (await this.checkUrl(url)) {
1202
+ setProviderBaseUrl(this.name, url);
1203
+ return url;
1091
1204
  }
1092
1205
  }
1093
- let running = false;
1206
+ return null;
1207
+ }
1208
+ async checkUrl(url) {
1094
1209
  try {
1095
- const response = await fetch("http://127.0.0.1:8188/system_stats", {
1210
+ const response = await fetch(`${url}/system_stats`, {
1096
1211
  signal: AbortSignal.timeout(1e3)
1097
1212
  });
1098
- running = response.ok;
1099
- if (running) installed = true;
1213
+ return response.ok;
1100
1214
  } catch {
1101
- running = false;
1215
+ return false;
1102
1216
  }
1103
- return { installed, running };
1104
1217
  }
1105
1218
  /**
1106
- * Discover video models by scanning ComfyUI's model directories.
1219
+ * Query the running ComfyUI server for its install path via /internal/folder_paths.
1220
+ * Derives the root from the custom_nodes path and persists it for future use.
1107
1221
  */
1108
- async discoverModels() {
1109
- const models = [];
1110
- try {
1111
- const response = await fetch(
1112
- `${this.getBaseUrl()}/object_info/CheckpointLoaderSimple`,
1113
- { signal: AbortSignal.timeout(5e3) }
1114
- );
1115
- if (response.ok) {
1116
- const data = await response.json();
1117
- const nodeInfo = data.CheckpointLoaderSimple;
1118
- const checkpoints = nodeInfo?.input?.required?.ckpt_name?.[0] || [];
1119
- for (const name of checkpoints) {
1120
- if (isKnownVideoModel(name)) {
1121
- const workflow = getWorkflowForModel(name);
1122
- models.push({
1123
- name,
1124
- provider: this.name,
1125
- capability: "video",
1126
- parameterSize: workflow?.displayName
1127
- });
1128
- }
1129
- }
1130
- }
1131
- } catch {
1132
- }
1222
+ async queryInstallPath(baseUrl) {
1133
1223
  try {
1134
- const response = await fetch(
1135
- `${this.getBaseUrl()}/object_info/UNETLoader`,
1136
- { signal: AbortSignal.timeout(5e3) }
1137
- );
1138
- if (response.ok) {
1139
- const data = await response.json();
1140
- const nodeInfo = data.UNETLoader;
1141
- const unetModels = nodeInfo?.input?.required?.unet_name?.[0] || [];
1142
- for (const name of unetModels) {
1143
- if (isKnownVideoModel(name) && !models.some((m) => m.name === name)) {
1144
- const workflow = getWorkflowForModel(name);
1145
- models.push({
1146
- name,
1147
- provider: this.name,
1148
- capability: "video",
1149
- parameterSize: workflow?.displayName
1150
- });
1151
- }
1152
- }
1153
- }
1224
+ const response = await fetch(`${baseUrl}/internal/folder_paths`, {
1225
+ signal: AbortSignal.timeout(3e3)
1226
+ });
1227
+ if (!response.ok) return null;
1228
+ const data = await response.json();
1229
+ const customNodesPaths = data.custom_nodes;
1230
+ if (!customNodesPaths || customNodesPaths.length === 0) return null;
1231
+ const installPath = path7.dirname(customNodesPaths[0]);
1232
+ setProviderInstallPath(this.name, installPath);
1233
+ return installPath;
1154
1234
  } catch {
1235
+ return null;
1155
1236
  }
1156
- if (models.length === 0) {
1157
- const installPath = getProviderInstallPath(this.name);
1158
- if (installPath) {
1159
- const dirs = [
1160
- path4.join(installPath, "models", "checkpoints"),
1161
- path4.join(installPath, "models", "diffusion_models")
1162
- ];
1163
- for (const dir of dirs) {
1164
- if (fs3.existsSync(dir)) {
1165
- try {
1166
- const files = fs3.readdirSync(dir);
1167
- for (const file of files) {
1168
- if (isKnownVideoModel(file) && !models.some((m) => m.name === file)) {
1169
- const workflow = getWorkflowForModel(file);
1170
- models.push({
1171
- name: file,
1172
- provider: this.name,
1173
- capability: "video",
1174
- parameterSize: workflow?.displayName
1175
- });
1176
- }
1177
- }
1178
- } catch {
1179
- }
1180
- }
1181
- }
1237
+ }
1238
+ async isRunning() {
1239
+ return await this.findRunningUrl() !== null;
1240
+ }
1241
+ async detect() {
1242
+ const runningUrl = await this.findRunningUrl();
1243
+ if (runningUrl) {
1244
+ const queriedPath = await this.queryInstallPath(runningUrl);
1245
+ if (queriedPath) {
1246
+ ensureConverterInstalled(queriedPath).catch(() => {
1247
+ });
1182
1248
  }
1249
+ return { installed: true, running: true };
1183
1250
  }
1184
- return models;
1251
+ const savedPath = getProviderInstallPath(this.name);
1252
+ return { installed: !!savedPath, running: false };
1185
1253
  }
1186
1254
  /**
1187
- * Generate a video using ComfyUI.
1255
+ * Discover workflow-based models from user-saved ComfyUI workflows.
1188
1256
  */
1189
- async generateVideo(model, prompt, options, onProgress) {
1190
- const baseUrl = this.getBaseUrl();
1191
- const workflowConfig = getWorkflowForModel(model);
1192
- if (!workflowConfig) {
1193
- throw new Error(
1194
- `No workflow template found for model: ${model}. Supported families: LTX-Video, Wan2.1`
1195
- );
1196
- }
1197
- const defaults = workflowConfig.defaults;
1198
- const seed = options?.seed !== void 0 && options.seed !== -1 ? options.seed : Math.floor(Math.random() * 2 ** 32);
1199
- const workflow = workflowConfig.buildWorkflow({
1200
- model,
1201
- prompt,
1202
- negativePrompt: options?.negativePrompt || "worst quality, blurry, distorted",
1203
- width: options?.width || defaults.width,
1204
- height: options?.height || defaults.height,
1205
- numFrames: options?.numFrames || defaults.numFrames,
1206
- fps: options?.fps || defaults.fps,
1207
- steps: options?.steps || defaults.steps,
1208
- cfgScale: options?.cfgScale || defaults.cfgScale,
1209
- seed
1210
- });
1211
- const clientId = `mindstudio_${Date.now()}_${Math.random().toString(36).slice(2)}`;
1212
- const wsUrl = baseUrl.replace(/^http/, "ws") + `/ws?clientId=${clientId}`;
1213
- const submitResponse = await fetch(`${baseUrl}/prompt`, {
1214
- method: "POST",
1215
- headers: { "Content-Type": "application/json" },
1216
- body: JSON.stringify({
1217
- prompt: workflow,
1218
- client_id: clientId
1219
- })
1220
- });
1221
- if (!submitResponse.ok) {
1222
- const errorText = await submitResponse.text();
1223
- throw new Error(
1224
- `ComfyUI prompt submission failed: ${submitResponse.status} ${errorText}`
1225
- );
1226
- }
1227
- const submitResult2 = await submitResponse.json();
1228
- if (submitResult2.node_errors && Object.keys(submitResult2.node_errors).length > 0) {
1229
- throw new Error(
1230
- `ComfyUI workflow validation failed: ${JSON.stringify(submitResult2.node_errors)}`
1231
- );
1232
- }
1233
- const promptId = submitResult2.prompt_id;
1234
- await this.waitForCompletion(wsUrl, promptId, onProgress);
1235
- const historyResponse = await fetch(`${baseUrl}/history/${promptId}`, {
1236
- signal: AbortSignal.timeout(3e4)
1237
- });
1238
- if (!historyResponse.ok) {
1239
- throw new Error(
1240
- `Failed to fetch result history: ${historyResponse.status}`
1241
- );
1242
- }
1243
- const history = await historyResponse.json();
1244
- const promptHistory = history[promptId];
1245
- if (!promptHistory) {
1246
- throw new Error("No result found in ComfyUI history");
1247
- }
1248
- const outputNodeId = workflowConfig.outputNodeId;
1249
- const outputData = promptHistory.outputs[outputNodeId];
1250
- const outputFiles = outputData?.gifs || outputData?.images;
1251
- if (!outputFiles || outputFiles.length === 0) {
1252
- throw new Error("No output files found in ComfyUI result");
1257
+ async discoverModels() {
1258
+ const installPath = getProviderInstallPath(this.name) ?? null;
1259
+ return discoverWorkflows(this.getBaseUrl(), installPath);
1260
+ }
1261
+ /**
1262
+ * Generate an image using a ComfyUI workflow, with progress tracking.
1263
+ */
1264
+ async generateImage(_model, _prompt, options, onProgress) {
1265
+ if (!options?.workflow) {
1266
+ throw new Error("ComfyUI image generation requires a workflow");
1253
1267
  }
1254
- const outputFile = outputFiles[0];
1255
- const fileUrl = new URL(`${baseUrl}/view`);
1256
- fileUrl.searchParams.set("filename", outputFile.filename);
1257
- fileUrl.searchParams.set("subfolder", outputFile.subfolder || "");
1258
- fileUrl.searchParams.set("type", outputFile.type || "output");
1259
- const fileResponse = await fetch(fileUrl.toString(), {
1260
- signal: AbortSignal.timeout(6e4)
1268
+ const result = await executeWorkflow({
1269
+ baseUrl: this.getBaseUrl(),
1270
+ workflow: options.workflow,
1271
+ onProgress: onProgress ? (p) => onProgress({ step: p.step, totalSteps: p.totalSteps }) : void 0
1261
1272
  });
1262
- if (!fileResponse.ok) {
1263
- throw new Error(`Failed to download output file: ${fileResponse.status}`);
1264
- }
1265
- const fileBuffer = await fileResponse.arrayBuffer();
1266
- const videoBase64 = Buffer.from(fileBuffer).toString("base64");
1267
- const ext = path4.extname(outputFile.filename).toLowerCase();
1268
- const mimeType = ext === ".mp4" ? "video/mp4" : ext === ".webm" ? "video/webm" : ext === ".webp" ? "image/webp" : ext === ".gif" ? "image/gif" : "video/mp4";
1269
- const fps = options?.fps || defaults.fps;
1270
- const numFrames = options?.numFrames || defaults.numFrames;
1271
1273
  return {
1272
- videoBase64,
1273
- mimeType,
1274
- duration: numFrames / fps,
1275
- fps,
1276
- seed
1274
+ imageBase64: result.dataBase64,
1275
+ mimeType: result.mimeType
1277
1276
  };
1278
1277
  }
1279
1278
  /**
1280
- * Wait for a ComfyUI prompt to finish execution via WebSocket.
1279
+ * Generate a video using a ComfyUI workflow.
1281
1280
  */
1282
- waitForCompletion(wsUrl, promptId, onProgress) {
1283
- return new Promise((resolve, reject) => {
1284
- const timeoutMs = 30 * 60 * 1e3;
1285
- let ws;
1286
- const timeout = setTimeout(() => {
1287
- try {
1288
- ws?.close();
1289
- } catch {
1290
- }
1291
- reject(new Error("Video generation timed out after 30 minutes"));
1292
- }, timeoutMs);
1293
- try {
1294
- ws = new WebSocket(wsUrl);
1295
- } catch (err) {
1296
- clearTimeout(timeout);
1297
- reject(
1298
- new Error(
1299
- `Failed to connect to ComfyUI WebSocket: ${err instanceof Error ? err.message : err}`
1300
- )
1301
- );
1302
- return;
1303
- }
1304
- ws.onmessage = (event) => {
1305
- try {
1306
- const message = JSON.parse(
1307
- typeof event.data === "string" ? event.data : ""
1308
- );
1309
- if (message.type === "progress") {
1310
- const data = message.data;
1311
- if (!data.prompt_id || data.prompt_id === promptId) {
1312
- onProgress?.({
1313
- step: data.value,
1314
- totalSteps: data.max,
1315
- currentNode: data.node
1316
- });
1317
- }
1318
- }
1319
- if (message.type === "execution_success") {
1320
- const data = message.data;
1321
- if (data.prompt_id === promptId) {
1322
- clearTimeout(timeout);
1323
- ws.close();
1324
- resolve();
1325
- }
1326
- }
1327
- if (message.type === "execution_error") {
1328
- const data = message.data;
1329
- if (data.prompt_id === promptId) {
1330
- clearTimeout(timeout);
1331
- ws.close();
1332
- reject(
1333
- new Error(
1334
- `ComfyUI execution error${data.node_type ? ` in ${data.node_type}` : ""}: ${data.exception_message || "Unknown error"}`
1335
- )
1336
- );
1337
- }
1338
- }
1339
- } catch {
1340
- }
1341
- };
1342
- ws.onerror = () => {
1343
- clearTimeout(timeout);
1344
- reject(new Error("ComfyUI WebSocket error: connection failed"));
1345
- };
1346
- ws.onclose = (event) => {
1347
- if (!event.wasClean) {
1348
- clearTimeout(timeout);
1349
- reject(new Error("ComfyUI WebSocket connection closed unexpectedly"));
1350
- }
1351
- };
1281
+ async generateVideo(_model, _prompt, options, onProgress) {
1282
+ if (!options?.workflow) {
1283
+ throw new Error("ComfyUI video generation requires a workflow");
1284
+ }
1285
+ const result = await executeWorkflow({
1286
+ baseUrl: this.getBaseUrl(),
1287
+ workflow: options.workflow,
1288
+ onProgress: onProgress ? (p) => onProgress({
1289
+ step: p.step,
1290
+ totalSteps: p.totalSteps,
1291
+ currentNode: p.currentNode
1292
+ }) : void 0
1352
1293
  });
1353
- }
1354
- /**
1355
- * Get parameter schemas for video generation UI configuration.
1356
- */
1357
- async getParameterSchemas() {
1358
- return [
1359
- {
1360
- type: "number",
1361
- label: "Width",
1362
- variable: "width",
1363
- helpText: "Video width in pixels. Larger = better quality but bigger file.",
1364
- defaultValue: 512,
1365
- numberOptions: { min: 256, max: 1280, step: 64 }
1366
- },
1367
- {
1368
- type: "number",
1369
- label: "Height",
1370
- variable: "height",
1371
- helpText: "Video height in pixels. Larger = better quality but bigger file.",
1372
- defaultValue: 320,
1373
- numberOptions: { min: 256, max: 1280, step: 64 }
1374
- },
1375
- {
1376
- type: "number",
1377
- label: "Frames",
1378
- variable: "numFrames",
1379
- helpText: "Number of frames to generate. More frames = longer video but bigger file. Keep low to avoid upload limits.",
1380
- defaultValue: 41,
1381
- numberOptions: { min: 9, max: 97, step: 8 }
1382
- },
1383
- {
1384
- type: "number",
1385
- label: "FPS",
1386
- variable: "fps",
1387
- helpText: "Frames per second for the output video.",
1388
- defaultValue: 8,
1389
- numberOptions: { min: 4, max: 30, step: 1 }
1390
- },
1391
- {
1392
- type: "number",
1393
- label: "Steps",
1394
- variable: "steps",
1395
- helpText: "Number of denoising steps. More steps = higher quality but slower.",
1396
- defaultValue: 20,
1397
- numberOptions: { min: 10, max: 100, step: 1 }
1398
- },
1399
- {
1400
- type: "number",
1401
- label: "CFG Scale",
1402
- variable: "cfgScale",
1403
- helpText: "How strongly the video should follow the prompt. Higher = more literal.",
1404
- defaultValue: 7,
1405
- numberOptions: { min: 1, max: 20, step: 0.5 }
1406
- },
1407
- {
1408
- type: "number",
1409
- label: "Seed",
1410
- variable: "seed",
1411
- helpText: "A specific value used to guide randomness. Use -1 for random.",
1412
- defaultValue: -1,
1413
- numberOptions: { min: -1, max: 2147483647 }
1414
- },
1415
- {
1416
- type: "text",
1417
- label: "Negative Prompt",
1418
- variable: "negativePrompt",
1419
- helpText: "Things you don't want in the video",
1420
- placeholder: "worst quality, blurry, distorted"
1421
- }
1422
- ];
1294
+ return {
1295
+ videoBase64: result.dataBase64,
1296
+ mimeType: result.mimeType
1297
+ };
1423
1298
  }
1424
1299
  };
1425
1300
  var comfyui_default = new ComfyUIProvider();
@@ -1471,14 +1346,15 @@ async function discoverAllModelsWithParameters() {
1471
1346
  const modelsWithParams = await Promise.all(
1472
1347
  runningProviders.map(async (provider) => {
1473
1348
  const models = await provider.discoverModels();
1349
+ const realModels = models.filter((m) => !m.statusHint);
1474
1350
  if (typeof provider.getParameterSchemas === "function") {
1475
1351
  const parameters = await provider.getParameterSchemas();
1476
- return models.map((model) => ({
1352
+ return realModels.map((model) => ({
1477
1353
  ...model,
1478
- parameters
1354
+ parameters: model.parameters ?? parameters
1479
1355
  }));
1480
1356
  }
1481
- return models;
1357
+ return realModels;
1482
1358
  })
1483
1359
  );
1484
1360
  return modelsWithParams.flat();
@@ -1644,45 +1520,32 @@ var TunnelRunner = class {
1644
1520
  }
1645
1521
  const prompt = request.payload.prompt || "";
1646
1522
  const config2 = request.payload.config || {};
1647
- let result;
1648
- if (provider.generateImageWithProgress) {
1649
- result = await provider.generateImageWithProgress(
1650
- request.modelId,
1651
- prompt,
1652
- {
1653
- negativePrompt: config2.negativePrompt,
1654
- width: config2.width,
1655
- height: config2.height,
1656
- steps: config2.steps,
1657
- cfgScale: config2.cfgScale,
1658
- seed: config2.seed,
1659
- sampler: config2.sampler
1660
- },
1661
- async (progress) => {
1662
- await submitGenerationProgress(
1663
- request.id,
1664
- progress.step,
1665
- progress.totalSteps,
1666
- progress.preview
1667
- );
1668
- requestEvents.emitProgress({
1669
- id: request.id,
1670
- step: progress.step,
1671
- totalSteps: progress.totalSteps
1672
- });
1673
- }
1674
- );
1675
- } else {
1676
- result = await provider.generateImage(request.modelId, prompt, {
1523
+ const result = await provider.generateImage(
1524
+ request.modelId,
1525
+ prompt,
1526
+ {
1677
1527
  negativePrompt: config2.negativePrompt,
1678
1528
  width: config2.width,
1679
1529
  height: config2.height,
1680
1530
  steps: config2.steps,
1681
1531
  cfgScale: config2.cfgScale,
1682
1532
  seed: config2.seed,
1683
- sampler: config2.sampler
1684
- });
1685
- }
1533
+ sampler: config2.sampler,
1534
+ workflow: config2.workflow
1535
+ },
1536
+ async (progress) => {
1537
+ await submitProgress(
1538
+ request.id,
1539
+ `Step ${progress.step}/${progress.totalSteps}`,
1540
+ "log"
1541
+ );
1542
+ requestEvents.emitProgress({
1543
+ id: request.id,
1544
+ step: progress.step,
1545
+ totalSteps: progress.totalSteps
1546
+ });
1547
+ }
1548
+ );
1686
1549
  await submitResult(request.id, true, {
1687
1550
  imageBase64: result.imageBase64,
1688
1551
  mimeType: result.mimeType,
@@ -1713,13 +1576,14 @@ var TunnelRunner = class {
1713
1576
  fps: config2.fps,
1714
1577
  steps: config2.steps,
1715
1578
  cfgScale: config2.cfgScale,
1716
- seed: config2.seed
1579
+ seed: config2.seed,
1580
+ workflow: config2.workflow
1717
1581
  },
1718
1582
  async (progress) => {
1719
- await submitGenerationProgress(
1583
+ await submitProgress(
1720
1584
  request.id,
1721
- progress.step,
1722
- progress.totalSteps
1585
+ `Step ${progress.step}/${progress.totalSteps}`,
1586
+ "log"
1723
1587
  );
1724
1588
  requestEvents.emitProgress({
1725
1589
  id: request.id,
@@ -1754,8 +1618,9 @@ export {
1754
1618
  setApiKey,
1755
1619
  getConfigPath,
1756
1620
  verifyApiKey,
1757
- registerLocalModel,
1758
- getRegisteredModels,
1621
+ syncLocalModel,
1622
+ updateLocalModel,
1623
+ getSyncedModels,
1759
1624
  requestDeviceAuth,
1760
1625
  pollDeviceAuth,
1761
1626
  discoverAllModels,
@@ -1765,4 +1630,4 @@ export {
1765
1630
  requestEvents,
1766
1631
  TunnelRunner
1767
1632
  };
1768
- //# sourceMappingURL=chunk-PTK4SJQK.js.map
1633
+ //# sourceMappingURL=chunk-V3RKCMCQ.js.map