comfyui-skill-cli 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,139 @@
1
+ Metadata-Version: 2.4
2
+ Name: comfyui-skill-cli
3
+ Version: 0.1.0
4
+ Summary: ComfyUI Skill CLI — Agent-friendly workflow management
5
+ Author: HuangYuChuh
6
+ License-Expression: MIT
7
+ Project-URL: Homepage, https://github.com/HuangYuChuh/ComfyUI_Skill_CLI
8
+ Project-URL: Repository, https://github.com/HuangYuChuh/ComfyUI_Skill_CLI
9
+ Project-URL: Issues, https://github.com/HuangYuChuh/ComfyUI_Skill_CLI/issues
10
+ Keywords: comfyui,cli,agent,skill,workflow,ai,image-generation
11
+ Classifier: Development Status :: 3 - Alpha
12
+ Classifier: Environment :: Console
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.10
16
+ Classifier: Programming Language :: Python :: 3.11
17
+ Classifier: Programming Language :: Python :: 3.12
18
+ Classifier: Programming Language :: Python :: 3.13
19
+ Classifier: Topic :: Software Development :: Libraries
20
+ Classifier: Topic :: Multimedia :: Graphics
21
+ Requires-Python: >=3.10
22
+ Description-Content-Type: text/markdown
23
+ License-File: LICENSE
24
+ Requires-Dist: typer>=0.9
25
+ Requires-Dist: rich>=13.0
26
+ Requires-Dist: requests>=2.28
27
+ Dynamic: license-file
28
+
29
+ # ComfyUI Skill CLI
30
+
31
+ Agent-friendly command-line tool for managing and executing [ComfyUI](https://github.com/comfyanonymous/ComfyUI) workflow skills.
32
+
33
+ ## What is this?
34
+
35
+ ComfyUI Skill CLI turns ComfyUI workflows into callable commands. Any AI agent that can run shell commands (Claude, Codex, OpenClaw, etc.) can use ComfyUI through this CLI.
36
+
37
+ ```bash
38
+ # List available skills
39
+ comfyui-skill list --json
40
+
41
+ # Execute a workflow
42
+ comfyui-skill run local/txt2img --args '{"prompt": "a white cat", "seed": 42}' --json
43
+
44
+ # Check server health
45
+ comfyui-skill server status --json
46
+ ```
47
+
48
+ Every command supports `--json` for structured output. Pipe-friendly by default.
49
+
50
+ ## Install
51
+
52
+ ```bash
53
+ pipx install comfyui-skill-cli
54
+ ```
55
+
56
+ ## Usage
57
+
58
+ Run commands from within a [ComfyUI Skills](https://github.com/HuangYuChuh/ComfyUI_Skills_OpenClaw) project directory:
59
+
60
+ ```bash
61
+ cd /path/to/your-skills-project
62
+ comfyui-skill list
63
+ ```
64
+
65
+ The CLI reads `config.json` and `data/` from the current working directory.
66
+
67
+ ### Commands
68
+
69
+ | Command | Description |
70
+ |---------|-------------|
71
+ | `comfyui-skill list` | List all available skills with parameters |
72
+ | `comfyui-skill info <id>` | Show skill details and parameter schema |
73
+ | `comfyui-skill run <id> --args '{...}'` | Execute a skill (blocking) |
74
+ | `comfyui-skill submit <id> --args '{...}'` | Submit a skill (non-blocking) |
75
+ | `comfyui-skill status <prompt-id>` | Check execution status |
76
+ | `comfyui-skill server list` | List configured servers |
77
+ | `comfyui-skill server status` | Check if ComfyUI server is online |
78
+ | `comfyui-skill deps check <id>` | Check missing dependencies |
79
+ | `comfyui-skill deps install <id>` | Install missing dependencies |
80
+
81
+ ### Global Options
82
+
83
+ | Option | Description |
84
+ |--------|-------------|
85
+ | `--json, -j` | Force JSON output |
86
+ | `--server, -s` | Specify server ID |
87
+ | `--dir, -d` | Specify data directory (default: current directory) |
88
+ | `--verbose, -v` | Verbose output |
89
+
90
+ ### Output Modes
91
+
92
+ - **TTY** → Rich tables and progress bars (human-friendly)
93
+ - **Pipe / `--json`** → Structured JSON (agent-friendly)
94
+ - **Errors** → Always stderr
95
+
96
+ ### Skill ID Format
97
+
98
+ ```bash
99
+ comfyui-skill run local/txt2img # server_id/workflow_id
100
+ comfyui-skill run txt2img # uses default server
101
+ comfyui-skill run txt2img -s my_server # explicit server
102
+ ```
103
+
104
+ ## For AI Agents
105
+
106
+ This CLI is designed to be called from `SKILL.md` definitions:
107
+
108
+ ```markdown
109
+ ## Available Commands
110
+ comfyui-skill list --json
111
+ comfyui-skill info <server_id>/<workflow_id> --json
112
+ comfyui-skill run <server_id>/<workflow_id> --args '{"prompt":"..."}' --json
113
+
114
+ ## Typical Flow
115
+ 1. `comfyui-skill server status --json` — verify server is online
116
+ 2. `comfyui-skill list --json` — discover available skills
117
+ 3. `comfyui-skill info <id> --json` — check required parameters
118
+ 4. `comfyui-skill run <id> --args '{...}' --json` — execute
119
+ ```
120
+
121
+ ## Exit Codes
122
+
123
+ | Code | Meaning |
124
+ |------|---------|
125
+ | 0 | Success |
126
+ | 1 | General error |
127
+ | 2 | Invalid arguments |
128
+ | 3 | Server connection failed |
129
+ | 4 | Resource not found |
130
+ | 5 | Execution failed |
131
+ | 6 | Timeout |
132
+
133
+ ## Compatibility
134
+
135
+ Built with [Typer](https://typer.tiangolo.com/), the same framework as [comfy-cli](https://github.com/Comfy-Org/comfy-cli). Designed to be integrated as a `comfy skills` subcommand in the future.
136
+
137
+ ## License
138
+
139
+ MIT
@@ -0,0 +1,18 @@
1
+ comfyui_skill_cli-0.1.0.dist-info/licenses/LICENSE,sha256=fbdPU9jhPg5qMfAZ-JBFwSeaDYvfOU_0Jev9maM8ZAo,1068
2
+ comfyui_skills_cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
+ comfyui_skills_cli/__main__.py,sha256=5z9TeduMvrj9nhMuDlCRb5qkmvuCdfD6JQIvFZ_tXKM,86
4
+ comfyui_skills_cli/client.py,sha256=kaZOYgDIL5jlLqIsUVxQrSz4dRhup6x6spF8p98KwO8,4584
5
+ comfyui_skills_cli/config.py,sha256=uTzF4eL_fty3DDdIMZh6A6t2FKSOesCG0bwvpUtP2BY,987
6
+ comfyui_skills_cli/main.py,sha256=N6OdYHmVYUMqqoA1gidQUcULRmXqbYvo26UKPkTnuQ8,1747
7
+ comfyui_skills_cli/output.py,sha256=bPbmX2mWVuP77P9qJBdttunFpTSzPfW58zXpn9D0Z10,2834
8
+ comfyui_skills_cli/storage.py,sha256=vk6i7Ynu6uszhs6V_3dtqt4nWW8EWgczlCsHTA9UVg0,3465
9
+ comfyui_skills_cli/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
10
+ comfyui_skills_cli/commands/deps.py,sha256=QoEm29y27Okw-4dhS4fs3yH0B7qadjBrUQfatzWzKWU,8292
11
+ comfyui_skills_cli/commands/run.py,sha256=MDmbJMOb9tPaZKFebZQiMP77f8fygmAAmWixaMmh0jg,9856
12
+ comfyui_skills_cli/commands/server.py,sha256=-6keAY64_N-XU7h_EV3uHx6DvTjkUGGsdkr-nlXTmYQ,1698
13
+ comfyui_skills_cli/commands/skill.py,sha256=ColrJJ8tzeS7konXjm5INSqIXIcY8a-OcnbDTJo-rQM,1711
14
+ comfyui_skill_cli-0.1.0.dist-info/METADATA,sha256=WRdvvn7JMHRC_GKSgUezWHZhc8AMS0_Pzy2xZn-ayPs,4435
15
+ comfyui_skill_cli-0.1.0.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
16
+ comfyui_skill_cli-0.1.0.dist-info/entry_points.txt,sha256=ny_jXyKiqTimpFwsxg-MaTt--7RhlhomfuHKPFfISVI,67
17
+ comfyui_skill_cli-0.1.0.dist-info/top_level.txt,sha256=FOjZBQVWY4nOpBMsnQUW4kA8kpfAjLvMBUcRluK5220,19
18
+ comfyui_skill_cli-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (82.0.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ comfyui-skill = comfyui_skills_cli.__main__:main
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 HuangYuChuh
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1 @@
1
+ comfyui_skills_cli
File without changes
@@ -0,0 +1,9 @@
1
from .main import app


def main():
    """Console-script entry point: hand control to the Typer application."""
    app()


if __name__ == "__main__":
    main()
@@ -0,0 +1,139 @@
1
+ """ComfyUI HTTP client — all server communication goes through here."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ import time
7
+ import uuid
8
+ from typing import Any
9
+
10
+ import requests
11
+
12
+
13
class ComfyUIClient:
    """HTTP client for a single ComfyUI server.

    All server communication in the CLI goes through this class. The core
    methods wrap ComfyUI's REST endpoints (/prompt, /history, /queue, /view,
    /object_info, /models); the Manager methods talk to the optional
    ComfyUI-Manager plugin and degrade gracefully when it is absent.
    """

    def __init__(self, server_url: str, auth: str = "", comfy_api_key: str = "", timeout: float = 30.0):
        """Store connection settings.

        server_url    — base URL; a trailing slash is stripped so path
                        concatenation below never yields a double slash.
        auth          — optional bearer token sent as an Authorization header.
        comfy_api_key — optional Comfy.org API key forwarded with prompts.
        timeout       — default per-request timeout in seconds.
        """
        self.server_url = server_url.rstrip("/")
        self.auth = auth
        self.comfy_api_key = comfy_api_key
        self.timeout = timeout

    def _headers(self) -> dict[str, str]:
        """Request headers; Authorization is added only when auth is set."""
        headers: dict[str, str] = {}
        if self.auth:
            headers["Authorization"] = f"Bearer {self.auth}"
        return headers

    def _get(self, path: str, **kwargs: Any) -> requests.Response:
        """GET {server_url}{path} with default headers and timeout.

        Callers may supply their own timeout (the Manager probes pass
        timeout=10); setdefault avoids the TypeError ("got multiple values
        for keyword argument 'timeout'") that passing timeout=self.timeout
        alongside **kwargs raised on those calls.
        """
        kwargs.setdefault("timeout", self.timeout)
        return requests.get(
            f"{self.server_url}{path}",
            headers=self._headers(),
            **kwargs,
        )

    def _post(self, path: str, json_data: Any = None, **kwargs: Any) -> requests.Response:
        """POST a JSON body to {server_url}{path}; timeout overridable like _get."""
        kwargs.setdefault("timeout", self.timeout)
        return requests.post(
            f"{self.server_url}{path}",
            headers=self._headers(),
            json=json_data,
            **kwargs,
        )

    # -- Health --

    def check_health(self) -> dict[str, Any]:
        """Probe /system_stats.

        Returns {"status": "online", "data": ...} on success or
        {"status": "offline", "error": ...} on any failure — never raises.
        """
        try:
            resp = self._get("/system_stats")
            resp.raise_for_status()
            return {"status": "online", "data": resp.json()}
        except (requests.RequestException, ValueError) as exc:
            return {"status": "offline", "error": str(exc)}

    # -- Prompt execution --

    def queue_prompt(self, workflow: dict[str, Any]) -> dict[str, Any]:
        """Submit a workflow to /prompt; returns the server response
        (contains prompt_id). Raises requests.HTTPError on 4xx/5xx."""
        payload: dict[str, Any] = {
            "prompt": workflow,
            # Fresh client_id per submission; the CLI polls /history rather
            # than holding a websocket session.
            "client_id": str(uuid.uuid4()),
        }
        if self.comfy_api_key:
            payload["extra_data"] = {"api_key_comfy_org": self.comfy_api_key}
        resp = self._post("/prompt", json_data=payload)
        resp.raise_for_status()
        return resp.json()

    def get_history(self, prompt_id: str) -> dict[str, Any] | None:
        """Return the history record for prompt_id, or None if not (yet) present."""
        resp = self._get(f"/history/{prompt_id}")
        if resp.status_code != 200:
            return None
        data = resp.json()
        return data.get(prompt_id)

    def get_queue(self) -> dict[str, Any]:
        """Return the live queue (queue_running / queue_pending lists)."""
        resp = self._get("/queue")
        resp.raise_for_status()
        return resp.json()

    def download_output(self, filename: str, subfolder: str = "", output_type: str = "output") -> bytes:
        """Fetch a generated file's raw bytes via /view."""
        resp = self._get("/view", params={
            "filename": filename,
            "subfolder": subfolder,
            "type": output_type,
        })
        resp.raise_for_status()
        return resp.content

    # -- Node info --

    def get_object_info(self) -> dict[str, Any]:
        """Return /object_info: every node class the server has installed."""
        resp = self._get("/object_info")
        resp.raise_for_status()
        return resp.json()

    def get_models(self, folder: str) -> list[str]:
        """List model filenames available in a models/<folder> directory."""
        resp = self._get(f"/models/{folder}")
        resp.raise_for_status()
        return resp.json()

    # -- Manager API (ComfyUI-Manager plugin) --

    def manager_start_queue(self) -> bool:
        """Start the Manager install queue.

        Returns False when the plugin/server is unreachable; the short
        timeout lets this double as a cheap availability probe.
        """
        try:
            resp = self._get("/manager/queue/start", timeout=10)
            return resp.status_code < 500
        except requests.RequestException:
            return False

    def manager_install_node(self, repo_url: str, pkg_name: str) -> dict[str, Any]:
        """Queue a git-clone install of a custom-node package.

        Returns {"success": True} or {"success": False, "error": ...};
        a 404 means ComfyUI-Manager is not installed at all.
        """
        resp = self._post("/manager/queue/install", json_data={
            "id": pkg_name,
            "url": repo_url,
            "install_type": "git-clone",
        })
        if resp.status_code == 404:
            return {"success": False, "error": "ComfyUI Manager not installed"}
        if resp.status_code >= 400:
            return {"success": False, "error": f"Manager API error: {resp.status_code}"}
        return {"success": True}

    def manager_queue_status(self) -> dict[str, Any] | None:
        """Return the Manager queue's progress dict, or None when unavailable."""
        try:
            resp = self._get("/manager/queue/status", timeout=10)
            if resp.status_code != 200:
                return None
            return resp.json()
        except (requests.RequestException, ValueError):
            return None

    def manager_wait_for_queue(self, max_polls: int = 60, interval: float = 3.0) -> bool:
        """Poll the Manager queue until done >= total.

        Returns True on completion, False after max_polls polls (timeout).
        """
        for _ in range(max_polls):
            time.sleep(interval)
            status = self.manager_queue_status()
            if status is None:
                # Transient failure — keep polling until the budget runs out.
                continue
            total = status.get("total", 0)
            done = status.get("done", 0)
            if total > 0 and done >= total:
                return True
        return False
File without changes
@@ -0,0 +1,232 @@
1
+ """comfyui-skill deps check / install"""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from typing import Any
7
+
8
+ import typer
9
+
10
+ from ..client import ComfyUIClient
11
+ from ..config import get_base_dir, get_default_server_id, get_server, load_config
12
+ from ..output import output_error, output_event, output_result
13
+ from ..storage import get_workflow_data
14
+
15
# Typer sub-application mounted by the CLI as the `deps` command group.
app = typer.Typer()

# Known model loader node types, mapped to (input_field, models_folder):
# input_field is the workflow input that names a model file; models_folder is
# the server-side directory queried via GET /models/<folder> when checking
# whether that file is installed.
MODEL_LOADER_MAP: dict[str, tuple[str, str]] = {
    "CheckpointLoaderSimple": ("ckpt_name", "checkpoints"),
    "CheckpointLoader": ("ckpt_name", "checkpoints"),
    "LoraLoader": ("lora_name", "loras"),
    "LoraLoaderModelOnly": ("lora_name", "loras"),
    "VAELoader": ("vae_name", "vae"),
    "ControlNetLoader": ("control_net_name", "controlnet"),
    "CLIPLoader": ("clip_name", "text_encoders"),
    "UNETLoader": ("unet_name", "diffusion_models"),
    "unCLIPCheckpointLoader": ("ckpt_name", "checkpoints"),
    "StyleModelLoader": ("style_model_name", "style_models"),
    "CLIPVisionLoader": ("clip_name", "clip_vision"),
    "UpscaleModelLoader": ("model_name", "upscale_models"),
    "PhotoMakerLoader": ("photomaker_model_name", "photomaker"),
}
33
+
34
+
35
+ @app.command("check")
36
+ def deps_check(
37
+ ctx: typer.Context,
38
+ skill_id: str = typer.Argument(help="Skill ID: server_id/workflow_id"),
39
+ ):
40
+ """Check if a skill's dependencies (custom nodes and models) are installed."""
41
+ base_dir = get_base_dir(ctx.obj.get("base_dir", ""))
42
+ config = load_config(base_dir)
43
+
44
+ if "/" in skill_id:
45
+ server_id, workflow_id = skill_id.split("/", 1)
46
+ else:
47
+ server_id = ctx.obj.get("server") or get_default_server_id(config)
48
+ workflow_id = skill_id
49
+
50
+ server_config = get_server(config, server_id)
51
+ if not server_config:
52
+ output_error(ctx, "SERVER_NOT_FOUND", f'Server "{server_id}" not found.')
53
+ return
54
+
55
+ workflow_data = get_workflow_data(base_dir, server_id, workflow_id)
56
+ if not workflow_data:
57
+ output_error(ctx, "SKILL_NOT_FOUND", f'Skill "{skill_id}" not found.')
58
+ return
59
+
60
+ client = ComfyUIClient(
61
+ server_url=server_config.get("url", "http://127.0.0.1:8188"),
62
+ auth=server_config.get("auth", ""),
63
+ )
64
+
65
+ # Check server is reachable
66
+ health = client.check_health()
67
+ if health.get("status") != "online":
68
+ output_error(ctx, "SERVER_OFFLINE", f'Server "{server_id}" is offline.',
69
+ hint="Start ComfyUI first, then retry.")
70
+ return
71
+
72
+ # Get installed nodes
73
+ try:
74
+ object_info = client.get_object_info()
75
+ except Exception as exc:
76
+ output_error(ctx, "OBJECT_INFO_FAILED", f"Failed to query /object_info: {exc}")
77
+ return
78
+ installed_nodes = set(object_info.keys())
79
+
80
+ # Extract required nodes from workflow
81
+ required_nodes = set()
82
+ for node in workflow_data.values():
83
+ if isinstance(node, dict) and "class_type" in node:
84
+ required_nodes.add(node["class_type"])
85
+
86
+ # Find missing nodes
87
+ missing_nodes = []
88
+ for class_type in sorted(required_nodes - installed_nodes):
89
+ missing_nodes.append({
90
+ "class_type": class_type,
91
+ "can_auto_install": False,
92
+ })
93
+
94
+ # Check models
95
+ missing_models = _check_missing_models(client, workflow_data)
96
+
97
+ is_ready = len(missing_nodes) == 0 and len(missing_models) == 0
98
+
99
+ output_result(ctx, {
100
+ "is_ready": is_ready,
101
+ "missing_nodes": missing_nodes,
102
+ "missing_models": missing_models,
103
+ "total_nodes_required": len(required_nodes),
104
+ "total_nodes_installed": len(required_nodes) - len(missing_nodes),
105
+ })
106
+
107
+
108
+ @app.command("install")
109
+ def deps_install(
110
+ ctx: typer.Context,
111
+ skill_id: str = typer.Argument(help="Skill ID: server_id/workflow_id"),
112
+ repos: str = typer.Option("[]", "--repos", "-r", help="JSON array of git repo URLs to install"),
113
+ ):
114
+ """Install missing custom node packages via ComfyUI Manager."""
115
+ base_dir = get_base_dir(ctx.obj.get("base_dir", ""))
116
+ config = load_config(base_dir)
117
+
118
+ if "/" in skill_id:
119
+ server_id, workflow_id = skill_id.split("/", 1)
120
+ else:
121
+ server_id = ctx.obj.get("server") or get_default_server_id(config)
122
+ workflow_id = skill_id
123
+
124
+ server_config = get_server(config, server_id)
125
+ if not server_config:
126
+ output_error(ctx, "SERVER_NOT_FOUND", f'Server "{server_id}" not found.')
127
+ return
128
+
129
+ # Parse repos
130
+ try:
131
+ repo_urls = json.loads(repos)
132
+ if not isinstance(repo_urls, list):
133
+ output_error(ctx, "INVALID_ARGS", "--repos must be a JSON array of URLs")
134
+ return
135
+ except json.JSONDecodeError:
136
+ output_error(ctx, "INVALID_ARGS", "Invalid JSON for --repos")
137
+ return
138
+
139
+ if not repo_urls:
140
+ output_error(ctx, "INVALID_ARGS", "No repo URLs provided. Use --repos '[\"https://github.com/...\"]'")
141
+ return
142
+
143
+ client = ComfyUIClient(
144
+ server_url=server_config.get("url", "http://127.0.0.1:8188"),
145
+ auth=server_config.get("auth", ""),
146
+ )
147
+
148
+ # Start manager queue
149
+ if not client.manager_start_queue():
150
+ output_error(ctx, "MANAGER_UNAVAILABLE",
151
+ "ComfyUI Manager is not available on this server.",
152
+ hint="Install ComfyUI-Manager first: https://github.com/ltdrdata/ComfyUI-Manager")
153
+ return
154
+
155
+ results = []
156
+ needs_restart = False
157
+
158
+ for repo_url in repo_urls:
159
+ pkg_name = repo_url.rstrip("/").split("/")[-1].replace(".git", "")
160
+ output_event(ctx, "installing", package=pkg_name, source=repo_url)
161
+
162
+ install_result = client.manager_install_node(repo_url, pkg_name)
163
+
164
+ if install_result.get("success"):
165
+ # Wait for queue to finish
166
+ success = client.manager_wait_for_queue(max_polls=60, interval=3.0)
167
+ results.append({
168
+ "package": pkg_name,
169
+ "source": repo_url,
170
+ "success": success,
171
+ "method": "manager_queue",
172
+ "message": "installed via Manager" if success else "Manager queue timed out",
173
+ })
174
+ if success:
175
+ needs_restart = True
176
+ output_event(ctx, "installed", package=pkg_name, success=True)
177
+ else:
178
+ output_event(ctx, "installed", package=pkg_name, success=False, message="queue timed out")
179
+ else:
180
+ error_msg = install_result.get("error", "unknown error")
181
+ results.append({
182
+ "package": pkg_name,
183
+ "source": repo_url,
184
+ "success": False,
185
+ "message": error_msg,
186
+ })
187
+ output_event(ctx, "installed", package=pkg_name, success=False, message=error_msg)
188
+
189
+ output_result(ctx, {
190
+ "results": results,
191
+ "needs_restart": needs_restart,
192
+ "installed": sum(1 for r in results if r["success"]),
193
+ "failed": sum(1 for r in results if not r["success"]),
194
+ })
195
+
196
+
197
def _check_missing_models(client: ComfyUIClient, workflow_data: dict[str, Any]) -> list[dict[str, str]]:
    """Check for missing model files referenced in the workflow."""
    folder_cache: dict[str, set[str]] = {}

    def installed_in(folder: str) -> set[str]:
        # Query each models/<folder> listing at most once per invocation;
        # an unreachable folder is treated as empty (everything "missing").
        if folder not in folder_cache:
            try:
                listing = client.get_models(folder)
            except Exception:
                listing = None
            folder_cache[folder] = set(listing) if isinstance(listing, list) else set()
        return folder_cache[folder]

    absent: list[dict[str, str]] = []
    for node_id, node in workflow_data.items():
        if not isinstance(node, dict):
            continue
        class_type = node.get("class_type", "")
        if class_type not in MODEL_LOADER_MAP:
            continue

        field_name, folder = MODEL_LOADER_MAP[class_type]
        filename = node.get("inputs", {}).get(field_name)
        # Only string, non-empty references can name a model file.
        if not filename or not isinstance(filename, str):
            continue

        if filename not in installed_in(folder):
            absent.append({
                "filename": filename,
                "folder": folder,
                "loader_node": class_type,
                "node_id": str(node_id),
            })

    return absent
@@ -0,0 +1,273 @@
1
+ """comfy-skills run / submit / status"""
2
+
3
+ from __future__ import annotations
4
+
5
+ import copy
6
+ import json
7
+ import os
8
+ import time
9
+ import uuid
10
+ from typing import Any
11
+
12
+ import typer
13
+
14
+ from ..client import ComfyUIClient
15
+ from ..config import get_base_dir, get_default_server_id, get_server, load_config
16
+ from ..output import OutputFormat, get_output_format, is_machine_mode, output_error, output_event, output_result
17
+ from ..storage import get_schema, get_workflow_data
18
+
19
# Exponential-backoff polling schedule used by run_cmd: start at 1s between
# history checks, multiply by 1.5 after each poll, cap at 10s.
_POLL_INITIAL = 1.0
_POLL_MAX = 10.0
_POLL_FACTOR = 1.5
22
+
23
+
24
def run_cmd(
    ctx: typer.Context,
    skill_id: str = typer.Argument(help="Skill ID: server_id/workflow_id or workflow_id"),
    args: str = typer.Option("{}", "--args", "-a", help="JSON parameters"),
):
    """Execute a skill (blocking — waits for completion).

    Submits the parameterized workflow, then polls /history (with
    exponential backoff, see _POLL_*) until the prompt completes or errors,
    emitting queued/running/completed events along the way.

    NOTE(review): there is no overall deadline on the polling loop — if the
    server never records the prompt in history or queue, this loop polls
    forever; confirm whether a timeout (exit code 6 per the README) was
    intended here.
    """
    base_dir, server_id, workflow_id = _resolve_skill(ctx, skill_id)
    client, schema_data, workflow_data = _prepare(ctx, base_dir, server_id, workflow_id)

    input_args = _parse_args(ctx, args)
    parameters = _get_parameters(schema_data)

    # Inject user-supplied parameters into a copy of the stored workflow.
    workflow = _inject_params(workflow_data, parameters, input_args)

    # Submit to the server's /prompt endpoint.
    try:
        result = client.queue_prompt(workflow)
    except Exception as exc:
        output_error(ctx, "SUBMIT_FAILED", f"Failed to submit workflow: {exc}")
        return

    prompt_id = result.get("prompt_id", "")
    fmt = get_output_format(ctx)

    # stream-json: emit events as they happen
    output_event(ctx, "queued", prompt_id=prompt_id)

    # Rich progress for text mode (import deferred so machine modes skip it).
    if fmt == OutputFormat.TEXT:
        from rich.console import Console
        console = Console(stderr=True)
        console.print(f"[dim]Queued: {prompt_id}[/dim]")

    # Poll until complete
    poll_interval = _POLL_INITIAL
    prev_status = ""
    while True:
        history = client.get_history(prompt_id)
        if history:
            outputs = history.get("outputs", {})
            status_info = history.get("status", {})

            # Either an explicit completed flag or the presence of outputs
            # counts as success.
            if status_info.get("completed", False) or outputs:
                images = _collect_outputs(outputs)
                output_event(ctx, "completed", prompt_id=prompt_id, outputs=images)

                # Final result — json and stream-json both get this
                if fmt == OutputFormat.STREAM_JSON:
                    return
                output_result(ctx, {
                    "status": "success",
                    "prompt_id": prompt_id,
                    "outputs": images,
                })
                return

            if status_info.get("status_str") == "error":
                error_msg = _format_errors(history)
                output_event(ctx, "error", prompt_id=prompt_id, message=error_msg)
                output_error(ctx, "EXECUTION_FAILED", error_msg)
                return

        # Not finished — check queue position to report progress.
        # Queue entries are indexed as item[1] == prompt_id; the len guard
        # protects against malformed entries.
        queue = client.get_queue()
        current_status = ""
        for item in queue.get("queue_running", []):
            if len(item) > 1 and item[1] == prompt_id:
                current_status = "running"
                break
        if not current_status:
            for i, item in enumerate(queue.get("queue_pending", [])):
                if len(item) > 1 and item[1] == prompt_id:
                    current_status = f"queued:{i}"
                    break

        # Emit an event only on state *transitions*, not every poll.
        if current_status and current_status != prev_status:
            if current_status == "running":
                output_event(ctx, "running", prompt_id=prompt_id)
                if fmt == OutputFormat.TEXT:
                    console.print("[yellow]Running...[/yellow]")
            elif current_status.startswith("queued:"):
                pos = current_status.split(":")[1]
                output_event(ctx, "queued", prompt_id=prompt_id, position=int(pos))
            prev_status = current_status

        time.sleep(poll_interval)
        # Back off: 1s, 1.5s, 2.25s, ... capped at 10s.
        poll_interval = min(poll_interval * _POLL_FACTOR, _POLL_MAX)
112
+
113
+
114
def submit_cmd(
    ctx: typer.Context,
    skill_id: str = typer.Argument(help="Skill ID: server_id/workflow_id or workflow_id"),
    args: str = typer.Option("{}", "--args", "-a", help="JSON parameters"),
):
    """Submit a skill for execution (non-blocking — returns immediately)."""
    base_dir, server_id, workflow_id = _resolve_skill(ctx, skill_id)
    client, schema_data, workflow_data = _prepare(ctx, base_dir, server_id, workflow_id)

    # Merge user-supplied args into the stored workflow template.
    parsed = _parse_args(ctx, args)
    schema_params = _get_parameters(schema_data)
    workflow = _inject_params(workflow_data, schema_params, parsed)

    try:
        queued = client.queue_prompt(workflow)
    except Exception as exc:
        output_error(ctx, "SUBMIT_FAILED", f"Failed to submit workflow: {exc}")
        return

    # Caller checks progress later via `status <prompt-id>`.
    output_result(ctx, {
        "status": "submitted",
        "prompt_id": queued.get("prompt_id", ""),
    })
137
+
138
+
139
def status_cmd(
    ctx: typer.Context,
    prompt_id: str = typer.Argument(help="Prompt ID from submit"),
):
    """Check execution status of a submitted workflow.

    Reports success/error from history, running/queued (with position) from
    the live queue, or not_found when the server has no trace of the prompt.
    """
    base_dir = get_base_dir(ctx.obj.get("base_dir", ""))
    config = load_config(base_dir)
    server_id = ctx.obj.get("server") or get_default_server_id(config)
    server_config = get_server(config, server_id)

    if not server_config:
        output_error(ctx, "SERVER_NOT_FOUND", f'Server "{server_id}" not found.')
        return

    client = _build_client(server_config)
    history = client.get_history(prompt_id)

    if history:
        status_info = history.get("status", {})
        outputs = history.get("outputs", {})
        # Completed flag or any outputs counts as success (same rule as run_cmd).
        if status_info.get("completed", False) or outputs:
            images = _collect_outputs(outputs)
            output_result(ctx, {"status": "success", "prompt_id": prompt_id, "outputs": images})
            return
        if status_info.get("status_str") == "error":
            output_result(ctx, {"status": "error", "prompt_id": prompt_id, "error": _format_errors(history)})
            return

    # Not in history — check the live queue. Guard item length before
    # indexing item[1] (as run_cmd does) so a malformed queue entry cannot
    # raise IndexError here.
    queue = client.get_queue()
    for item in queue.get("queue_running", []):
        if len(item) > 1 and item[1] == prompt_id:
            output_result(ctx, {"status": "running", "prompt_id": prompt_id})
            return
    for i, item in enumerate(queue.get("queue_pending", [])):
        if len(item) > 1 and item[1] == prompt_id:
            output_result(ctx, {"status": "queued", "prompt_id": prompt_id, "position": i})
            return

    output_result(ctx, {"status": "not_found", "prompt_id": prompt_id})
179
+
180
+
181
+ # -- Helpers --
182
+
183
def _resolve_skill(ctx: typer.Context, skill_id: str) -> tuple[Any, str, str]:
    """Split a skill ID into (base_dir, server_id, workflow_id).

    A bare workflow ID falls back to the --server option or the configured
    default server.
    """
    base_dir = get_base_dir(ctx.obj.get("base_dir", ""))
    if "/" not in skill_id:
        config = load_config(base_dir)
        chosen = ctx.obj.get("server") or get_default_server_id(config)
        return base_dir, chosen, skill_id
    server_id, workflow_id = skill_id.split("/", 1)
    return base_dir, server_id, workflow_id
192
+
193
+
194
def _prepare(ctx: typer.Context, base_dir: Any, server_id: str, workflow_id: str):
    """Load config, schema and workflow for a skill and build its client.

    Returns (client, schema_data, workflow_data); schema_data is {} when the
    skill exposes no parameter schema.

    NOTE(review): unlike the command bodies (which `return` after
    output_error), this helper falls through after reporting
    SERVER_NOT_FOUND / SKILL_NOT_FOUND — it appears to rely on output_error
    aborting (e.g. raising). If output_error only prints, the subsequent
    _build_client(None) / use of workflow_data=None would crash; confirm
    against output.py.
    """
    config = load_config(base_dir)
    server_config = get_server(config, server_id)
    if not server_config:
        output_error(ctx, "SERVER_NOT_FOUND", f'Server "{server_id}" not found.')

    schema_data = get_schema(base_dir, server_id, workflow_id)
    workflow_data = get_workflow_data(base_dir, server_id, workflow_id)
    if not workflow_data:
        output_error(ctx, "SKILL_NOT_FOUND", f'Skill "{server_id}/{workflow_id}" not found.')

    client = _build_client(server_config)
    # Schema is optional; the workflow itself is required.
    return client, schema_data or {}, workflow_data
207
+
208
+
209
def _build_client(server_config: dict[str, Any]) -> ComfyUIClient:
    """Construct a ComfyUIClient from one server's config entry."""
    url = server_config.get("url", "http://127.0.0.1:8188")
    token = server_config.get("auth", "")
    api_key = server_config.get("comfy_api_key", "")
    return ComfyUIClient(server_url=url, auth=token, comfy_api_key=api_key)
215
+
216
+
217
+ def _parse_args(ctx: typer.Context, args_str: str) -> dict[str, Any]:
218
+ try:
219
+ return json.loads(args_str)
220
+ except json.JSONDecodeError as exc:
221
+ output_error(ctx, "INVALID_ARGS", f"Invalid JSON in --args: {exc}")
222
+ return {}
223
+
224
+
225
+ def _get_parameters(schema_data: dict[str, Any]) -> dict[str, Any]:
226
+ parameters = dict(schema_data.get("parameters", {}))
227
+ ui_parameters = schema_data.get("ui_parameters", {})
228
+ if ui_parameters:
229
+ for key, ui_param in ui_parameters.items():
230
+ name = ui_param.get("name", key)
231
+ if name not in parameters and ui_param.get("exposed", False):
232
+ parameters[name] = ui_param
233
+ return parameters
234
+
235
+
236
+ def _inject_params(
237
+ workflow_data: dict[str, Any],
238
+ parameters: dict[str, Any],
239
+ args: dict[str, Any],
240
+ ) -> dict[str, Any]:
241
+ workflow = copy.deepcopy(workflow_data)
242
+ for key, value in args.items():
243
+ if key not in parameters:
244
+ continue
245
+ node_id = str(parameters[key].get("node_id", ""))
246
+ field = parameters[key].get("field", "")
247
+ if node_id in workflow and isinstance(workflow[node_id], dict) and "inputs" in workflow[node_id]:
248
+ workflow[node_id]["inputs"][field] = value
249
+ return workflow
250
+
251
+
252
+ def _collect_outputs(outputs: dict[str, Any]) -> list[dict[str, str]]:
253
+ images = []
254
+ for node_output in outputs.values():
255
+ if not isinstance(node_output, dict):
256
+ continue
257
+ for img in node_output.get("images", []):
258
+ images.append({
259
+ "filename": img.get("filename", ""),
260
+ "subfolder": img.get("subfolder", ""),
261
+ "type": img.get("type", "output"),
262
+ })
263
+ return images
264
+
265
+
266
+ def _format_errors(history: dict[str, Any]) -> str:
267
+ status_info = history.get("status", {})
268
+ messages = status_info.get("messages", [])
269
+ parts = []
270
+ for msg in messages:
271
+ if isinstance(msg, list) and len(msg) >= 2:
272
+ parts.append(str(msg[1]))
273
+ return "; ".join(parts) if parts else "Workflow execution failed"
@@ -0,0 +1,57 @@
1
+ """comfy-skills server list / status"""
2
+
3
+ from __future__ import annotations
4
+
5
+ import typer
6
+
7
+ from ..client import ComfyUIClient
8
+ from ..config import get_base_dir, get_default_server_id, get_server, get_servers, load_config
9
+ from ..output import output_error, output_result
10
+
11
+ app = typer.Typer()
12
+
13
+
14
+ @app.command("list")
15
+ def server_list(ctx: typer.Context):
16
+ """List all configured servers."""
17
+ base_dir = get_base_dir(ctx.obj.get("base_dir", ""))
18
+ config = load_config(base_dir)
19
+ servers = get_servers(config)
20
+ result = [
21
+ {
22
+ "id": s.get("id", ""),
23
+ "name": s.get("name", ""),
24
+ "url": s.get("url", ""),
25
+ "enabled": s.get("enabled", True),
26
+ }
27
+ for s in servers
28
+ ]
29
+ output_result(ctx, result)
30
+
31
+
32
+ @app.command("status")
33
+ def server_status(
34
+ ctx: typer.Context,
35
+ server_id: str = typer.Argument("", help="Server ID (default: default server)"),
36
+ ):
37
+ """Check if a ComfyUI server is online."""
38
+ base_dir = get_base_dir(ctx.obj.get("base_dir", ""))
39
+ config = load_config(base_dir)
40
+ sid = server_id or ctx.obj.get("server") or get_default_server_id(config)
41
+ server_config = get_server(config, sid)
42
+
43
+ if not server_config:
44
+ output_error(ctx, "SERVER_NOT_FOUND", f'Server "{sid}" not found.',
45
+ hint="Run `comfy-skills server list` to see configured servers.")
46
+ return
47
+
48
+ client = ComfyUIClient(
49
+ server_url=server_config.get("url", "http://127.0.0.1:8188"),
50
+ auth=server_config.get("auth", ""),
51
+ )
52
+ health = client.check_health()
53
+ output_result(ctx, {
54
+ "server_id": sid,
55
+ "url": server_config.get("url", ""),
56
+ **health,
57
+ })
@@ -0,0 +1,54 @@
1
+ """comfy-skills skill list / info"""
2
+
3
+ from __future__ import annotations
4
+
5
+ import typer
6
+
7
+ from ..config import get_base_dir, get_default_server_id, get_servers, load_config
8
+ from ..output import output_error, output_result
9
+ from ..storage import get_workflow_detail, list_workflows
10
+
11
+ app = typer.Typer()
12
+
13
+
14
@app.command("list")
def skill_list(ctx: typer.Context):
    """List all available skills."""
    base_dir = get_base_dir(ctx.obj.get("base_dir", ""))
    config = load_config(base_dir)
    selected = ctx.obj.get("server") or ""

    if selected:
        # A specific server was requested via the global --server option.
        skills = list_workflows(base_dir, selected)
    else:
        # No server selected: aggregate skills from every enabled server.
        skills = [
            wf
            for srv in get_servers(config)
            if srv.get("enabled", True)
            for wf in list_workflows(base_dir, srv["id"])
        ]

    output_result(ctx, skills)
30
+
31
+
32
@app.command("info")
def skill_info(
    ctx: typer.Context,
    skill_id: str = typer.Argument(help="Skill ID in format: server_id/workflow_id"),
):
    """Show skill details including parameter schema."""
    base_dir = get_base_dir(ctx.obj.get("base_dir", ""))

    # Accept either "server_id/workflow_id" or a bare workflow id, which is
    # resolved against the global --server option or the configured default.
    if "/" in skill_id:
        server_id, workflow_id = skill_id.split("/", 1)
    else:
        config = load_config(base_dir)
        server_id = ctx.obj.get("server") or get_default_server_id(config)
        workflow_id = skill_id

    detail = get_workflow_detail(base_dir, server_id, workflow_id)
    if detail is None:
        output_error(ctx, "SKILL_NOT_FOUND", f'Skill "{skill_id}" not found.',
                     hint="Run `comfy-skills skill list` to see available skills.")
        # output_error raises typer.Exit, but return explicitly as well
        # (matching `server status`) so `detail.pop` can never run on None
        # if that behavior ever changes.
        return

    # Don't include full workflow_data in info output (too large)
    detail.pop("workflow_data", None)
    output_result(ctx, detail)
@@ -0,0 +1,36 @@
1
+ """Read config.json and data/ from the current working directory."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from pathlib import Path
7
+ from typing import Any
8
+
9
+
10
def get_base_dir(override: str = "") -> Path:
    """Resolve the data directory.

    An explicit non-empty *override* is resolved to an absolute path;
    otherwise the current working directory is used.
    """
    return Path(override).resolve() if override else Path.cwd()
14
+
15
+
16
def load_config(base_dir: Path) -> dict[str, Any]:
    """Parse <base_dir>/config.json.

    Returns an empty server list when the file does not exist; a malformed
    file propagates json's decode error to the caller.
    """
    path = base_dir / "config.json"
    if not path.exists():
        return {"servers": []}
    return json.loads(path.read_text(encoding="utf-8"))
22
+
23
+
24
def get_servers(config: dict[str, Any]) -> list[dict[str, Any]]:
    """Return the configured servers, dropping malformed (non-dict) entries."""
    servers: list[dict[str, Any]] = []
    for entry in config.get("servers", []):
        if isinstance(entry, dict):
            servers.append(entry)
    return servers
26
+
27
+
28
def get_server(config: dict[str, Any], server_id: str) -> dict[str, Any] | None:
    """Look up a server entry by id; None when no match exists.

    Non-dict entries in the server list are skipped, matching get_servers.
    """
    return next(
        (
            entry
            for entry in config.get("servers", [])
            if isinstance(entry, dict) and entry.get("id") == server_id
        ),
        None,
    )
33
+
34
+
35
def get_default_server_id(config: dict[str, Any]) -> str:
    """ID of the server used when none is specified; defaults to "local"."""
    return config.get("default_server", "local")
@@ -0,0 +1,50 @@
1
+ """ComfyUI Skill CLI — Typer app with global options and subcommand registration."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import typer
6
+
7
+ from .commands import deps, run, server, skill
8
+
9
# Root Typer application; no_args_is_help prints usage when invoked bare.
app = typer.Typer(
    name="comfyui-skill",
    help="ComfyUI Skill CLI — Agent-friendly workflow management",
    no_args_is_help=True,
    rich_markup_mode="rich",
)


@app.callback()
def main(
    ctx: typer.Context,
    json_output: bool = typer.Option(False, "--json", "-j", help="JSON output (shortcut for --output-format json)"),
    output_format: str = typer.Option("", "--output-format", help="Output format: text, json, stream-json"),
    server_id: str = typer.Option("", "--server", "-s", help="Server ID"),
    base_dir: str = typer.Option("", "--dir", "-d", help="Data directory (default: current directory)"),
    verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"),
):
    """Capture the global CLI options and stash them on the context object.

    Subcommands read these values via ``ctx.obj`` (a plain dict); sub-apps
    additionally copy them down through their own callbacks (see below).
    """
    ctx.ensure_object(dict)
    ctx.obj["json"] = json_output
    ctx.obj["output_format"] = output_format
    ctx.obj["server"] = server_id
    ctx.obj["base_dir"] = base_dir
    ctx.obj["verbose"] = verbose
32
+
33
+
34
# Subcommand groups — each needs a callback to inherit parent context
for sub_app in [deps.app, server.app]:
    # Each loop iteration registers a fresh callback function on the current
    # sub-app; the callback copies the root callback's ctx.obj into the
    # child context so nested commands see the global options.
    @sub_app.callback()
    def _pass_context(ctx: typer.Context):
        if ctx.parent and ctx.parent.obj:
            ctx.ensure_object(dict)
            ctx.obj.update(ctx.parent.obj)

app.add_typer(deps.app, name="deps", help="Manage dependencies")
app.add_typer(server.app, name="server", help="Manage servers")

# Top-level commands
# These are registered directly on the root app, so their ctx.obj is the one
# populated by main() — no pass-through callback is required for them.
app.command("list")(skill.skill_list)
app.command("info")(skill.skill_info)
app.command("run")(run.run_cmd)
app.command("submit")(run.submit_cmd)
app.command("status")(run.status_cmd)
@@ -0,0 +1,85 @@
1
+ """Unified output formatting — the only place that handles text / JSON / stream-JSON."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ import sys
7
+ from enum import Enum
8
+ from typing import Any
9
+
10
+ import typer
11
+ from rich.console import Console
12
+ from rich.table import Table
13
+
14
+
15
class OutputFormat(str, Enum):
    """Supported CLI output modes; values match the --output-format option."""

    TEXT = "text"  # Rich tables, progress bars (human)
    JSON = "json"  # Single JSON result at end (agent, one-shot)
    STREAM_JSON = "stream-json"  # NDJSON events in real time (agent, streaming)
19
+
20
+
21
def get_output_format(ctx: typer.Context) -> OutputFormat:
    """Resolve the effective output format.

    Precedence: explicit --output-format, then the --json shortcut, then
    TTY detection (a non-interactive stdout defaults to JSON for agents).
    """
    obj = ctx.obj or {}
    explicit = obj.get("output_format", "")
    if explicit:
        return OutputFormat(explicit)
    if obj.get("json"):
        return OutputFormat.JSON
    return OutputFormat.TEXT if sys.stdout.isatty() else OutputFormat.JSON
31
+
32
+
33
def is_machine_mode(ctx: typer.Context) -> bool:
    """True when output is aimed at a program (json / stream-json)."""
    return get_output_format(ctx) in {OutputFormat.JSON, OutputFormat.STREAM_JSON}
35
+
36
+
37
def output_result(ctx: typer.Context, data: Any) -> None:
    """Print the final command result in the active output format."""
    if get_output_format(ctx) == OutputFormat.TEXT:
        _print_rich(data)
        return
    # Pretty-print only when a human is watching; agents get compact JSON.
    pretty = 2 if sys.stdout.isatty() else None
    sys.stdout.write(json.dumps(data, ensure_ascii=False, indent=pretty, default=str))
    sys.stdout.write("\n")
45
+
46
+
47
def output_error(ctx: typer.Context, code: str, message: str, hint: str = "") -> None:
    """Report an error on stderr (JSON or rich text) and exit with status 1.

    Always raises typer.Exit(code=1); callers never continue past this call.
    """
    if is_machine_mode(ctx):
        payload: dict[str, Any] = {"code": code, "message": message}
        if hint:
            payload["hint"] = hint
        sys.stderr.write(json.dumps({"error": payload}, ensure_ascii=False))
        sys.stderr.write("\n")
    else:
        console = Console(stderr=True)
        console.print(f"[red bold]Error:[/red bold] {message}")
        if hint:
            console.print(f"[dim]Hint: {hint}[/dim]")
    raise typer.Exit(code=1)
60
+
61
+
62
def output_event(ctx: typer.Context, event_type: str, **data: Any) -> None:
    """Emit a single NDJSON event line. Only outputs in stream-json mode."""
    if get_output_format(ctx) != OutputFormat.STREAM_JSON:
        return
    line = json.dumps({"event": event_type, **data}, ensure_ascii=False, default=str)
    sys.stdout.write(line)
    sys.stdout.write("\n")
    # Flush immediately so consumers see events in real time.
    sys.stdout.flush()
69
+
70
+
71
def _print_rich(data: Any) -> None:
    """Render data for humans.

    A non-empty list of dicts becomes a table (columns from the first row),
    a dict becomes key/value lines, anything else is printed as-is.
    """
    console = Console()
    if isinstance(data, list) and data and isinstance(data[0], dict):
        columns = list(data[0].keys())
        table = Table()
        for col in columns:
            table.add_column(col)
        for row in data:
            table.add_row(*(str(row.get(col, "")) for col in columns))
        console.print(table)
    elif isinstance(data, dict):
        for key, value in data.items():
            console.print(f"[bold]{key}:[/bold] {value}")
    else:
        console.print(data)
@@ -0,0 +1,104 @@
1
+ """Read workflow and schema data from the data/ directory."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from pathlib import Path
7
+ from typing import Any
8
+
9
+
10
def list_workflows(base_dir: Path, server_id: str) -> list[dict[str, Any]]:
    """Summarize every workflow under data/<server_id>/ that has a schema.json.

    Directories without a schema.json (and plain files) are skipped silently.
    """
    server_dir = base_dir / "data" / server_id
    if not server_dir.exists():
        return []

    entries: list[dict[str, Any]] = []
    for child in sorted(server_dir.iterdir()):
        schema_path = child / "schema.json"
        if not child.is_dir() or not schema_path.exists():
            continue
        schema = _load_json(schema_path)
        entries.append({
            "workflow_id": child.name,
            "server_id": server_id,
            "description": schema.get("description", ""),
            "enabled": schema.get("enabled", True),
            "parameters": _summarize_params(schema),
        })
    return entries
31
+
32
+
33
def get_workflow_detail(base_dir: Path, server_id: str, workflow_id: str) -> dict[str, Any] | None:
    """Full skill record (merged schema + raw workflow graph).

    Returns None when the workflow directory does not exist; missing
    schema.json / workflow.json files degrade to empty dicts.
    """
    workflow_dir = base_dir / "data" / server_id / workflow_id
    if not workflow_dir.is_dir():
        return None

    def _optional(filename: str) -> dict[str, Any]:
        # Load a JSON file if present, else an empty dict.
        path = workflow_dir / filename
        return _load_json(path) if path.exists() else {}

    schema = _optional("schema.json")
    workflow_data = _optional("workflow.json")
    merged = _merge_parameters(
        schema.get("parameters", {}),
        schema.get("ui_parameters", {}),
    )
    return {
        "workflow_id": workflow_id,
        "server_id": server_id,
        "description": schema.get("description", ""),
        "enabled": schema.get("enabled", True),
        "parameters": merged,
        "workflow_data": workflow_data,
    }
53
+
54
+
55
def get_workflow_data(base_dir: Path, server_id: str, workflow_id: str) -> dict[str, Any] | None:
    """Load the raw ComfyUI workflow graph, or None when no workflow.json exists."""
    path = base_dir / "data" / server_id / workflow_id / "workflow.json"
    if not path.exists():
        return None
    with open(path, encoding="utf-8") as f:
        return json.load(f)
60
+
61
+
62
def get_schema(base_dir: Path, server_id: str, workflow_id: str) -> dict[str, Any] | None:
    """Load a workflow's schema.json, or None when the file is missing."""
    path = base_dir / "data" / server_id / workflow_id / "schema.json"
    if not path.exists():
        return None
    with open(path, encoding="utf-8") as f:
        return json.load(f)
67
+
68
+
69
+ def _load_json(path: Path) -> dict[str, Any]:
70
+ with open(path, encoding="utf-8") as f:
71
+ return json.load(f)
72
+
73
+
74
def _summarize_params(schema: dict[str, Any]) -> dict[str, Any]:
    """Compact per-parameter summary (type/required/description) for listings.

    Parameters whose merged spec has exposed == False are omitted.
    """
    merged = _merge_parameters(
        schema.get("parameters", {}),
        schema.get("ui_parameters", {}),
    )
    summary: dict[str, Any] = {}
    for name, spec in merged.items():
        if not spec.get("exposed", True):
            continue  # hidden parameters never appear in skill listings
        summary[name] = {
            "type": spec.get("type", "string"),
            "required": spec.get("required", False),
            "description": spec.get("description", ""),
        }
    return summary
87
+
88
+
89
+ def _merge_parameters(
90
+ parameters: dict[str, Any],
91
+ ui_parameters: dict[str, Any],
92
+ ) -> dict[str, Any]:
93
+ if not ui_parameters:
94
+ return parameters
95
+ merged = dict(parameters)
96
+ for key, ui_param in ui_parameters.items():
97
+ name = ui_param.get("name", key)
98
+ if name in merged:
99
+ for field in ("type", "required", "description", "default"):
100
+ if field in ui_param and ui_param[field]:
101
+ merged[name][field] = ui_param[field]
102
+ elif ui_param.get("exposed", False):
103
+ merged[name] = ui_param
104
+ return merged