comfy-runtime 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,165 @@
1
+ Metadata-Version: 2.4
2
+ Name: comfy-runtime
3
+ Version: 0.1.0
4
+ Summary: Minimal ComfyUI runtime for deploying custom nodes as microservices
5
+ Author: ComfyUI Community
6
+ License: GPL-3.0
7
+ Project-URL: Repository, https://github.com/anyin233/comfy-runtime
8
+ Keywords: comfyui,runtime,nodes,microservice,inference
9
+ Classifier: Development Status :: 3 - Alpha
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.10
14
+ Classifier: Programming Language :: Python :: 3.11
15
+ Classifier: Programming Language :: Python :: 3.12
16
+ Classifier: Programming Language :: Python :: 3.13
17
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
18
+ Requires-Python: >=3.10
19
+ Description-Content-Type: text/markdown
20
+ Requires-Dist: torch
21
+ Requires-Dist: torchvision
22
+ Requires-Dist: torchaudio
23
+ Requires-Dist: numpy>=1.25.0
24
+ Requires-Dist: pillow
25
+ Requires-Dist: safetensors>=0.4.2
26
+ Requires-Dist: scipy
27
+ Requires-Dist: einops
28
+ Requires-Dist: transformers>=4.50.3
29
+ Requires-Dist: tokenizers>=0.13.3
30
+ Requires-Dist: sentencepiece
31
+ Requires-Dist: typing_extensions
32
+ Requires-Dist: tqdm
33
+ Requires-Dist: psutil
34
+ Requires-Dist: filelock
35
+ Requires-Dist: requests
36
+ Requires-Dist: torchsde
37
+ Requires-Dist: pyyaml
38
+ Requires-Dist: blake3
39
+ Requires-Dist: simpleeval>=1.0.0
40
+ Provides-Extra: dev
41
+ Requires-Dist: pytest>=7.0; extra == "dev"
42
+ Requires-Dist: pytest-asyncio; extra == "dev"
43
+
44
+ # comfy-runtime
45
+
46
+ Minimal ComfyUI Runtime for Custom Nodes
47
+
48
+ `comfy-runtime` is a minimal Python runtime for loading and executing ComfyUI nodes outside the full ComfyUI app. It is designed for packaging custom nodes into scripts, workers, and microservices. It is not a server, not a UI, and not a workflow engine.
49
+
50
+ ## Installation
51
+
52
+ ```bash
53
+ pip install comfy-runtime
54
+ # Optional: built-in extra nodes
55
+ pip install comfy-builtin-nodes
56
+ ```
57
+
58
+ ## Quick Start
59
+
60
+ ```python
61
+ import comfy_runtime
62
+
63
+ # Configure model paths (optional — has defaults)
64
+ comfy_runtime.configure(
65
+ models_dir="/path/to/models",
66
+ output_dir="/path/to/output",
67
+ )
68
+
69
+ # Execute a built-in node
70
+ result = comfy_runtime.execute_node(
71
+ "EmptyLatentImage",
72
+ width=512, height=512, batch_size=1,
73
+ )
74
+ latent = result[0]
75
+ print(f"Latent shape: {latent['samples'].shape}")
76
+
77
+ # Load extra nodes from a file
78
+ comfy_runtime.load_nodes_from_path("/path/to/my_custom_node.py")
79
+
80
+ # List all registered nodes
81
+ print(comfy_runtime.list_nodes())
82
+
83
+ # Get node info
84
+ info = comfy_runtime.get_node_info("EmptyLatentImage")
85
+ print(info["input_types"])
86
+ ```
87
+
88
+ ## API Reference
89
+
90
+ ### `configure(models_dir, output_dir, input_dir, temp_dir, vram_mode, device)`
91
+
92
+ Configures runtime directories and device flags. This must be called before importing `comfy.model_management`.
93
+
94
+ ### `execute_node(class_type, **kwargs) -> tuple`
95
+
96
+ Runs one registered node and returns its output tuple. Handles both V1 and V3 node styles transparently.
97
+
98
+ ### `create_node_instance(class_type) -> object`
99
+
100
+ Creates a reusable instance of a registered node class. Useful for stateful nodes.
101
+
102
+ ### `register_node(class_type, node_cls, display_name)`
103
+
104
+ Registers a node class manually under a given class type.
105
+
106
+ ### `load_nodes_from_path(path) -> list[str]`
107
+
108
+ Loads nodes from a `.py` file or a directory of Python files. Supports both V1 mapping-based nodes and V3 entrypoint-based nodes.
109
+
110
+ ### `list_nodes() -> list[str]`
111
+
112
+ Returns all registered node class names.
113
+
114
+ ### `get_node_info(class_type) -> dict`
115
+
116
+ Returns public metadata for a node, including input types, return types, category, and execution method.
117
+
118
+ ### `unregister_node(class_type)`
119
+
120
+ Removes a registered node from the runtime registry.
121
+
122
+ ### `get_config() -> dict`
123
+
124
+ Returns the current runtime configuration.
125
+
126
+ ### Exceptions
127
+
128
+ - `NodeNotFoundError`
129
+ - `NodeExecutionError`
130
+
131
+ ## Microservice Example
132
+
133
+ ```python
134
+ # Example: wrapping a node with FastAPI
135
+ from fastapi import FastAPI
136
+ import comfy_runtime
137
+
138
+ app = FastAPI()
139
+ comfy_runtime.configure(models_dir="/data/models", output_dir="/data/output")
140
+
141
+ @app.post("/generate")
142
+ def generate(width: int = 512, height: int = 512):
143
+ result = comfy_runtime.execute_node(
144
+ "EmptyLatentImage", width=width, height=height, batch_size=1,
145
+ )
146
+ return {"shape": list(result[0]["samples"].shape)}
147
+ ```
148
+
149
+ ## Memory Management
150
+
151
+ ```python
152
+ from comfy.model_management import soft_empty_cache, cleanup_models
153
+ soft_empty_cache() # Free unused VRAM
154
+ ```
155
+
156
+ ## Limitations
157
+
158
+ - Single-node execution only, no workflow graph
159
+ - Single-process only, no interrupt signaling across processes
160
+ - No built-in HTTP server, wrap it with FastAPI or gRPC yourself
161
+ - `configure()` must be called before importing `model_management`
162
+
163
+ ## License
164
+
165
+ GPL-3.0, same as ComfyUI
@@ -0,0 +1,122 @@
1
+ # comfy-runtime
2
+
3
+ Minimal ComfyUI Runtime for Custom Nodes
4
+
5
+ `comfy-runtime` is a minimal Python runtime for loading and executing ComfyUI nodes outside the full ComfyUI app. It is designed for packaging custom nodes into scripts, workers, and microservices. It is not a server, not a UI, and not a workflow engine.
6
+
7
+ ## Installation
8
+
9
+ ```bash
10
+ pip install comfy-runtime
11
+ # Optional: built-in extra nodes
12
+ pip install comfy-builtin-nodes
13
+ ```
14
+
15
+ ## Quick Start
16
+
17
+ ```python
18
+ import comfy_runtime
19
+
20
+ # Configure model paths (optional — has defaults)
21
+ comfy_runtime.configure(
22
+ models_dir="/path/to/models",
23
+ output_dir="/path/to/output",
24
+ )
25
+
26
+ # Execute a built-in node
27
+ result = comfy_runtime.execute_node(
28
+ "EmptyLatentImage",
29
+ width=512, height=512, batch_size=1,
30
+ )
31
+ latent = result[0]
32
+ print(f"Latent shape: {latent['samples'].shape}")
33
+
34
+ # Load extra nodes from a file
35
+ comfy_runtime.load_nodes_from_path("/path/to/my_custom_node.py")
36
+
37
+ # List all registered nodes
38
+ print(comfy_runtime.list_nodes())
39
+
40
+ # Get node info
41
+ info = comfy_runtime.get_node_info("EmptyLatentImage")
42
+ print(info["input_types"])
43
+ ```
44
+
45
+ ## API Reference
46
+
47
+ ### `configure(models_dir, output_dir, input_dir, temp_dir, vram_mode, device)`
48
+
49
+ Configures runtime directories and device flags. This must be called before importing `comfy.model_management`.
50
+
51
+ ### `execute_node(class_type, **kwargs) -> tuple`
52
+
53
+ Runs one registered node and returns its output tuple. Handles both V1 and V3 node styles transparently.
54
+
55
+ ### `create_node_instance(class_type) -> object`
56
+
57
+ Creates a reusable instance of a registered node class. Useful for stateful nodes.
58
+
59
+ ### `register_node(class_type, node_cls, display_name)`
60
+
61
+ Registers a node class manually under a given class type.
62
+
63
+ ### `load_nodes_from_path(path) -> list[str]`
64
+
65
+ Loads nodes from a `.py` file or a directory of Python files. Supports both V1 mapping-based nodes and V3 entrypoint-based nodes.
66
+
67
+ ### `list_nodes() -> list[str]`
68
+
69
+ Returns all registered node class names.
70
+
71
+ ### `get_node_info(class_type) -> dict`
72
+
73
+ Returns public metadata for a node, including input types, return types, category, and execution method.
74
+
75
+ ### `unregister_node(class_type)`
76
+
77
+ Removes a registered node from the runtime registry.
78
+
79
+ ### `get_config() -> dict`
80
+
81
+ Returns the current runtime configuration.
82
+
83
+ ### Exceptions
84
+
85
+ - `NodeNotFoundError`
86
+ - `NodeExecutionError`
87
+
88
+ ## Microservice Example
89
+
90
+ ```python
91
+ # Example: wrapping a node with FastAPI
92
+ from fastapi import FastAPI
93
+ import comfy_runtime
94
+
95
+ app = FastAPI()
96
+ comfy_runtime.configure(models_dir="/data/models", output_dir="/data/output")
97
+
98
+ @app.post("/generate")
99
+ def generate(width: int = 512, height: int = 512):
100
+ result = comfy_runtime.execute_node(
101
+ "EmptyLatentImage", width=width, height=height, batch_size=1,
102
+ )
103
+ return {"shape": list(result[0]["samples"].shape)}
104
+ ```
105
+
106
+ ## Memory Management
107
+
108
+ ```python
109
+ from comfy.model_management import soft_empty_cache, cleanup_models
110
+ soft_empty_cache() # Free unused VRAM
111
+ ```
112
+
113
+ ## Limitations
114
+
115
+ - Single-node execution only, no workflow graph
116
+ - Single-process only, no interrupt signaling across processes
117
+ - No built-in HTTP server, wrap it with FastAPI or gRPC yourself
118
+ - `configure()` must be called before importing `model_management`
119
+
120
+ ## License
121
+
122
+ GPL-3.0, same as ComfyUI
@@ -0,0 +1,22 @@
1
"""comfy_runtime public API: configuration, node execution, and node registry."""

__version__ = "0.1.0"

# NOTE: the import order in this module is deliberate and load-bearing.
# bootstrap() installs stub modules into sys.modules (server, latent_preview,
# comfy.options, vendor shims, ...) and must run BEFORE comfy_runtime.executor
# and comfy_runtime.registry are imported — presumably because those modules
# transitively import comfy code that expects the stubs to already be in
# place (see the bootstrap module docstring). Do not reorder these imports.
from comfy_runtime.bootstrap import bootstrap
from comfy_runtime.config import configure, get_config

bootstrap()

# Imported only after bootstrap() has prepared the stubbed environment.
from comfy_runtime.executor import (
    NodeExecutionError,
    NodeNotFoundError,
    create_node_instance,
    execute_node,
    get_node_class,
    get_node_info,
    list_nodes,
)
from comfy_runtime.registry import (
    load_nodes_from_path,
    register_node,
    register_nodes,
    unregister_node,
)
@@ -0,0 +1,166 @@
1
+ """Register all stub modules in sys.modules before any comfy imports.
2
+
3
+ Bootstrap order is critical — comfy_aimdo must come first because many
4
+ ComfyUI modules import it at the top level. The sequence is:
5
+
6
+ 1. comfy_aimdo stubs (7 import sites across ComfyUI)
7
+ 2. server stub (nodes_images.py imports PromptServer at module level)
8
+ 3. latent_preview stub (avoids torch/PIL/heavy-comfy imports)
9
+ 4. comfy.options shim (sets args_parsing = False so cli_args skips argparse)
10
+ 5. vendor shims (registers _vendor/ packages under short names in sys.modules)
11
+ """
12
+
13
+ import importlib
14
+ from importlib.machinery import ModuleSpec
15
+ import sys
16
+ import types
17
+
18
+
19
def _make_stub_module(name, is_package=False):
    """Create an empty module carrying a ModuleSpec (and __path__ for packages).

    A spec (with ``loader=None``) is required so that later
    ``importlib.import_module`` calls treat the entry in sys.modules as a
    fully initialized module instead of re-importing it.
    """
    module = types.ModuleType(name)
    module.__spec__ = ModuleSpec(name, loader=None, is_package=is_package)
    if is_package:
        module.__path__ = []
    return module


def _install_torchvision_fallback():
    """Register a minimal ``torchvision`` stub if the real package is missing.

    The stub exposes only the attributes ComfyUI code touches at import time
    (models.efficientnet_v2_s, ops.box_convert, transforms.Compose/Normalize);
    each raises RuntimeError if actually called.
    """
    try:
        importlib.import_module("torchvision")
        return  # real torchvision is available; nothing to stub
    except Exception:
        pass

    def _torchvision_unavailable(*args, **kwargs):
        raise RuntimeError(
            "torchvision is unavailable in this comfy_runtime environment"
        )

    torchvision_module = _make_stub_module("torchvision", is_package=True)
    torchvision_models = _make_stub_module("torchvision.models")
    torchvision_ops = _make_stub_module("torchvision.ops")
    torchvision_transforms = _make_stub_module("torchvision.transforms")

    torchvision_models.efficientnet_v2_s = _torchvision_unavailable
    torchvision_ops.box_convert = _torchvision_unavailable
    torchvision_transforms.Compose = _torchvision_unavailable
    torchvision_transforms.Normalize = _torchvision_unavailable

    torchvision_module.models = torchvision_models
    torchvision_module.ops = torchvision_ops
    torchvision_module.transforms = torchvision_transforms

    sys.modules["torchvision"] = torchvision_module
    sys.modules["torchvision.models"] = torchvision_models
    sys.modules["torchvision.ops"] = torchvision_ops
    sys.modules["torchvision.transforms"] = torchvision_transforms


def _install_av_fallback():
    """Register a minimal PyAV (``av``) stub if the real package is missing.

    Provides the container/subtitles/VideoStream names that ComfyUI video
    nodes reference at import time; ``av.open`` raises RuntimeError if called.
    """
    try:
        importlib.import_module("av")
        return  # real PyAV is available; nothing to stub
    except Exception:
        pass

    def _av_unavailable(*args, **kwargs):
        raise RuntimeError("av is unavailable in this comfy_runtime environment")

    av_module = _make_stub_module("av", is_package=True)
    av_container = _make_stub_module("av.container")
    av_subtitles = _make_stub_module("av.subtitles", is_package=True)
    av_subtitles_stream = _make_stub_module("av.subtitles.stream")

    av_container.InputContainer = type("InputContainer", (), {})
    av_subtitles_stream.SubtitleStream = type("SubtitleStream", (), {})
    av_module.VideoStream = type("VideoStream", (), {})

    av_subtitles.stream = av_subtitles_stream
    av_module.container = av_container
    av_module.subtitles = av_subtitles
    av_module.open = _av_unavailable
    # NOTE(review): real PyAV exposes av.time_base == Fraction(1, 1000000);
    # the stub uses 1 — confirm no caller does arithmetic with this value.
    av_module.time_base = 1

    sys.modules["av"] = av_module
    sys.modules["av.container"] = av_container
    sys.modules["av.subtitles"] = av_subtitles
    sys.modules["av.subtitles.stream"] = av_subtitles_stream


def _ensure_comfy_options():
    """Ensure a ``comfy`` package exists in sys.modules with an options shim.

    Setting comfy.options.args_parsing = False makes comfy.cli_args skip
    argparse, so importing it inside a host application cannot consume
    sys.argv (step 4 of the bootstrap sequence in the module docstring).
    """
    if "comfy" not in sys.modules:
        try:
            import comfy  # noqa: F401  # type: ignore[import-not-found]
        except ImportError:
            # No installed comfy package; create an empty namespace parent
            # so "comfy.*" submodules can be aliased onto it below.
            comfy_parent = types.ModuleType("comfy")
            comfy_parent.__path__ = []
            sys.modules["comfy"] = comfy_parent

    options_module = types.ModuleType("comfy.options")
    options_module.args_parsing = False  # type: ignore[attr-defined]
    options_module.enable_args_parsing = lambda enable=True: None  # type: ignore[attr-defined]
    sys.modules["comfy.options"] = options_module
    sys.modules["comfy"].options = options_module  # type: ignore[attr-defined]


def _alias_vendored_cli_args():
    """Expose the vendored comfy.cli_args and force CPU mode when CUDA is absent.

    Best-effort: silently skipped if the vendored module or torch cannot be
    imported, leaving "comfy.cli_args" to resolve however it otherwise would.
    """
    try:
        cli_args_mod = importlib.import_module("comfy_runtime._vendor.comfy.cli_args")
        torch = importlib.import_module("torch")

        if not torch.cuda.is_available():
            cli_args_mod.args.cpu = True
        sys.modules["comfy.cli_args"] = cli_args_mod
    except Exception:
        pass  # deliberate best-effort; see docstring


def _alias_vendored_mmdit():
    """Alias the vendored diffusionmodules/mmdit modules under comfy.* names.

    Best-effort: silently skipped if the vendored modules are unavailable.
    """
    try:
        diffusionmodules_mod = importlib.import_module(
            "comfy_runtime._vendor.comfy.ldm.modules.diffusionmodules"
        )
        diffusion_mmdit_mod = importlib.import_module(
            "comfy_runtime._vendor.comfy.ldm.modules.diffusionmodules.mmdit"
        )
        # Bind the submodule as an attribute so `diffusionmodules.mmdit`
        # attribute access works, not just `import comfy.ldm...mmdit`.
        diffusionmodules_mod.mmdit = diffusion_mmdit_mod
        sys.modules["comfy.ldm.modules.diffusionmodules"] = diffusionmodules_mod
        sys.modules["comfy.ldm.modules.diffusionmodules.mmdit"] = diffusion_mmdit_mod
    except Exception:
        pass  # deliberate best-effort; see docstring


def bootstrap():
    """Register all stubs in sys.modules BEFORE any comfy imports.

    The steps run in the fixed order described in the module docstring:
    aimdo stubs, server stub, latent_preview stub, optional torchvision/av
    fallbacks, the comfy.options shim, vendored cli_args / mmdit aliases,
    and finally the vendor shims. Do not reorder the calls below.
    """
    from comfy_runtime.stubs.comfy_aimdo_stub import install_aimdo_stubs

    install_aimdo_stubs()

    import comfy_runtime.stubs.server_stub as server_module

    sys.modules["server"] = server_module

    import comfy_runtime.stubs.latent_preview_stub as latent_preview_module

    sys.modules["latent_preview"] = latent_preview_module

    _install_torchvision_fallback()
    _install_av_fallback()
    _ensure_comfy_options()

    # Imported here (after the options shim, before the aliases) to preserve
    # any import-time side effects of comfy_runtime.shim at this exact point.
    from comfy_runtime.shim import install_shims

    _alias_vendored_cli_args()
    _alias_vendored_mmdit()

    install_shims()
@@ -0,0 +1,105 @@
1
+ """Runtime configuration API for comfy-runtime."""
2
+
3
+
4
def _register_model_folders(folder_paths, models_dir):
    """Register ``<models_dir>/<category>`` for every known model category.

    New paths are prepended so they take lookup priority, but a path is
    never registered twice — repeated configure() calls stay idempotent
    (the original code re-prepended the same directory on every call).
    """
    import os

    # Fresh set literals per call: folder_paths tuples must not share
    # mutable set objects across categories or across configure() calls.
    model_categories = {
        "checkpoints": {".ckpt", ".safetensors", ".pt", ".bin", ".pth"},
        "configs": {".yaml", ".yml", ".json"},
        "loras": {".safetensors", ".ckpt", ".pt", ".bin"},
        "vae": {".safetensors", ".pt", ".bin", ".ckpt"},
        "text_encoders": {".safetensors", ".pt", ".bin", ".ckpt"},
        "diffusion_models": {".safetensors", ".pt", ".bin", ".ckpt"},
        "clip_vision": {".safetensors", ".pt", ".bin", ".ckpt"},
        "style_models": {".safetensors", ".pt", ".bin", ".ckpt"},
        "embeddings": {".safetensors", ".pt", ".bin", ".ckpt"},
        "controlnet": {".safetensors", ".pt", ".bin", ".ckpt"},
        "unet": {".safetensors", ".pt", ".bin", ".ckpt"},
        "upscale_models": {".safetensors", ".pt", ".bin", ".ckpt"},
    }
    for category, extensions in model_categories.items():
        cat_dir = os.path.join(models_dir, category)
        if category in folder_paths.folder_names_and_paths:
            existing_paths, existing_exts = folder_paths.folder_names_and_paths[
                category
            ]
            paths = list(existing_paths)
            if cat_dir not in paths:  # fix: avoid duplicates on repeated calls
                paths.insert(0, cat_dir)
            folder_paths.folder_names_and_paths[category] = (
                paths,
                set(existing_exts) | extensions,
            )
        else:
            folder_paths.folder_names_and_paths[category] = ([cat_dir], extensions)


def configure(
    models_dir=None,
    output_dir=None,
    input_dir=None,
    temp_dir=None,
    vram_mode=None,
    device=None,
    **kwargs,
):
    """Configure model paths and runtime settings.

    Safe to call more than once: directory overrides replace the previous
    values, and model-folder registration is idempotent.

    Args:
        models_dir: Base directory that contains Comfy model subdirectories.
        output_dir: Output directory for generated files.
        input_dir: Input directory for source files.
        temp_dir: Temporary directory for intermediate files.
        vram_mode: VRAM mode flag to enable on comfy.cli_args.args. One of
            ``"highvram"``, ``"normalvram"``, ``"lowvram"``, ``"novram"``,
            or ``"cpu"``; any other value is silently ignored.
        device: CUDA device index or ``"cpu"`` to force CPU mode.
        **kwargs: Extra attributes to apply directly to comfy.cli_args.args.

    Returns:
        None.
    """
    from comfy.cli_args import args  # type: ignore[import-not-found]

    # Each recognized mode maps to the identically named boolean flag on
    # args. Unknown values are ignored on purpose (best-effort config).
    _VRAM_MODES = ("highvram", "normalvram", "lowvram", "novram", "cpu")
    if vram_mode in _VRAM_MODES:
        setattr(args, vram_mode, True)

    if device is not None:
        if isinstance(device, int):
            args.cuda_device = device
        elif device == "cpu":
            args.cpu = True

    # Pass-through escape hatch for any other cli_args attribute.
    for key, val in kwargs.items():
        setattr(args, key, val)

    import folder_paths  # type: ignore[import-not-found]

    if output_dir is not None:
        folder_paths.output_directory = output_dir
    if input_dir is not None:
        folder_paths.input_directory = input_dir
    if temp_dir is not None:
        folder_paths.temp_directory = temp_dir

    if models_dir is not None:
        _register_model_folders(folder_paths, models_dir)
88
+
89
def get_config():
    """Return current configuration as a dictionary.

    Returns:
        dict: Selected runtime directories and device-related flags.
    """
    from comfy.cli_args import args  # type: ignore[import-not-found]
    import folder_paths  # type: ignore[import-not-found]

    config = {}
    # Directories live on the folder_paths module; missing ones report None.
    for attr in ("output_directory", "input_directory", "temp_directory"):
        config[attr] = getattr(folder_paths, attr, None)
    # Device flags live on comfy.cli_args.args, with fixed fallbacks.
    for flag, default in (("cpu", False), ("highvram", False), ("cuda_device", 0)):
        config[flag] = getattr(args, flag, default)
    return config