comfy-env 0.0.65__py3-none-any.whl → 0.0.67__py3-none-any.whl
- comfy_env/__init__.py +68 -122
- comfy_env/cli.py +74 -204
- comfy_env/config/__init__.py +19 -0
- comfy_env/config/parser.py +151 -0
- comfy_env/config/types.py +64 -0
- comfy_env/install.py +83 -361
- comfy_env/isolation/__init__.py +9 -0
- comfy_env/isolation/wrap.py +351 -0
- comfy_env/nodes.py +2 -2
- comfy_env/pixi/__init__.py +48 -0
- comfy_env/pixi/core.py +356 -0
- comfy_env/{resolver.py → pixi/resolver.py} +1 -14
- comfy_env/prestartup.py +60 -0
- comfy_env/templates/comfy-env-instructions.txt +30 -87
- comfy_env/templates/comfy-env.toml +69 -128
- comfy_env/workers/__init__.py +21 -32
- comfy_env/workers/base.py +1 -1
- comfy_env/workers/{torch_mp.py → mp.py} +47 -14
- comfy_env/workers/{venv.py → subprocess.py} +397 -443
- {comfy_env-0.0.65.dist-info → comfy_env-0.0.67.dist-info}/METADATA +23 -92
- comfy_env-0.0.67.dist-info/RECORD +32 -0
- comfy_env/decorator.py +0 -700
- comfy_env/env/__init__.py +0 -46
- comfy_env/env/config.py +0 -191
- comfy_env/env/config_file.py +0 -706
- comfy_env/env/manager.py +0 -636
- comfy_env/env/security.py +0 -267
- comfy_env/ipc/__init__.py +0 -55
- comfy_env/ipc/bridge.py +0 -476
- comfy_env/ipc/protocol.py +0 -265
- comfy_env/ipc/tensor.py +0 -371
- comfy_env/ipc/torch_bridge.py +0 -401
- comfy_env/ipc/transport.py +0 -318
- comfy_env/ipc/worker.py +0 -221
- comfy_env/isolation.py +0 -310
- comfy_env/pixi.py +0 -760
- comfy_env/registry.py +0 -130
- comfy_env/stub_imports.py +0 -270
- comfy_env/stubs/__init__.py +0 -1
- comfy_env/stubs/comfy/__init__.py +0 -6
- comfy_env/stubs/comfy/model_management.py +0 -58
- comfy_env/stubs/comfy/utils.py +0 -29
- comfy_env/stubs/folder_paths.py +0 -71
- comfy_env/wheel_sources.yml +0 -141
- comfy_env/workers/pool.py +0 -241
- comfy_env-0.0.65.dist-info/RECORD +0 -48
- /comfy_env/{env/cuda_gpu_detection.py → pixi/cuda_detection.py} +0 -0
- /comfy_env/{env → pixi}/platform/__init__.py +0 -0
- /comfy_env/{env → pixi}/platform/base.py +0 -0
- /comfy_env/{env → pixi}/platform/darwin.py +0 -0
- /comfy_env/{env → pixi}/platform/linux.py +0 -0
- /comfy_env/{env → pixi}/platform/windows.py +0 -0
- {comfy_env-0.0.65.dist-info → comfy_env-0.0.67.dist-info}/WHEEL +0 -0
- {comfy_env-0.0.65.dist-info → comfy_env-0.0.67.dist-info}/entry_points.txt +0 -0
- {comfy_env-0.0.65.dist-info → comfy_env-0.0.67.dist-info}/licenses/LICENSE +0 -0
comfy_env/templates/comfy-env.toml  CHANGED

@@ -1,82 +1,85 @@
 # =============================================================================
 # comfy-env.toml - Environment configuration for ComfyUI custom nodes
-# Documentation: https://github.com/PozzettiAndrea/comfy-env
 # =============================================================================
 #
-#
-#
-#
-#
+# comfy-env.toml is a SUPERSET of pixi.toml. All pixi.toml syntax works here.
+# We add custom sections: [cuda], [node_reqs], and top-level `python = "x.x"`.
+#
+# GPU is auto-detected to select the right CUDA version.
 #
 # Quick start:
 # pip install comfy-env
 # comfy-env init # Creates this template
 # comfy-env install # Installs dependencies
-# comfy-env doctor # Verifies installation
 
 
 # =============================================================================
-#
+# PYTHON VERSION (optional)
 # =============================================================================
-#
-#
+# Specify Python version for isolated environments (e.g., bpy needs Python 3.11)
+# Omit this to use the host Python version.
 
-
-linux = []
-# Examples:
-# linux = ["libgl1", "libopengl0"] # For OpenGL rendering (nvdiffrast)
-# linux = ["python3-dev"] # For packages that compile C extensions
-# linux = ["ffmpeg"] # For video processing nodes
+# python = "3.11"
 
 
 # =============================================================================
-# CUDA PACKAGES
+# CUDA PACKAGES (custom section)
 # =============================================================================
-# CUDA
-#
-#
+# Pre-compiled CUDA wheels from the cuda-wheels index.
+# Pixi auto-selects the right wheel based on your GPU.
+# Supports CUDA 12.8 → PyTorch 2.8 or CUDA 12.4 → PyTorch 2.4
 
 [cuda]
-
-#
-#
-#
-#
-#
-#
-#
-#
-# dpvo_cuda = "0.0.0" # DPVO CUDA extensions
+packages = []
+# Examples:
+# packages = [
+#     "nvdiffrast",  # NVIDIA differentiable rasterizer
+#     "pytorch3d",   # PyTorch3D - 3D deep learning
+#     "gsplat",      # Gaussian splatting
+#     "cumesh",      # GPU mesh processing
+#     "flash-attn",  # Flash Attention (Linux only)
+# ]
 
 
 # =============================================================================
-#
+# CONDA PACKAGES (pixi-native)
 # =============================================================================
-#
+# Conda packages from conda-forge (includes system libraries).
+# Use this instead of apt/brew - works cross-platform.
 
-[
-
-#
-#
-#
-
-
-#
-#
+[dependencies]
+# mesalib = "*" # OpenGL (replaces libgl1)
+# libglu = "*" # GLU library
+# ffmpeg = "*" # Video processing
+# cgal = "*" # Computational geometry
+
+
+# =============================================================================
+# PIP PACKAGES (pixi-native)
+# =============================================================================
+# Regular pip packages.
+
+[pypi-dependencies]
+# numpy = ">=1.21.0,<2"
+# trimesh = { version = ">=4.0.0", extras = ["easy"] }
+# pillow = "*"
 
 
 # =============================================================================
-#
+# PLATFORM-SPECIFIC PACKAGES (pixi-native)
 # =============================================================================
-#
-
+# Use pixi's target syntax for platform-specific packages.
+
+# [target.linux-64.pypi-dependencies]
+# embreex = "*"
 
-[
-#
+# [target.win-64.pypi-dependencies]
+# msvc-runtime = "*"
+# embreex = "*"
 
 
 # =============================================================================
-# NODE DEPENDENCIES (
+# NODE DEPENDENCIES (custom section)
 # =============================================================================
 # Other ComfyUI custom nodes this node depends on.
 
@@ -85,93 +88,31 @@ requirements = []
 # ComfyUI-Impact-Pack = "ltdrdata/ComfyUI-Impact-Pack"
 
 
-# #############################################################################
-#
-# PROCESS ISOLATION (ADVANCED)
-#
-# #############################################################################
-#
-# For nodes that need completely isolated dependencies (different Python
-# version, conda packages, conflicting native libraries), define an isolated
-# environment with `isolated = true`.
-#
-# RECOMMENDED: Pack-wide isolation (all nodes in one environment)
-# ----------------------------------------------------------------
-# This is the simplest approach - all your nodes run in the same isolated env.
-#
-# Step 1: Define environment in comfy-env.toml (this file)
-# Step 2: In __init__.py:
-#
-# from comfy_env import setup_isolated_imports, enable_isolation
-#
-# # Setup import stubs BEFORE importing nodes
-# setup_isolated_imports(__file__)
-#
-# from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
-#
-# # Enable isolation for all nodes
-# enable_isolation(NODE_CLASS_MAPPINGS)
-#
 # =============================================================================
-
-
-# -----------------------------------------------------------------------------
-# Example: Full pack isolation with conda packages (RECOMMENDED)
-# -----------------------------------------------------------------------------
-# Uses pixi to create an isolated environment with conda + pip packages.
-
-# [mypack]
-# python = "3.11"
-# isolated = true # Required for enable_isolation()
-#
-# [mypack.conda]
-# channels = ["conda-forge"]
-# packages = ["cgal", "openmesh"]
-#
-# [mypack.packages]
-# requirements = ["trimesh[easy]>=4.0", "numpy", "scipy"]
-
-
-# -----------------------------------------------------------------------------
-# Example: Multiple isolated environments (per-node control)
-# -----------------------------------------------------------------------------
-# Use @isolated(env="envname") decorator when different nodes need different envs.
-#
-# from comfy_env import isolated
+# PROCESS ISOLATION
+# =============================================================================
 #
-#
-# class PreprocessNode: ...
+# For nodes needing isolated environments, put comfy-env.toml in a subdirectory:
 #
-#
-#
-
-#
-#
+# ComfyUI-MyPack/
+# ├── comfy-env.toml # Root: installs to main env
+# ├── __init__.py
+# └── nodes/
+#     └── isolated/
+#         ├── comfy-env.toml # Subdirectory: isolated env
+#         └── *.py
 #
-#
-# requirements = ["opencv-python-headless", "pillow"]
-
-# [env-inference]
-# python = "3.10"
+# In your nodes/__init__.py:
 #
-#
-#
-
-
-# -----------------------------------------------------------------------------
-# Example: Platform-specific packages
-# -----------------------------------------------------------------------------
-# Different packages for Windows vs Linux.
-
-# [crossplatform]
-# python = "3.11"
-# isolated = true
+#     from pathlib import Path
+#     from comfy_env import wrap_isolated_nodes
 #
-#
-# requirements = ["numpy", "pillow"]
+#     NODE_CLASS_MAPPINGS = {}
 #
-#
-#
+#     from .main import NODE_CLASS_MAPPINGS as main_nodes
+#     NODE_CLASS_MAPPINGS.update(main_nodes)
 #
-#
-#
+#     from .isolated import NODE_CLASS_MAPPINGS as isolated_nodes
+#     NODE_CLASS_MAPPINGS.update(
+#         wrap_isolated_nodes(isolated_nodes, Path(__file__).parent / "isolated")
+#     )
comfy_env/workers/__init__.py  CHANGED

@@ -1,49 +1,38 @@
 """
-Workers -
+Workers - Process isolation for ComfyUI nodes.
 
-This module provides
+This module provides two isolation tiers:
 
-Tier 1:
-  - Uses
-  - Zero-copy tensor transfer via
+Tier 1: MPWorker (same Python, fresh CUDA context)
+  - Uses multiprocessing.Queue
+  - Zero-copy tensor transfer via shared memory
   - ~30ms overhead per call
   - Use for: Memory isolation, fresh CUDA context
 
-Tier 2:
-  -
-  -
-  - ~100-500ms overhead per call
+Tier 2: SubprocessWorker (different Python/venv)
+  - Persistent subprocess + socket IPC
+  - ~50-100ms overhead per call
   - Use for: Different PyTorch versions, incompatible deps
 
-Tier 3: ContainerWorker (full isolation) [future]
-  - Docker with GPU passthrough
-  - Use for: Different CUDA versions, hermetic environments
-
 Usage:
-    from comfy_env.workers import
-
-    # Get a named worker from the pool
-    worker = get_worker("sam3d")
-    result = worker.call(my_function, image=tensor)
+    from comfy_env.workers import MPWorker, SubprocessWorker
 
-    #
-    worker =
+    # Create worker directly
+    worker = MPWorker()
     result = worker.call(my_function, arg1, arg2)
+
+    # Or use SubprocessWorker for isolated Python
+    worker = SubprocessWorker(python="/path/to/venv/bin/python")
+    result = worker.call(my_function, image=tensor)
 """
 
-from .base import Worker
-from .
-from .
-from .pool import WorkerPool, get_worker, register_worker, shutdown_workers, list_workers
+from .base import Worker, WorkerError
+from .mp import MPWorker
+from .subprocess import SubprocessWorker
 
 __all__ = [
     "Worker",
-    "
-    "
-    "
-    "WorkerPool",
-    "get_worker",
-    "register_worker",
-    "shutdown_workers",
-    "list_workers",
+    "WorkerError",
+    "MPWorker",
+    "SubprocessWorker",
 ]
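Putting the new docstring together, worker usage in 0.0.67 looks roughly like the sketch below. It assumes call() starts the worker lazily, as the docstring examples suggest; gpu_work and the venv path are placeholders.

    # Sketch of the 0.0.67 worker API from the docstring above.
    from comfy_env.workers import MPWorker, SubprocessWorker

    def gpu_work(x):
        # Runs in the child process; tensors transfer via shared memory (Tier 1).
        return x * 2

    worker = MPWorker()                                        # Tier 1: same Python
    print(worker.call(gpu_work, 21))

    iso = SubprocessWorker(python="/path/to/venv/bin/python")  # Tier 2: different Python/venv
    print(iso.call(gpu_work, 21))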
comfy_env/workers/{torch_mp.py → mp.py}  CHANGED
@@ -1,9 +1,9 @@
 """
-
+MPWorker - Same-venv isolation using multiprocessing.
 
 This is the simplest and fastest worker type:
-  - Uses
-  - Zero-copy tensor transfer via
+  - Uses multiprocessing.Queue for IPC
+  - Zero-copy tensor transfer via shared memory (automatic)
   - Fresh CUDA context in subprocess
   - ~30ms overhead per call
 
@@ -13,7 +13,7 @@ Use this when you need:
   - Same Python environment as host
 
 Example:
-    worker =
+    worker = MPWorker()
 
     def gpu_work(image):
         import torch
@@ -40,7 +40,7 @@ _SHUTDOWN = object()
 _CALL_METHOD = "call_method"
 
 
-def _worker_loop(queue_in, queue_out, sys_path_additions=None):
+def _worker_loop(queue_in, queue_out, sys_path_additions=None, lib_path=None):
     """
     Worker process main loop.
 
@@ -55,6 +55,7 @@ def _worker_loop(queue_in, queue_out, sys_path_additions=None):
         queue_in: Input queue for receiving work items
         queue_out: Output queue for sending results
         sys_path_additions: Paths to add to sys.path
+        lib_path: Path to add to LD_LIBRARY_PATH (for conda libraries)
     """
     import os
     import sys
@@ -63,10 +64,40 @@ def _worker_loop(queue_in, queue_out, sys_path_additions=None):
     # Set worker mode env var
     os.environ["COMFYUI_ISOLATION_WORKER"] = "1"
 
-    #
-
-
-
+    # Set LD_LIBRARY_PATH for conda libraries (must be done before imports)
+    if lib_path:
+        if sys.platform == "win32":
+            # Windows: add to PATH for DLL loading
+            os.environ["PATH"] = lib_path + ";" + os.environ.get("PATH", "")
+        else:
+            # Linux/Mac: LD_LIBRARY_PATH
+            os.environ["LD_LIBRARY_PATH"] = lib_path + ":" + os.environ.get("LD_LIBRARY_PATH", "")
+
+    # Find ComfyUI base and add to sys.path for real folder_paths/comfy modules
+    # This works because comfy.options.args_parsing=False by default, so folder_paths
+    # auto-detects its base directory from __file__ location
+    def _find_comfyui_base():
+        cwd = Path.cwd().resolve()
+        # Check common child directories (for test environments)
+        for base in [cwd, cwd.parent]:
+            for child in [".comfy-test-env/ComfyUI", "ComfyUI"]:
+                candidate = base / child
+                if (candidate / "main.py").exists() and (candidate / "comfy").exists():
+                    return candidate
+        # Walk up from cwd looking for ComfyUI
+        current = cwd
+        for _ in range(10):
+            if (current / "main.py").exists() and (current / "comfy").exists():
+                return current
+            current = current.parent
+        # Check COMFYUI_BASE env var as fallback
+        if os.environ.get("COMFYUI_BASE"):
+            return Path(os.environ["COMFYUI_BASE"])
+        return None
+
+    comfyui_base = _find_comfyui_base()
+    if comfyui_base and str(comfyui_base) not in sys.path:
+        sys.path.insert(0, str(comfyui_base))
 
     # Add custom paths to sys.path for module discovery
     if sys_path_additions:
@@ -356,7 +387,7 @@ def _execute_method_call(module_name: str, class_name: str, method_name: str,
     return original_method(instance, **kwargs)
 
 
-class
+class MPWorker(Worker):
     """
     Worker using torch.multiprocessing for same-venv isolation.
 
@@ -370,16 +401,18 @@ class TorchMPWorker(Worker):
     interpreter without inherited state from the parent.
     """
 
-    def __init__(self, name: Optional[str] = None, sys_path: Optional[list] = None):
+    def __init__(self, name: Optional[str] = None, sys_path: Optional[list] = None, lib_path: Optional[str] = None):
         """
         Initialize the worker.
 
         Args:
            name: Optional name for logging/debugging.
            sys_path: Optional list of paths to add to sys.path in worker process.
+           lib_path: Optional path to add to LD_LIBRARY_PATH (for conda libraries).
         """
-        self.name = name or "
+        self.name = name or "MPWorker"
         self._sys_path = sys_path or []
+        self._lib_path = lib_path
         self._process = None
         self._queue_in = None
         self._queue_out = None
@@ -406,7 +439,7 @@ class TorchMPWorker(Worker):
         self._queue_out = ctx.Queue()
         self._process = ctx.Process(
             target=_worker_loop,
-            args=(self._queue_in, self._queue_out, self._sys_path),
+            args=(self._queue_in, self._queue_out, self._sys_path, self._lib_path),
             daemon=True,
         )
         self._process.start()
@@ -593,4 +626,4 @@ class TorchMPWorker(Worker):
 
     def __repr__(self):
         status = "alive" if self.is_alive() else "stopped"
-        return f"<
+        return f"<MPWorker name={self.name!r} status={status}>"