comfy-env 0.1.9__tar.gz → 0.1.11__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. {comfy_env-0.1.9 → comfy_env-0.1.11}/PKG-INFO +1 -1
  2. {comfy_env-0.1.9 → comfy_env-0.1.11}/pyproject.toml +1 -1
  3. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/isolation/wrap.py +60 -4
  4. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/pixi/core.py +47 -5
  5. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/prestartup.py +13 -2
  6. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/workers/mp.py +58 -0
  7. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/workers/subprocess.py +53 -72
  8. {comfy_env-0.1.9 → comfy_env-0.1.11}/.github/workflows/ci.yml +0 -0
  9. {comfy_env-0.1.9 → comfy_env-0.1.11}/.github/workflows/publish.yml +0 -0
  10. {comfy_env-0.1.9 → comfy_env-0.1.11}/.gitignore +0 -0
  11. {comfy_env-0.1.9 → comfy_env-0.1.11}/LICENSE +0 -0
  12. {comfy_env-0.1.9 → comfy_env-0.1.11}/README.md +0 -0
  13. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/__init__.py +0 -0
  14. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/cli.py +0 -0
  15. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/config/__init__.py +0 -0
  16. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/config/parser.py +0 -0
  17. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/config/types.py +0 -0
  18. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/errors.py +0 -0
  19. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/install.py +0 -0
  20. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/isolation/__init__.py +0 -0
  21. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/nodes.py +0 -0
  22. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/pixi/__init__.py +0 -0
  23. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/pixi/cuda_detection.py +0 -0
  24. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/pixi/platform/__init__.py +0 -0
  25. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/pixi/platform/base.py +0 -0
  26. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/pixi/platform/darwin.py +0 -0
  27. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/pixi/platform/linux.py +0 -0
  28. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/pixi/platform/windows.py +0 -0
  29. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/pixi/resolver.py +0 -0
  30. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/templates/comfy-env-instructions.txt +0 -0
  31. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/templates/comfy-env.toml +0 -0
  32. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/workers/__init__.py +0 -0
  33. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/workers/base.py +0 -0
  34. {comfy_env-0.1.9 → comfy_env-0.1.11}/src/comfy_env/workers/tensor_utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: comfy-env
3
- Version: 0.1.9
3
+ Version: 0.1.11
4
4
  Summary: Environment management for ComfyUI custom nodes - CUDA wheel resolution and process isolation
5
5
  Project-URL: Homepage, https://github.com/PozzettiAndrea/comfy-env
6
6
  Project-URL: Repository, https://github.com/PozzettiAndrea/comfy-env
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "comfy-env"
3
- version = "0.1.9"
3
+ version = "0.1.11"
4
4
  description = "Environment management for ComfyUI custom nodes - CUDA wheel resolution and process isolation"
5
5
  readme = "README.md"
6
6
  license = {text = "MIT"}
@@ -34,6 +34,12 @@ from typing import Any, Dict, Optional
34
34
  # Debug logging (set COMFY_ENV_DEBUG=1 to enable)
35
35
  _DEBUG = os.environ.get("COMFY_ENV_DEBUG", "").lower() in ("1", "true", "yes")
36
36
 
37
+
38
+ def get_env_name(dir_name: str) -> str:
39
+ """Convert directory name to env name: ComfyUI-UniRig → _env_unirig"""
40
+ name = dir_name.lower().replace("-", "_").lstrip("comfyui_")
41
+ return f"_env_{name}"
42
+
37
43
  # Global worker cache (one per isolated environment)
38
44
  _workers: Dict[str, Any] = {}
39
45
  _workers_lock = threading.Lock()
@@ -144,13 +150,27 @@ def _find_env_paths(node_dir: Path) -> tuple[Optional[Path], Optional[Path]]:
144
150
  """
145
151
  import glob
146
152
 
147
- # Check pixi environment first
153
+ # Check _env_<name> directory first (new pattern)
154
+ env_name = get_env_name(node_dir.name)
155
+ env_dir = node_dir / env_name
156
+ if env_dir.exists():
157
+ if sys.platform == "win32":
158
+ site_packages = env_dir / "Lib" / "site-packages"
159
+ lib_dir = env_dir / "Library" / "bin"
160
+ else:
161
+ pattern = str(env_dir / "lib" / "python*" / "site-packages")
162
+ matches = glob.glob(pattern)
163
+ site_packages = Path(matches[0]) if matches else None
164
+ lib_dir = env_dir / "lib"
165
+ if site_packages and site_packages.exists():
166
+ return site_packages, lib_dir if lib_dir.exists() else None
167
+
168
+ # Fallback: Check old .pixi/envs/default (for backward compat)
148
169
  pixi_env = node_dir / ".pixi" / "envs" / "default"
149
170
  if pixi_env.exists():
150
- # Find site-packages (pythonX.Y varies)
151
171
  if sys.platform == "win32":
152
172
  site_packages = pixi_env / "Lib" / "site-packages"
153
- lib_dir = pixi_env / "Library" / "bin" # Windows DLLs
173
+ lib_dir = pixi_env / "Library" / "bin"
154
174
  else:
155
175
  pattern = str(pixi_env / "lib" / "python*" / "site-packages")
156
176
  matches = glob.glob(pattern)
@@ -176,6 +196,12 @@ def _find_env_paths(node_dir: Path) -> tuple[Optional[Path], Optional[Path]]:
176
196
 
177
197
  def _find_env_dir(node_dir: Path) -> Optional[Path]:
178
198
  """Find the environment directory (for cache key)."""
199
+ # Check _env_<name> first
200
+ env_name = get_env_name(node_dir.name)
201
+ env_dir = node_dir / env_name
202
+ if env_dir.exists():
203
+ return env_dir
204
+ # Fallback to old paths
179
205
  pixi_env = node_dir / ".pixi" / "envs" / "default"
180
206
  if pixi_env.exists():
181
207
  return pixi_env
@@ -185,6 +211,34 @@ def _find_env_dir(node_dir: Path) -> Optional[Path]:
185
211
  return None
186
212
 
187
213
 
214
+ def _find_custom_node_root(nodes_dir: Path) -> Optional[Path]:
215
+ """
216
+ Find the custom node root (direct child of custom_nodes/).
217
+
218
+ Uses folder_paths to find custom_nodes directories, then finds
219
+ which one is an ancestor of nodes_dir.
220
+
221
+ Example: /path/custom_nodes/ComfyUI-UniRig/nodes/nodes_gpu
222
+ -> returns /path/custom_nodes/ComfyUI-UniRig
223
+ """
224
+ try:
225
+ import folder_paths
226
+ custom_nodes_dirs = folder_paths.get_folder_paths("custom_nodes")
227
+ except (ImportError, KeyError):
228
+ return None
229
+
230
+ for cn_dir in custom_nodes_dirs:
231
+ cn_path = Path(cn_dir)
232
+ try:
233
+ rel = nodes_dir.relative_to(cn_path)
234
+ if rel.parts:
235
+ return cn_path / rel.parts[0]
236
+ except ValueError:
237
+ continue
238
+
239
+ return None
240
+
241
+
188
242
  def _wrap_node_class(
189
243
  cls: type,
190
244
  env_dir: Path,
@@ -362,7 +416,9 @@ def wrap_isolated_nodes(
362
416
  print(f"[comfy-env] Run 'comfy-env install' in {nodes_dir}")
363
417
  return node_class_mappings
364
418
 
365
- # Build sys.path for the worker - site-packages first, then node dir
419
+ # Build sys.path - site-packages first, then nodes_dir
420
+ # Note: isolated modules should use absolute imports (their dir is in sys.path)
421
+ # Relative imports would require importing parent package which may have host-only deps
366
422
  sys_path = [str(site_packages), str(nodes_dir)]
367
423
 
368
424
  # lib_dir for LD_LIBRARY_PATH (conda libraries)
@@ -281,6 +281,12 @@ def ensure_pixi(
281
281
  return pixi_path
282
282
 
283
283
 
284
+ def get_env_name(dir_name: str) -> str:
285
+ """Convert directory name to env name: ComfyUI-UniRig → _env_unirig"""
286
+ name = dir_name.lower().replace("-", "_").lstrip("comfyui_")
287
+ return f"_env_{name}"
288
+
289
+
284
290
  def clean_pixi_artifacts(node_dir: Path, log: Callable[[str], None] = print) -> None:
285
291
  """Remove previous pixi installation artifacts."""
286
292
  for path in [node_dir / "pixi.toml", node_dir / "pixi.lock"]:
@@ -289,11 +295,21 @@ def clean_pixi_artifacts(node_dir: Path, log: Callable[[str], None] = print) ->
289
295
  pixi_dir = node_dir / ".pixi"
290
296
  if pixi_dir.exists():
291
297
  shutil.rmtree(pixi_dir)
298
+ # Also clean old _env_* directories
299
+ env_name = get_env_name(node_dir.name)
300
+ env_dir = node_dir / env_name
301
+ if env_dir.exists():
302
+ shutil.rmtree(env_dir)
292
303
 
293
304
 
294
305
  def get_pixi_python(node_dir: Path) -> Optional[Path]:
295
306
  """Get path to Python in the pixi environment."""
296
- env_dir = node_dir / ".pixi" / "envs" / "default"
307
+ # Check new _env_<name> location first
308
+ env_name = get_env_name(node_dir.name)
309
+ env_dir = node_dir / env_name
310
+ if not env_dir.exists():
311
+ # Fallback to old .pixi path
312
+ env_dir = node_dir / ".pixi" / "envs" / "default"
297
313
  if sys.platform == "win32":
298
314
  python_path = env_dir / "python.exe"
299
315
  else:
@@ -441,11 +457,14 @@ def pixi_install(
441
457
  # Build pypi-dependencies section (CUDA packages excluded - installed separately)
442
458
  pypi_deps = pixi_data.get("pypi-dependencies", {})
443
459
 
444
- # Add torch if we have CUDA packages
460
+ # Enforce torch version if we have CUDA packages (must match cuda_packages wheels)
445
461
  if cfg.has_cuda and torch_version:
446
462
  torch_major = torch_version.split(".")[0]
447
463
  torch_minor = int(torch_version.split(".")[1])
448
- pypi_deps.setdefault("torch", f">={torch_version},<{torch_major}.{torch_minor + 1}")
464
+ required_torch = f">={torch_version},<{torch_major}.{torch_minor + 1}"
465
+ if "torch" in pypi_deps and pypi_deps["torch"] != required_torch:
466
+ log(f"Overriding torch={pypi_deps['torch']} with {required_torch} (required for cuda_packages)")
467
+ pypi_deps["torch"] = required_torch
449
468
 
450
469
  # NOTE: CUDA packages are NOT added here - they're installed with --no-deps after pixi
451
470
 
@@ -478,8 +497,16 @@ def pixi_install(
478
497
  if not python_path:
479
498
  raise RuntimeError("Could not find Python in pixi environment")
480
499
 
481
- # Get Python version from the pixi environment
482
- py_version = f"{sys.version_info.major}.{sys.version_info.minor}"
500
+ # Get Python version from the pixi environment (not host Python)
501
+ result = subprocess.run(
502
+ [str(python_path), "-c", "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')"],
503
+ capture_output=True, text=True
504
+ )
505
+ if result.returncode == 0:
506
+ py_version = result.stdout.strip()
507
+ else:
508
+ py_version = f"{sys.version_info.major}.{sys.version_info.minor}"
509
+ log(f"Warning: Could not detect pixi Python version, using host: {py_version}")
483
510
 
484
511
  for package in cfg.cuda_packages:
485
512
  # Find direct wheel URL (bypasses metadata validation)
@@ -507,5 +534,20 @@ def pixi_install(
507
534
 
508
535
  log("CUDA packages installed")
509
536
 
537
+ # Move environment from .pixi/envs/default to _env_<name>
538
+ old_env = node_dir / ".pixi" / "envs" / "default"
539
+ env_name = get_env_name(node_dir.name)
540
+ new_env = node_dir / env_name
541
+
542
+ if old_env.exists():
543
+ if new_env.exists():
544
+ shutil.rmtree(new_env) # Clean old env
545
+ shutil.move(str(old_env), str(new_env))
546
+ # Clean up .pixi directory (keep pixi.toml and pixi.lock)
547
+ pixi_dir = node_dir / ".pixi"
548
+ if pixi_dir.exists():
549
+ shutil.rmtree(pixi_dir)
550
+ log(f"Moved environment to {new_env}")
551
+
510
552
  log("Installation complete!")
511
553
  return True
@@ -11,6 +11,12 @@ from pathlib import Path
11
11
  from typing import Optional, Dict
12
12
 
13
13
 
14
+ def get_env_name(dir_name: str) -> str:
15
+ """Convert directory name to env name: ComfyUI-UniRig → _env_unirig"""
16
+ name = dir_name.lower().replace("-", "_").lstrip("comfyui_")
17
+ return f"_env_{name}"
18
+
19
+
14
20
  def _load_env_vars(config_path: str) -> Dict[str, str]:
15
21
  """
16
22
  Load [env_vars] section from comfy-env.toml.
@@ -121,10 +127,15 @@ def setup_env(node_dir: Optional[str] = None) -> None:
121
127
  for key, value in env_vars.items():
122
128
  os.environ[key] = value
123
129
 
124
- pixi_env = os.path.join(node_dir, ".pixi", "envs", "default")
130
+ # Check _env_<name> first, then fallback to old .pixi path
131
+ env_name = get_env_name(os.path.basename(node_dir))
132
+ pixi_env = os.path.join(node_dir, env_name)
125
133
 
126
134
  if not os.path.exists(pixi_env):
127
- return # No pixi environment
135
+ # Fallback to old .pixi path
136
+ pixi_env = os.path.join(node_dir, ".pixi", "envs", "default")
137
+ if not os.path.exists(pixi_env):
138
+ return # No environment found
128
139
 
129
140
  if sys.platform == "win32":
130
141
  # Windows: add to PATH for DLL loading
@@ -40,6 +40,53 @@ _SHUTDOWN = object()
40
40
  _CALL_METHOD = "call_method"
41
41
 
42
42
 
43
+ # ---------------------------------------------------------------------------
44
+ # Tensor file transfer - avoids CUDA IPC issues with cudaMallocAsync
45
+ # ---------------------------------------------------------------------------
46
+
47
+ def _save_tensors_to_files(obj, file_registry=None):
48
+ """Recursively save torch tensors to temp files for IPC."""
49
+ if file_registry is None:
50
+ file_registry = []
51
+
52
+ try:
53
+ import torch
54
+ if isinstance(obj, torch.Tensor):
55
+ import tempfile
56
+ f = tempfile.NamedTemporaryFile(suffix='.pt', delete=False)
57
+ torch.save(obj.cpu(), f.name) # Always save as CPU tensor
58
+ f.close()
59
+ file_registry.append(f.name)
60
+ return {"__tensor_file__": f.name, "dtype": str(obj.dtype), "device": str(obj.device)}
61
+ except ImportError:
62
+ pass
63
+
64
+ if isinstance(obj, dict):
65
+ return {k: _save_tensors_to_files(v, file_registry) for k, v in obj.items()}
66
+ elif isinstance(obj, list):
67
+ return [_save_tensors_to_files(v, file_registry) for v in obj]
68
+ elif isinstance(obj, tuple):
69
+ return tuple(_save_tensors_to_files(v, file_registry) for v in obj)
70
+ return obj
71
+
72
+
73
+ def _load_tensors_from_files(obj):
74
+ """Recursively load torch tensors from temp files."""
75
+ if isinstance(obj, dict):
76
+ if "__tensor_file__" in obj:
77
+ import os
78
+ import torch
79
+ tensor = torch.load(obj["__tensor_file__"], weights_only=True)
80
+ os.unlink(obj["__tensor_file__"]) # Cleanup temp file
81
+ return tensor
82
+ return {k: _load_tensors_from_files(v) for k, v in obj.items()}
83
+ elif isinstance(obj, list):
84
+ return [_load_tensors_from_files(v) for v in obj]
85
+ elif isinstance(obj, tuple):
86
+ return tuple(_load_tensors_from_files(v) for v in obj)
87
+ return obj
88
+
89
+
43
90
  def _dump_worker_env(worker_name: str = "unknown", print_to_terminal: bool = False):
44
91
  """Dump worker environment to .comfy-env/logs/ (always) and optionally print."""
45
92
  import json
@@ -205,14 +252,20 @@ def _worker_loop(queue_in, queue_out, sys_path_additions=None, lib_path=None, en
205
252
  # Handle method call protocol
206
253
  if isinstance(item, tuple) and len(item) == 6 and item[0] == _CALL_METHOD:
207
254
  _, module_name, class_name, method_name, self_state, kwargs = item
255
+ # Load tensors from files (saved by host to avoid cudaMallocAsync IPC issues)
256
+ kwargs = _load_tensors_from_files(kwargs)
208
257
  result = _execute_method_call(
209
258
  module_name, class_name, method_name, self_state, kwargs
210
259
  )
260
+ # Save tensors to files to avoid CUDA IPC issues with cudaMallocAsync
261
+ result = _save_tensors_to_files(result)
211
262
  queue_out.put(("ok", result))
212
263
  else:
213
264
  # Direct function call (legacy)
214
265
  func, args, kwargs = item
215
266
  result = func(*args, **kwargs)
267
+ # Save tensors to files to avoid CUDA IPC issues with cudaMallocAsync
268
+ result = _save_tensors_to_files(result)
216
269
  queue_out.put(("ok", result))
217
270
 
218
271
  except Exception as e:
@@ -646,6 +699,9 @@ class MPWorker(Worker):
646
699
  """
647
700
  self._ensure_started()
648
701
 
702
+ # Save tensors to files to avoid CUDA IPC issues with cudaMallocAsync
703
+ kwargs = _save_tensors_to_files(kwargs)
704
+
649
705
  # Send method call request using protocol
650
706
  self._queue_in.put((
651
707
  _CALL_METHOD,
@@ -672,6 +728,8 @@ class MPWorker(Worker):
672
728
 
673
729
  # Handle response
674
730
  if status == "ok":
731
+ # Load tensors from temp files
732
+ result = _load_tensors_from_files(result)
675
733
  return result
676
734
  elif status == "error":
677
735
  msg, tb = result
@@ -221,28 +221,26 @@ def _to_shm(obj, registry, visited=None):
221
221
  visited[obj_id] = result
222
222
  return result
223
223
 
224
- # torch.Tensor → convert to numpy → shared memory
224
+ # torch.Tensor → convert to numpy → shared memory (with marker to restore type)
225
225
  if t == 'Tensor':
226
226
  arr = obj.detach().cpu().numpy()
227
- return _to_shm(arr, registry, visited)
227
+ result = _to_shm(arr, registry, visited)
228
+ result["__was_tensor__"] = True
229
+ return result
228
230
 
229
- # trimesh.Trimesh → vertices + faces arrays shared memory
231
+ # trimesh.Trimesh → pickle shared memory (preserves visual, metadata, normals)
230
232
  if t == 'Trimesh':
231
- verts = np.ascontiguousarray(obj.vertices, dtype=np.float64)
232
- faces = np.ascontiguousarray(obj.faces, dtype=np.int64)
233
-
234
- v_block = shm.SharedMemory(create=True, size=verts.nbytes)
235
- np.ndarray(verts.shape, verts.dtype, buffer=v_block.buf)[:] = verts
236
- registry.append(v_block)
233
+ import pickle
234
+ mesh_bytes = pickle.dumps(obj)
237
235
 
238
- f_block = shm.SharedMemory(create=True, size=faces.nbytes)
239
- np.ndarray(faces.shape, faces.dtype, buffer=f_block.buf)[:] = faces
240
- registry.append(f_block)
236
+ block = shm.SharedMemory(create=True, size=len(mesh_bytes))
237
+ block.buf[:len(mesh_bytes)] = mesh_bytes
238
+ registry.append(block)
241
239
 
242
240
  result = {
243
241
  "__shm_trimesh__": True,
244
- "v_name": v_block.name, "v_shape": list(verts.shape),
245
- "f_name": f_block.name, "f_shape": list(faces.shape),
242
+ "name": block.name,
243
+ "size": len(mesh_bytes),
246
244
  }
247
245
  visited[obj_id] = result
248
246
  return result
@@ -279,31 +277,28 @@ def _from_shm(obj, unlink=True):
279
277
  return [_from_shm(v, unlink) for v in obj]
280
278
  return obj
281
279
 
282
- # numpy array
280
+ # numpy array (or tensor that was converted to numpy)
283
281
  if "__shm_np__" in obj:
284
282
  block = shm.SharedMemory(name=obj["__shm_np__"])
285
283
  arr = np.ndarray(tuple(obj["shape"]), dtype=np.dtype(obj["dtype"]), buffer=block.buf).copy()
286
284
  block.close()
287
285
  if unlink:
288
286
  block.unlink()
287
+ # Convert back to tensor if it was originally a tensor
288
+ if obj.get("__was_tensor__"):
289
+ import torch
290
+ return torch.from_numpy(arr)
289
291
  return arr
290
292
 
291
- # trimesh
293
+ # trimesh (pickled to preserve visual, metadata, normals)
292
294
  if "__shm_trimesh__" in obj:
293
- import trimesh
294
- v_block = shm.SharedMemory(name=obj["v_name"])
295
- verts = np.ndarray(tuple(obj["v_shape"]), dtype=np.float64, buffer=v_block.buf).copy()
296
- v_block.close()
297
- if unlink:
298
- v_block.unlink()
299
-
300
- f_block = shm.SharedMemory(name=obj["f_name"])
301
- faces = np.ndarray(tuple(obj["f_shape"]), dtype=np.int64, buffer=f_block.buf).copy()
302
- f_block.close()
295
+ import pickle
296
+ block = shm.SharedMemory(name=obj["name"])
297
+ mesh_bytes = bytes(block.buf[:obj["size"]])
298
+ block.close()
303
299
  if unlink:
304
- f_block.unlink()
305
-
306
- return trimesh.Trimesh(vertices=verts, faces=faces, process=False)
300
+ block.unlink()
301
+ return pickle.loads(mesh_bytes)
307
302
 
308
303
  # regular dict - recurse
309
304
  return {k: _from_shm(v, unlink) for k, v in obj.items()}
@@ -420,42 +415,29 @@ faulthandler.enable(file=sys.stderr, all_threads=True)
420
415
  # Debug logging (set COMFY_ENV_DEBUG=1 to enable)
421
416
  _DEBUG = os.environ.get("COMFY_ENV_DEBUG", "").lower() in ("1", "true", "yes")
422
417
 
423
- # Pre-import bpy FIRST to avoid DLL conflicts with numpy/torch/MKL
424
- # bpy's DLLs must be loaded before other packages load conflicting versions
425
- try:
426
- import bpy
427
- if _DEBUG:
428
- print("[worker] Pre-imported bpy successfully", file=sys.stderr, flush=True)
429
- except ImportError as e:
430
- # bpy not available in this environment - that's fine
431
- pass
432
- except Exception as e:
433
- if _DEBUG:
434
- print(f"[worker] bpy pre-import warning: {e}", file=sys.stderr, flush=True)
435
-
436
418
  # Watchdog: dump all thread stacks every 60 seconds to catch hangs
437
419
  import threading
438
420
  import tempfile as _tempfile
439
421
  _watchdog_log = os.path.join(_tempfile.gettempdir(), "comfy_worker_watchdog.log")
440
422
  def _watchdog():
441
423
  import time
442
- import io
443
424
  tick = 0
444
425
  while True:
445
426
  time.sleep(60)
446
427
  tick += 1
447
- # Capture stack dump to string
448
- buf = io.StringIO()
449
- faulthandler.dump_traceback(file=buf, all_threads=True)
450
- dump = buf.getvalue()
451
-
452
- # Write to file
428
+ # Dump to temp file first (faulthandler needs real file descriptor)
429
+ tmp_path = _watchdog_log + ".tmp"
430
+ with open(tmp_path, "w", encoding="utf-8") as tmp:
431
+ faulthandler.dump_traceback(file=tmp, all_threads=True)
432
+ with open(tmp_path, "r", encoding="utf-8") as tmp:
433
+ dump = tmp.read()
434
+
435
+ # Write to persistent log
453
436
  with open(_watchdog_log, "a", encoding="utf-8") as f:
454
437
  f.write(f"\\n=== WATCHDOG TICK {tick} ({time.strftime('%H:%M:%S')}) ===\\n")
455
438
  f.write(dump)
456
439
  f.write("=== END ===\\n")
457
440
  f.flush()
458
- os.fsync(f.fileno())
459
441
 
460
442
  # Also print
461
443
  print(f"\\n=== WATCHDOG TICK {tick} ===", flush=True)
@@ -544,24 +526,23 @@ def _to_shm(obj, registry, visited=None):
544
526
 
545
527
  if t == 'Tensor':
546
528
  arr = obj.detach().cpu().numpy()
547
- return _to_shm(arr, registry, visited)
529
+ result = _to_shm(arr, registry, visited)
530
+ result["__was_tensor__"] = True
531
+ return result
548
532
 
533
+ # trimesh.Trimesh → pickle → shared memory (preserves visual, metadata, normals)
549
534
  if t == 'Trimesh':
550
- verts = np.ascontiguousarray(obj.vertices, dtype=np.float64)
551
- faces = np.ascontiguousarray(obj.faces, dtype=np.int64)
552
-
553
- v_block = shm.SharedMemory(create=True, size=verts.nbytes)
554
- np.ndarray(verts.shape, verts.dtype, buffer=v_block.buf)[:] = verts
555
- registry.append(v_block)
535
+ import pickle
536
+ mesh_bytes = pickle.dumps(obj)
556
537
 
557
- f_block = shm.SharedMemory(create=True, size=faces.nbytes)
558
- np.ndarray(faces.shape, faces.dtype, buffer=f_block.buf)[:] = faces
559
- registry.append(f_block)
538
+ block = shm.SharedMemory(create=True, size=len(mesh_bytes))
539
+ block.buf[:len(mesh_bytes)] = mesh_bytes
540
+ registry.append(block)
560
541
 
561
542
  result = {
562
543
  "__shm_trimesh__": True,
563
- "v_name": v_block.name, "v_shape": list(verts.shape),
564
- "f_name": f_block.name, "f_shape": list(faces.shape),
544
+ "name": block.name,
545
+ "size": len(mesh_bytes),
565
546
  }
566
547
  visited[obj_id] = result
567
548
  return result
@@ -587,18 +568,18 @@ def _from_shm(obj):
587
568
  block = shm.SharedMemory(name=obj["__shm_np__"])
588
569
  arr = np.ndarray(tuple(obj["shape"]), dtype=np.dtype(obj["dtype"]), buffer=block.buf).copy()
589
570
  block.close()
571
+ # Convert back to tensor if it was originally a tensor
572
+ if obj.get("__was_tensor__"):
573
+ import torch
574
+ return torch.from_numpy(arr)
590
575
  return arr
576
+ # trimesh (pickled to preserve visual, metadata, normals)
591
577
  if "__shm_trimesh__" in obj:
592
- import trimesh
593
- v_block = shm.SharedMemory(name=obj["v_name"])
594
- verts = np.ndarray(tuple(obj["v_shape"]), dtype=np.float64, buffer=v_block.buf).copy()
595
- v_block.close()
596
-
597
- f_block = shm.SharedMemory(name=obj["f_name"])
598
- faces = np.ndarray(tuple(obj["f_shape"]), dtype=np.int64, buffer=f_block.buf).copy()
599
- f_block.close()
600
-
601
- return trimesh.Trimesh(vertices=verts, faces=faces, process=False)
578
+ import pickle
579
+ block = shm.SharedMemory(name=obj["name"])
580
+ mesh_bytes = bytes(block.buf[:obj["size"]])
581
+ block.close()
582
+ return pickle.loads(mesh_bytes)
602
583
  return {k: _from_shm(v) for k, v in obj.items()}
603
584
 
604
585
  def _cleanup_shm(registry):
File without changes
File without changes
File without changes