comfy-env 0.1.12__py3-none-any.whl → 0.1.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -36,7 +36,7 @@ _DEBUG = os.environ.get("COMFY_ENV_DEBUG", "").lower() in ("1", "true", "yes")
 
 
 def get_env_name(dir_name: str) -> str:
-    """Convert directory name to env name: ComfyUI-UniRig _env_unirig"""
+    """Convert directory name to env name: ComfyUI-UniRig -> _env_unirig"""
     name = dir_name.lower().replace("-", "_").lstrip("comfyui_")
     return f"_env_{name}"
 
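The docstring fix above only adds the missing arrow; the mapping itself is unchanged across the three copies of this helper in the diff. As a minimal illustrative sketch of the documented convention (hypothetical helper name; str.removeprefix is used here because str.lstrip strips a character set rather than a literal prefix):

    def env_name_for(dir_name: str) -> str:
        # Documented convention: "ComfyUI-UniRig" -> "_env_unirig"
        name = dir_name.lower().replace("-", "_").removeprefix("comfyui_")
        return f"_env_{name}"

    assert env_name_for("ComfyUI-UniRig") == "_env_unirig"
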
comfy_env/pixi/core.py CHANGED
@@ -282,7 +282,7 @@ def ensure_pixi(
 
 
 def get_env_name(dir_name: str) -> str:
-    """Convert directory name to env name: ComfyUI-UniRig _env_unirig"""
+    """Convert directory name to env name: ComfyUI-UniRig -> _env_unirig"""
     name = dir_name.lower().replace("-", "_").lstrip("comfyui_")
     return f"_env_{name}"
 
comfy_env/prestartup.py CHANGED
@@ -12,7 +12,7 @@ from typing import Optional, Dict
 
 
 def get_env_name(dir_name: str) -> str:
-    """Convert directory name to env name: ComfyUI-UniRig _env_unirig"""
+    """Convert directory name to env name: ComfyUI-UniRig -> _env_unirig"""
     name = dir_name.lower().replace("-", "_").lstrip("comfyui_")
     return f"_env_{name}"
 
comfy_env/workers/mp.py CHANGED
@@ -29,6 +29,7 @@ from queue import Empty as QueueEmpty
 from typing import Any, Callable, Optional
 
 from .base import Worker, WorkerError
+from .tensor_utils import prepare_for_ipc_recursive, keep_tensors_recursive
 
 logger = logging.getLogger("comfy_env")
 
@@ -40,8 +41,20 @@ _SHUTDOWN = object()
 _CALL_METHOD = "call_method"
 
 
+def _can_use_cuda_ipc():
+    """
+    Check if CUDA IPC is available.
+
+    CUDA IPC works with native allocator but breaks with cudaMallocAsync.
+    If no backend is specified, CUDA IPC should work (PyTorch default is native).
+    """
+    import os
+    conf = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', '')
+    return 'cudaMallocAsync' not in conf
+
+
 # ---------------------------------------------------------------------------
-# Tensor file transfer - avoids CUDA IPC issues with cudaMallocAsync
+# Tensor file transfer - fallback for cudaMallocAsync (CUDA IPC doesn't work)
 # ---------------------------------------------------------------------------
 
 def _save_tensors_to_files(obj, file_registry=None):
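The new helper gates the transfer path purely on PYTORCH_CUDA_ALLOC_CONF, the standard PyTorch allocator setting, so the decision is made per process from the environment. A small illustrative check (not part of the package):

    import os

    # Default: variable unset, native caching allocator, CUDA IPC assumed to work.
    os.environ.pop("PYTORCH_CUDA_ALLOC_CONF", None)
    assert "cudaMallocAsync" not in os.environ.get("PYTORCH_CUDA_ALLOC_CONF", "")

    # Opting into the asynchronous allocator flips the check, so comfy-env
    # falls back to writing tensors to temporary files instead of using IPC.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "backend:cudaMallocAsync"
    assert "cudaMallocAsync" in os.environ["PYTORCH_CUDA_ALLOC_CONF"]
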
@@ -252,20 +265,31 @@ def _worker_loop(queue_in, queue_out, sys_path_additions=None, lib_path=None, en
             # Handle method call protocol
             if isinstance(item, tuple) and len(item) == 6 and item[0] == _CALL_METHOD:
                 _, module_name, class_name, method_name, self_state, kwargs = item
-                # Load tensors from files (saved by host to avoid cudaMallocAsync IPC issues)
-                kwargs = _load_tensors_from_files(kwargs)
+                # Load tensors from files if using file-based transfer
+                if not _can_use_cuda_ipc():
+                    kwargs = _load_tensors_from_files(kwargs)
                 result = _execute_method_call(
                     module_name, class_name, method_name, self_state, kwargs
                 )
-                # Save tensors to files to avoid CUDA IPC issues with cudaMallocAsync
-                result = _save_tensors_to_files(result)
+                # Handle result based on allocator
+                if _can_use_cuda_ipc():
+                    keep_tensors_recursive(result)
+                else:
+                    result = _save_tensors_to_files(result)
                 queue_out.put(("ok", result))
             else:
                 # Direct function call (legacy)
                 func, args, kwargs = item
+                # Load tensors from files if using file-based transfer
+                if not _can_use_cuda_ipc():
+                    args = tuple(_load_tensors_from_files(a) for a in args)
+                    kwargs = _load_tensors_from_files(kwargs)
                 result = func(*args, **kwargs)
-                # Save tensors to files to avoid CUDA IPC issues with cudaMallocAsync
-                result = _save_tensors_to_files(result)
+                # Handle result based on allocator
+                if _can_use_cuda_ipc():
+                    keep_tensors_recursive(result)
+                else:
+                    result = _save_tensors_to_files(result)
                 queue_out.put(("ok", result))
 
         except Exception as e:
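keep_tensors_recursive comes from tensor_utils.py, which is not part of this diff. One plausible reading, sketched here purely as an assumption, is that the worker keeps references to any CUDA tensors it returns, since PyTorch's CUDA IPC requires the sending process to keep a shared tensor alive while the receiver still uses it:

    # Assumed sketch only; the shipped helper lives in comfy_env/workers/tensor_utils.py.
    import torch

    _KEEPALIVE = []  # references held for the lifetime of the worker process

    def keep_tensors_recursive_sketch(obj):
        if isinstance(obj, torch.Tensor) and obj.is_cuda:
            _KEEPALIVE.append(obj)  # keep the CUDA allocation alive for IPC consumers
        elif isinstance(obj, dict):
            for v in obj.values():
                keep_tensors_recursive_sketch(v)
        elif isinstance(obj, (list, tuple)):
            for v in obj:
                keep_tensors_recursive_sketch(v)
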
@@ -661,6 +685,16 @@ class MPWorker(Worker):
         """
         self._ensure_started()
 
+        # Handle tensors based on allocator
+        if _can_use_cuda_ipc():
+            # CUDA IPC - zero copy (works with native allocator)
+            kwargs = {k: prepare_for_ipc_recursive(v) for k, v in kwargs.items()}
+            args = tuple(prepare_for_ipc_recursive(a) for a in args)
+        else:
+            # File-based transfer (fallback for cudaMallocAsync)
+            kwargs = _save_tensors_to_files(kwargs)
+            args = tuple(_save_tensors_to_files(a) for a in args)
+
         # Send work item
         self._queue_in.put((func, args, kwargs))
 
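prepare_for_ipc_recursive is likewise defined in tensor_utils.py and not shown here. As an assumed sketch only, preparing a structure for queue transport typically means detaching tensors and making them contiguous so that torch's multiprocessing reductions can share them (CUDA IPC handles or CPU shared memory) when the object is pickled onto the queue:

    # Assumed sketch only; the shipped helper in tensor_utils.py may differ.
    import torch

    def prepare_for_ipc_recursive_sketch(obj):
        if isinstance(obj, torch.Tensor):
            # Detached, contiguous tensors travel cleanly through
            # torch.multiprocessing's reductions.
            return obj.detach().contiguous()
        if isinstance(obj, dict):
            return {k: prepare_for_ipc_recursive_sketch(v) for k, v in obj.items()}
        if isinstance(obj, (list, tuple)):
            converted = [prepare_for_ipc_recursive_sketch(v) for v in obj]
            return tuple(converted) if isinstance(obj, tuple) else converted
        return obj
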
@@ -699,8 +733,13 @@ class MPWorker(Worker):
         """
         self._ensure_started()
 
-        # Save tensors to files to avoid CUDA IPC issues with cudaMallocAsync
-        kwargs = _save_tensors_to_files(kwargs)
+        # Handle tensors based on allocator
+        if _can_use_cuda_ipc():
+            # CUDA IPC - zero copy (works with native allocator)
+            kwargs = prepare_for_ipc_recursive(kwargs)
+        else:
+            # File-based transfer (fallback for cudaMallocAsync)
+            kwargs = _save_tensors_to_files(kwargs)
 
         # Send method call request using protocol
         self._queue_in.put((
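For reference, the request placed on the queue here is the same 6-tuple the worker loop unpacks above; the field values below are purely illustrative:

    # Shape of the call_method message; concrete values are illustrative only.
    request = (
        "call_method",       # _CALL_METHOD marker checked by the worker
        "my_nodes.rigging",  # module_name to import inside the worker
        "UniRigNode",        # class_name whose method is being invoked
        "process",           # method_name
        {"seed": 0},         # self_state passed to _execute_method_call
        {"mesh": None},      # kwargs (tensors already prepared or saved to files)
    )
    # The worker replies with ("ok", result) or ("error", (msg, traceback)).
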
@@ -728,8 +767,9 @@ class MPWorker(Worker):
 
         # Handle response
         if status == "ok":
-            # Load tensors from temp files
-            result = _load_tensors_from_files(result)
+            # Load tensors from temp files if using file-based transfer
+            if not _can_use_cuda_ipc():
+                result = _load_tensors_from_files(result)
             return result
         elif status == "error":
             msg, tb = result
@@ -211,7 +211,7 @@ def _to_shm(obj, registry, visited=None):
 
     t = type(obj).__name__
 
-    # numpy array direct shared memory
+    # numpy array -> direct shared memory
     if t == 'ndarray':
         arr = np.ascontiguousarray(obj)
         block = shm.SharedMemory(create=True, size=arr.nbytes)
@@ -221,14 +221,14 @@ def _to_shm(obj, registry, visited=None):
         visited[obj_id] = result
         return result
 
-    # torch.Tensor convert to numpy shared memory (with marker to restore type)
+    # torch.Tensor -> convert to numpy -> shared memory (with marker to restore type)
     if t == 'Tensor':
         arr = obj.detach().cpu().numpy()
         result = _to_shm(arr, registry, visited)
         result["__was_tensor__"] = True
         return result
 
-    # trimesh.Trimesh pickle shared memory (preserves visual, metadata, normals)
+    # trimesh.Trimesh -> pickle -> shared memory (preserves visual, metadata, normals)
     if t == 'Trimesh':
         import pickle
         mesh_bytes = pickle.dumps(obj)
@@ -245,7 +245,7 @@ def _to_shm(obj, registry, visited=None):
         visited[obj_id] = result
         return result
 
-    # Path string
+    # Path -> string
     from pathlib import PurePath
     if isinstance(obj, PurePath):
         return str(obj)
@@ -530,7 +530,7 @@ def _to_shm(obj, registry, visited=None):
         result["__was_tensor__"] = True
         return result
 
-    # trimesh.Trimesh pickle shared memory (preserves visual, metadata, normals)
+    # trimesh.Trimesh -> pickle -> shared memory (preserves visual, metadata, normals)
     if t == 'Trimesh':
         import pickle
         mesh_bytes = pickle.dumps(obj)
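The comment fixes above describe how _to_shm moves data into multiprocessing shared memory. A self-contained sketch of the ndarray round trip the first branch implies (the metadata dict layout here is an assumption, not the package's exact format):

    from multiprocessing import shared_memory as shm
    import numpy as np

    # Producer: copy a contiguous array into a named shared-memory block.
    arr = np.arange(12, dtype=np.float32).reshape(3, 4)
    block = shm.SharedMemory(create=True, size=arr.nbytes)
    np.ndarray(arr.shape, dtype=arr.dtype, buffer=block.buf)[:] = arr
    meta = {"name": block.name, "shape": arr.shape, "dtype": str(arr.dtype)}

    # Consumer: attach by name and view the same bytes without copying.
    attached = shm.SharedMemory(name=meta["name"])
    view = np.ndarray(meta["shape"], dtype=meta["dtype"], buffer=attached.buf)
    assert np.array_equal(view, arr)

    # Cleanup: close both handles, unlink the block once.
    attached.close()
    block.close()
    block.unlink()
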
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: comfy-env
-Version: 0.1.12
+Version: 0.1.13
 Summary: Environment management for ComfyUI custom nodes - CUDA wheel resolution and process isolation
 Project-URL: Homepage, https://github.com/PozzettiAndrea/comfy-env
 Project-URL: Repository, https://github.com/PozzettiAndrea/comfy-env
@@ -3,14 +3,14 @@ comfy_env/cli.py,sha256=ty4HYlzollCUCS0o6Sha6eczPAsW_gHRVgvck3IfA2w,12723
 comfy_env/errors.py,sha256=q-C3vyrPa_kk_Ao8l17mIGfJiG2IR0hCFV0GFcNLmcI,9924
 comfy_env/install.py,sha256=N7eBj8wB2DrGepVYk-Hks2mSf6UuGzj34pfVLNYJgQ4,10357
 comfy_env/nodes.py,sha256=tUbsUdjnJCUUoxM7NpsdUuawuIz1exfOmWdsLGILXiY,5391
-comfy_env/prestartup.py,sha256=aKuW07R0CmoeIXukENJZnGMPzCv7kU7azvHgIOt2usk,5813
+comfy_env/prestartup.py,sha256=aNiTgkDeTdez7bsX0Ew3AoPoUKiTM3fmjjhCGWzZbLY,5812
 comfy_env/config/__init__.py,sha256=4Guylkb-FV8QxhFwschzpzbr2eu8y-KNgNT3_JOm9jc,403
 comfy_env/config/parser.py,sha256=dA1lX5ExBEfCqUJwe4V5i_jn2NJ69bMq3c3ji3lMSV8,4295
 comfy_env/config/types.py,sha256=Sb8HO34xsSZu5YAc2K4M7Gb3QNevJlngf12hHiwuU0w,2140
 comfy_env/isolation/__init__.py,sha256=vw9a4mpJ2CFjy-PLe_A3zQ6umBQklgqWNxwn9beNw3g,175
-comfy_env/isolation/wrap.py,sha256=cTRzbdHJncmg4yUAyH5n9TPBu88grv6D1XezZwXKpok,14638
+comfy_env/isolation/wrap.py,sha256=A6kv4p4xBxx9-1yCNDKuKJi6esnT3hF2Y5wqLQZB_ig,14637
 comfy_env/pixi/__init__.py,sha256=BUrq7AQf3WDm0cHWh72B2xZbURNnDu2dCuELWiQCUiM,997
-comfy_env/pixi/core.py,sha256=fqozZZEoub8dAQaE6v6gyqw1mMiAYiCcdzH0gjCANBo,20637
+comfy_env/pixi/core.py,sha256=ZpjAmwh7EE1jqBK1Q41GJEEXIsAtyqdNsr_ygoPQdUY,20636
 comfy_env/pixi/cuda_detection.py,sha256=sqB3LjvGNdV4eFqiARQGfyecBM3ZiUmeh6nG0YCRYQw,9751
 comfy_env/pixi/resolver.py,sha256=U_A8rBDxCj4gUlJt2YJQniP4cCKqxJEiVFgXOoH7vM8,6339
 comfy_env/pixi/platform/__init__.py,sha256=Nb5MPZIEeanSMEWwqU4p4bnEKTJn1tWcwobnhq9x9IY,614
@@ -22,11 +22,11 @@ comfy_env/templates/comfy-env-instructions.txt,sha256=ve1RAthW7ouumU9h6DM7mIRX1M
 comfy_env/templates/comfy-env.toml,sha256=ROIqi4BlPL1MEdL1VgebfTHpdwPNYGHwWeigI9Kw-1I,4831
 comfy_env/workers/__init__.py,sha256=TMVG55d2XLP1mJ3x1d16H0SBDJZtk2kMC5P4HLk9TrA,1073
 comfy_env/workers/base.py,sha256=4ZYTaQ4J0kBHCoO_OfZnsowm4rJCoqinZUaOtgkOPbw,2307
-comfy_env/workers/mp.py,sha256=vsxDGWepmSNgfqBhZPW7h8yOiKEyQcDFYP09masLTV4,32337
-comfy_env/workers/subprocess.py,sha256=bNwHpgz_EIIwZVRlgsx_ZEowYRrWbp-8uUIboacc-7M,57136
+comfy_env/workers/mp.py,sha256=R0XWsiHv8gswxa_-iNHU14o_9Og0RFG0QnY9DRZzn2c,34060
+comfy_env/workers/subprocess.py,sha256=B0PsHLuywPIJSRpuA4E8Dg6tGzuro6wqIr8czqMqtPE,57128
 comfy_env/workers/tensor_utils.py,sha256=TCuOAjJymrSbkgfyvcKtQ_KbVWTqSwP9VH_bCaFLLq8,6409
-comfy_env-0.1.12.dist-info/METADATA,sha256=J41pwCOTdj_2zckDDKjse-HfGWErDaDX3Hire4lKwgM,6971
-comfy_env-0.1.12.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-comfy_env-0.1.12.dist-info/entry_points.txt,sha256=J4fXeqgxU_YenuW_Zxn_pEL7J-3R0--b6MS5t0QmAr0,49
-comfy_env-0.1.12.dist-info/licenses/LICENSE,sha256=E68QZMMpW4P2YKstTZ3QU54HRQO8ecew09XZ4_Vn870,1093
-comfy_env-0.1.12.dist-info/RECORD,,
+comfy_env-0.1.13.dist-info/METADATA,sha256=xBivq9U8DeIzi-fwdAHZ1GjSyG-p724kEg42FqMdSlc,6971
+comfy_env-0.1.13.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+comfy_env-0.1.13.dist-info/entry_points.txt,sha256=J4fXeqgxU_YenuW_Zxn_pEL7J-3R0--b6MS5t0QmAr0,49
+comfy_env-0.1.13.dist-info/licenses/LICENSE,sha256=E68QZMMpW4P2YKstTZ3QU54HRQO8ecew09XZ4_Vn870,1093
+comfy_env-0.1.13.dist-info/RECORD,,