comfy-env 0.0.5__tar.gz → 0.0.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. {comfy_env-0.0.5 → comfy_env-0.0.6}/PKG-INFO +1 -1
  2. {comfy_env-0.0.5 → comfy_env-0.0.6}/pyproject.toml +1 -1
  3. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/__init__.py +1 -1
  4. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/workers/venv.py +168 -132
  5. {comfy_env-0.0.5 → comfy_env-0.0.6}/.github/workflows/publish.yml +0 -0
  6. {comfy_env-0.0.5 → comfy_env-0.0.6}/.gitignore +0 -0
  7. {comfy_env-0.0.5 → comfy_env-0.0.6}/CLAUDE.md +0 -0
  8. {comfy_env-0.0.5 → comfy_env-0.0.6}/CRITICISM.md +0 -0
  9. {comfy_env-0.0.5 → comfy_env-0.0.6}/LICENSE +0 -0
  10. {comfy_env-0.0.5 → comfy_env-0.0.6}/README.md +0 -0
  11. {comfy_env-0.0.5 → comfy_env-0.0.6}/examples/basic_node/__init__.py +0 -0
  12. {comfy_env-0.0.5 → comfy_env-0.0.6}/examples/basic_node/comfy-env.toml +0 -0
  13. {comfy_env-0.0.5 → comfy_env-0.0.6}/examples/basic_node/nodes.py +0 -0
  14. {comfy_env-0.0.5 → comfy_env-0.0.6}/examples/basic_node/worker.py +0 -0
  15. {comfy_env-0.0.5 → comfy_env-0.0.6}/examples/decorator_node/__init__.py +0 -0
  16. {comfy_env-0.0.5 → comfy_env-0.0.6}/examples/decorator_node/nodes.py +0 -0
  17. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/cli.py +0 -0
  18. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/decorator.py +0 -0
  19. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/env/__init__.py +0 -0
  20. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/env/config.py +0 -0
  21. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/env/config_file.py +0 -0
  22. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/env/detection.py +0 -0
  23. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/env/manager.py +0 -0
  24. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/env/platform/__init__.py +0 -0
  25. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/env/platform/base.py +0 -0
  26. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/env/platform/darwin.py +0 -0
  27. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/env/platform/linux.py +0 -0
  28. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/env/platform/windows.py +0 -0
  29. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/env/security.py +0 -0
  30. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/errors.py +0 -0
  31. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/install.py +0 -0
  32. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/ipc/__init__.py +0 -0
  33. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/ipc/bridge.py +0 -0
  34. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/ipc/protocol.py +0 -0
  35. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/ipc/tensor.py +0 -0
  36. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/ipc/torch_bridge.py +0 -0
  37. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/ipc/transport.py +0 -0
  38. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/ipc/worker.py +0 -0
  39. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/registry.py +0 -0
  40. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/resolver.py +0 -0
  41. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/runner.py +0 -0
  42. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/stubs/__init__.py +0 -0
  43. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/stubs/folder_paths.py +0 -0
  44. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/workers/__init__.py +0 -0
  45. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/workers/base.py +0 -0
  46. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/workers/pool.py +0 -0
  47. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/workers/tensor_utils.py +0 -0
  48. {comfy_env-0.0.5 → comfy_env-0.0.6}/src/comfy_env/workers/torch_mp.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: comfy-env
3
- Version: 0.0.5
3
+ Version: 0.0.6
4
4
  Summary: Environment management for ComfyUI custom nodes - CUDA wheel resolution and process isolation
5
5
  Project-URL: Homepage, https://github.com/PozzettiAndrea/comfy-env
6
6
  Project-URL: Repository, https://github.com/PozzettiAndrea/comfy-env
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "comfy-env"
3
- version = "0.0.5"
3
+ version = "0.0.6"
4
4
  description = "Environment management for ComfyUI custom nodes - CUDA wheel resolution and process isolation"
5
5
  readme = "README.md"
6
6
  license = {text = "MIT"}
@@ -40,7 +40,7 @@ This package provides:
40
40
  The @isolated decorator and WorkerBridge are still available.
41
41
  """
42
42
 
43
- __version__ = "0.0.5"
43
+ __version__ = "0.0.6"
44
44
 
45
45
  from .env.config import IsolatedEnv, EnvManagerConfig, LocalConfig, NodeReq
46
46
  from .env.config_file import (
@@ -412,56 +412,60 @@ class VenvWorker(Worker):
412
412
  return f"<VenvWorker name={self.name!r} python={self.python}>"
413
413
 
414
414
 
415
- def _queue_worker_entrypoint(to_host_queue, from_host_queue, shm_dir, sys_paths):
416
- """
417
- Worker process entrypoint using queue-based IPC.
418
-
419
- This function runs in the isolated venv subprocess and handles
420
- requests from the host process.
415
+ # Persistent worker script - runs as __main__ in the venv Python subprocess
416
+ # Uses stdin/stdout JSON for IPC - avoids Windows multiprocessing spawn issues entirely
417
+ _PERSISTENT_WORKER_SCRIPT = '''
418
+ import sys
419
+ import os
420
+ import json
421
+ import traceback
422
+ from types import SimpleNamespace
421
423
 
422
- Args:
423
- to_host_queue: Queue for sending messages to host (worker -> host)
424
- from_host_queue: Queue for receiving messages from host (host -> worker)
425
- shm_dir: Shared memory directory for tensor files
426
- sys_paths: List of paths to add to sys.path
427
- """
428
- import sys
429
- import traceback
430
- from pathlib import Path
431
- from types import SimpleNamespace
424
+ def _deserialize_isolated_objects(obj):
425
+ """Reconstruct objects serialized with __isolated_object__ marker."""
426
+ if isinstance(obj, dict):
427
+ if obj.get("__isolated_object__"):
428
+ attrs = {k: _deserialize_isolated_objects(v) for k, v in obj.get("__attrs__", {}).items()}
429
+ ns = SimpleNamespace(**attrs)
430
+ ns.__class_name__ = obj.get("__class_name__", "Unknown")
431
+ return ns
432
+ return {k: _deserialize_isolated_objects(v) for k, v in obj.items()}
433
+ elif isinstance(obj, list):
434
+ return [_deserialize_isolated_objects(v) for v in obj]
435
+ elif isinstance(obj, tuple):
436
+ return tuple(_deserialize_isolated_objects(v) for v in obj)
437
+ return obj
432
438
 
433
- # Setup paths
434
- for p in sys_paths:
439
+ def main():
440
+ # Read config from first line
441
+ config_line = sys.stdin.readline()
442
+ if not config_line:
443
+ return
444
+ config = json.loads(config_line)
445
+
446
+ # Setup sys.path
447
+ for p in config.get("sys_paths", []):
435
448
  if p not in sys.path:
436
449
  sys.path.insert(0, p)
437
450
 
438
- def _deserialize_isolated_objects(obj):
439
- """Reconstruct objects serialized with __isolated_object__ marker."""
440
- if isinstance(obj, dict):
441
- if obj.get("__isolated_object__"):
442
- attrs = {k: _deserialize_isolated_objects(v) for k, v in obj.get("__attrs__", {}).items()}
443
- ns = SimpleNamespace(**attrs)
444
- ns.__class_name__ = obj.get("__class_name__", "Unknown")
445
- return ns
446
- return {k: _deserialize_isolated_objects(v) for k, v in obj.items()}
447
- elif isinstance(obj, list):
448
- return [_deserialize_isolated_objects(v) for v in obj]
449
- elif isinstance(obj, tuple):
450
- return tuple(_deserialize_isolated_objects(v) for v in obj)
451
- return obj
451
+ # Import torch after path setup
452
+ import torch
452
453
 
453
454
  # Signal ready
454
- to_host_queue.put({"status": "ready"})
455
-
456
- import torch
455
+ sys.stdout.write(json.dumps({"status": "ready"}) + "\\n")
456
+ sys.stdout.flush()
457
457
 
458
+ # Process requests
458
459
  while True:
459
460
  try:
460
- request = from_host_queue.get()
461
+ line = sys.stdin.readline()
462
+ if not line:
463
+ break
464
+ request = json.loads(line)
461
465
  except Exception:
462
466
  break
463
467
 
464
- if request is None or request.get("method") == "shutdown":
468
+ if request.get("method") == "shutdown":
465
469
  break
466
470
 
467
471
  try:
@@ -481,7 +485,6 @@ def _queue_worker_entrypoint(to_host_queue, from_host_queue, shm_dir, sys_paths)
481
485
  module = __import__(module_name, fromlist=[""])
482
486
 
483
487
  if request_type == "call_method":
484
- # Call a method on a class instance
485
488
  class_name = request["class_name"]
486
489
  method_name = request["method_name"]
487
490
  self_state = request.get("self_state")
@@ -493,7 +496,6 @@ def _queue_worker_entrypoint(to_host_queue, from_host_queue, shm_dir, sys_paths)
493
496
  method = getattr(instance, method_name)
494
497
  result = method(**inputs)
495
498
  else:
496
- # Call a module-level function
497
499
  func_name = request["func"]
498
500
  func = getattr(module, func_name)
499
501
  result = func(**inputs)
@@ -502,25 +504,37 @@ def _queue_worker_entrypoint(to_host_queue, from_host_queue, shm_dir, sys_paths)
502
504
  if outputs_path:
503
505
  torch.save(result, outputs_path)
504
506
 
505
- to_host_queue.put({"status": "ok"})
507
+ sys.stdout.write(json.dumps({"status": "ok"}) + "\\n")
508
+ sys.stdout.flush()
506
509
 
507
510
  except Exception as e:
508
- to_host_queue.put({
511
+ sys.stdout.write(json.dumps({
509
512
  "status": "error",
510
513
  "error": str(e),
511
514
  "traceback": traceback.format_exc(),
512
- })
515
+ }) + "\\n")
516
+ sys.stdout.flush()
517
+
518
+ if __name__ == "__main__":
519
+ main()
520
+ '''
513
521
 
514
522
 
515
523
  class PersistentVenvWorker(Worker):
516
524
  """
517
525
  Persistent version of VenvWorker that keeps subprocess alive.
518
526
 
519
- This reduces per-call overhead by ~200-400ms by avoiding subprocess spawn.
520
- Uses multiprocessing.Queue for cross-platform communication with zero-copy
521
- tensor transfer support.
527
+ Uses subprocess.Popen with stdin/stdout JSON IPC instead of multiprocessing.
528
+ This avoids Windows multiprocessing spawn issues where the child process
529
+ tries to reimport __main__ (which fails when using a different Python).
530
+
531
+ Benefits:
532
+ - Works on Windows with different venv Python (full isolation)
533
+ - Compiled CUDA extensions load correctly in the venv
534
+ - ~50-100ms per call (vs ~300-500ms for VenvWorker per-call spawn)
535
+ - Tensor transfer via shared memory files
522
536
 
523
- Use this for high-frequency calls to the same venv.
537
+ Use this for high-frequency calls to isolated venvs.
524
538
  """
525
539
 
526
540
  def __init__(
@@ -530,7 +544,7 @@ class PersistentVenvWorker(Worker):
530
544
  sys_path: Optional[List[str]] = None,
531
545
  env: Optional[Dict[str, str]] = None,
532
546
  name: Optional[str] = None,
533
- share_torch: bool = True,
547
+ share_torch: bool = True, # Kept for API compatibility
534
548
  ):
535
549
  """
536
550
  Initialize persistent worker.
@@ -541,27 +555,26 @@ class PersistentVenvWorker(Worker):
541
555
  sys_path: Additional paths to add to sys.path.
542
556
  env: Additional environment variables.
543
557
  name: Optional name for logging.
544
- share_torch: Use torch.multiprocessing for zero-copy tensor transfer.
558
+ share_torch: Ignored (kept for API compatibility).
545
559
  """
546
560
  self.python = Path(python)
547
561
  self.working_dir = Path(working_dir) if working_dir else Path.cwd()
548
562
  self.sys_path = sys_path or []
549
563
  self.extra_env = env or {}
550
564
  self.name = name or f"PersistentVenvWorker({self.python.parent.parent.name})"
551
- self.share_torch = share_torch
552
565
 
553
566
  if not self.python.exists():
554
567
  raise FileNotFoundError(f"Python not found: {self.python}")
555
568
 
556
569
  self._temp_dir = Path(tempfile.mkdtemp(prefix='comfyui_pvenv_'))
557
570
  self._shm_dir = _get_shm_dir()
558
- self._process = None # Will be multiprocessing.Process
559
- self._to_worker = None # Queue: host -> worker
560
- self._from_worker = None # Queue: worker -> host
561
- self._transport = None
571
+ self._process: Optional[subprocess.Popen] = None
562
572
  self._shutdown = False
563
573
  self._lock = threading.Lock()
564
- self._mp = None # multiprocessing module reference
574
+
575
+ # Write worker script to temp file
576
+ self._worker_script = self._temp_dir / "persistent_worker.py"
577
+ self._worker_script.write_text(_PERSISTENT_WORKER_SCRIPT)
565
578
 
566
579
  def _find_comfyui_base(self) -> Optional[Path]:
567
580
  """Find ComfyUI base directory by walking up from working_dir."""
@@ -573,20 +586,13 @@ class PersistentVenvWorker(Worker):
573
586
  return None
574
587
 
575
588
  def _ensure_started(self):
576
- """Start persistent worker process if not running."""
589
+ """Start persistent worker subprocess if not running."""
577
590
  if self._shutdown:
578
591
  raise RuntimeError(f"{self.name}: Worker has been shut down")
579
592
 
580
- if self._process is not None and self._process.is_alive():
593
+ if self._process is not None and self._process.poll() is None:
581
594
  return # Already running
582
595
 
583
- from ..ipc.transport import create_queue_pair, QueueTransport
584
-
585
- # Create queue pair for bidirectional communication
586
- self._to_worker, self._from_worker, self._mp = create_queue_pair(
587
- share_torch=self.share_torch
588
- )
589
-
590
596
  # Set up environment
591
597
  env = os.environ.copy()
592
598
  env.update(self.extra_env)
@@ -601,35 +607,57 @@ class PersistentVenvWorker(Worker):
601
607
  stubs_dir = Path(__file__).parent.parent / "stubs"
602
608
  all_sys_path = [str(stubs_dir), str(self.working_dir)] + self.sys_path
603
609
 
604
- # On Windows, set VIRTUAL_ENV for proper venv activation
605
- if os.name == "nt":
606
- env["VIRTUAL_ENV"] = str(self.python.parent.parent)
607
-
608
- # Set the Python executable for multiprocessing
609
- self._mp.set_executable(str(self.python))
610
-
611
- # Launch worker process
612
- self._process = self._mp.Process(
613
- target=_queue_worker_entrypoint,
614
- args=(
615
- self._to_worker,
616
- self._from_worker,
617
- str(self._shm_dir),
618
- all_sys_path,
619
- ),
610
+ # Launch subprocess with the venv Python
611
+ # This runs _PERSISTENT_WORKER_SCRIPT as __main__ - no reimport issues!
612
+ self._process = subprocess.Popen(
613
+ [str(self.python), str(self._worker_script)],
614
+ stdin=subprocess.PIPE,
615
+ stdout=subprocess.PIPE,
616
+ stderr=subprocess.PIPE,
617
+ cwd=str(self.working_dir),
618
+ env=env,
619
+ bufsize=1, # Line buffered
620
+ text=True, # Text mode for JSON
620
621
  )
621
- self._process.start()
622
622
 
623
- # Create transport wrapper
624
- self._transport = QueueTransport(self._to_worker, self._from_worker)
623
+ # Send config
624
+ config = {"sys_paths": all_sys_path}
625
+ self._process.stdin.write(json.dumps(config) + "\n")
626
+ self._process.stdin.flush()
627
+
628
+ # Wait for ready signal with timeout
629
+ import select
630
+ if sys.platform == "win32":
631
+ # Windows: can't use select on pipes, use thread with timeout
632
+ ready_line = [None]
633
+ def read_ready():
634
+ try:
635
+ ready_line[0] = self._process.stdout.readline()
636
+ except:
637
+ pass
638
+ t = threading.Thread(target=read_ready, daemon=True)
639
+ t.start()
640
+ t.join(timeout=60)
641
+ line = ready_line[0]
642
+ else:
643
+ # Unix: use select for timeout
644
+ import select
645
+ ready, _, _ = select.select([self._process.stdout], [], [], 60)
646
+ line = self._process.stdout.readline() if ready else None
647
+
648
+ if not line:
649
+ stderr = ""
650
+ try:
651
+ self._process.kill()
652
+ _, stderr = self._process.communicate(timeout=5)
653
+ except:
654
+ pass
655
+ raise RuntimeError(f"{self.name}: Worker failed to start (timeout). stderr: {stderr}")
625
656
 
626
- # Wait for ready signal
627
657
  try:
628
- msg = self._transport.recv(timeout=30)
629
- except Exception:
630
- if self._process.is_alive():
631
- self._process.terminate()
632
- raise RuntimeError(f"{self.name}: Worker failed to start (timeout)")
658
+ msg = json.loads(line)
659
+ except json.JSONDecodeError as e:
660
+ raise RuntimeError(f"{self.name}: Invalid ready message: {line!r}") from e
633
661
 
634
662
  if msg.get("status") != "ready":
635
663
  raise RuntimeError(f"{self.name}: Unexpected ready message: {msg}")
@@ -646,6 +674,45 @@ class PersistentVenvWorker(Worker):
646
674
  f"{self.name}: Use call_module(module='...', func='...') instead."
647
675
  )
648
676
 
677
+ def _send_request(self, request: dict, timeout: float) -> dict:
678
+ """Send request via stdin and read response from stdout with timeout."""
679
+ # Send request
680
+ self._process.stdin.write(json.dumps(request) + "\n")
681
+ self._process.stdin.flush()
682
+
683
+ # Read response with timeout
684
+ if sys.platform == "win32":
685
+ # Windows: use thread for timeout
686
+ response_line = [None]
687
+ def read_response():
688
+ try:
689
+ response_line[0] = self._process.stdout.readline()
690
+ except:
691
+ pass
692
+ t = threading.Thread(target=read_response, daemon=True)
693
+ t.start()
694
+ t.join(timeout=timeout)
695
+ line = response_line[0]
696
+ else:
697
+ # Unix: use select
698
+ import select
699
+ ready, _, _ = select.select([self._process.stdout], [], [], timeout)
700
+ line = self._process.stdout.readline() if ready else None
701
+
702
+ if not line:
703
+ # Timeout - kill process
704
+ try:
705
+ self._process.kill()
706
+ except:
707
+ pass
708
+ self._shutdown = True
709
+ raise TimeoutError(f"{self.name}: Call timed out after {timeout}s")
710
+
711
+ try:
712
+ return json.loads(line)
713
+ except json.JSONDecodeError as e:
714
+ raise WorkerError(f"Invalid response from worker: {line!r}") from e
715
+
649
716
  def call_method(
650
717
  self,
651
718
  module_name: str,
@@ -669,8 +736,6 @@ class PersistentVenvWorker(Worker):
669
736
  Returns:
670
737
  Return value of the method.
671
738
  """
672
- import queue as queue_module
673
-
674
739
  with self._lock:
675
740
  self._ensure_started()
676
741
 
@@ -697,15 +762,7 @@ class PersistentVenvWorker(Worker):
697
762
  "inputs_path": str(inputs_path) if kwargs else None,
698
763
  "outputs_path": str(outputs_path),
699
764
  }
700
- self._transport.send(request)
701
-
702
- # Wait for response with timeout
703
- try:
704
- response = self._transport.recv(timeout=timeout)
705
- except queue_module.Empty:
706
- self._process.terminate()
707
- self._shutdown = True
708
- raise TimeoutError(f"{self.name}: Call timed out")
765
+ response = self._send_request(request, timeout)
709
766
 
710
767
  if response.get("status") == "error":
711
768
  raise WorkerError(
@@ -732,8 +789,6 @@ class PersistentVenvWorker(Worker):
732
789
  **kwargs
733
790
  ) -> Any:
734
791
  """Call a function by module path."""
735
- import queue as queue_module
736
-
737
792
  with self._lock:
738
793
  self._ensure_started()
739
794
 
@@ -758,15 +813,7 @@ class PersistentVenvWorker(Worker):
758
813
  "inputs_path": str(inputs_path) if kwargs else None,
759
814
  "outputs_path": str(outputs_path),
760
815
  }
761
- self._transport.send(request)
762
-
763
- # Wait for response with timeout
764
- try:
765
- response = self._transport.recv(timeout=timeout)
766
- except queue_module.Empty:
767
- self._process.terminate()
768
- self._shutdown = True
769
- raise TimeoutError(f"{self.name}: Call timed out")
816
+ response = self._send_request(request, timeout)
770
817
 
771
818
  if response.get("status") == "error":
772
819
  raise WorkerError(
@@ -792,32 +839,21 @@ class PersistentVenvWorker(Worker):
792
839
  return
793
840
  self._shutdown = True
794
841
 
795
- # Send shutdown signal via queue
796
- if self._transport:
842
+ # Send shutdown signal via stdin
843
+ if self._process and self._process.poll() is None:
797
844
  try:
798
- self._transport.send({"method": "shutdown"})
845
+ self._process.stdin.write(json.dumps({"method": "shutdown"}) + "\n")
846
+ self._process.stdin.flush()
847
+ self._process.stdin.close()
799
848
  except:
800
849
  pass
801
- self._transport.close()
802
-
803
- # Wait for process to exit
804
- if self._process:
805
- self._process.join(timeout=5)
806
- if self._process.is_alive():
807
- self._process.terminate()
808
- self._process.join(timeout=2)
809
850
 
810
- # Clean up queues
811
- if self._to_worker:
812
- try:
813
- self._to_worker.close()
814
- except:
815
- pass
816
- if self._from_worker:
851
+ # Wait for process to exit
817
852
  try:
818
- self._from_worker.close()
819
- except:
820
- pass
853
+ self._process.wait(timeout=5)
854
+ except subprocess.TimeoutExpired:
855
+ self._process.kill()
856
+ self._process.wait(timeout=2)
821
857
 
822
858
  shutil.rmtree(self._temp_dir, ignore_errors=True)
823
859
 
@@ -826,7 +862,7 @@ class PersistentVenvWorker(Worker):
826
862
  return False
827
863
  if self._process is None:
828
864
  return False
829
- return self._process.is_alive()
865
+ return self._process.poll() is None
830
866
 
831
867
  def __repr__(self):
832
868
  status = "alive" if self.is_alive() else "stopped"
File without changes
File without changes
File without changes
File without changes
File without changes