sciv 0.0.104__tar.gz → 0.0.106__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. {sciv-0.0.104 → sciv-0.0.106}/PKG-INFO +1 -1
  2. {sciv-0.0.104 → sciv-0.0.106}/pyproject.toml +1 -1
  3. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/preprocessing/_scvi_.py +55 -38
  4. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/util/_core_.py +2 -2
  5. {sciv-0.0.104 → sciv-0.0.106}/.gitignore +0 -0
  6. {sciv-0.0.104 → sciv-0.0.106}/LICENSE +0 -0
  7. {sciv-0.0.104 → sciv-0.0.106}/MANIFEST.in +0 -0
  8. {sciv-0.0.104 → sciv-0.0.106}/README.en.md +0 -0
  9. {sciv-0.0.104 → sciv-0.0.106}/README.md +0 -0
  10. {sciv-0.0.104 → sciv-0.0.106}/requirements.txt +0 -0
  11. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/__init__.py +0 -0
  12. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/file/__init__.py +0 -0
  13. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/file/_read_.py +0 -0
  14. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/file/_write_.py +0 -0
  15. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/model/__init__.py +0 -0
  16. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/model/_core_.py +0 -0
  17. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/__init__.py +0 -0
  18. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/_bar_.py +0 -0
  19. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/_barcode_.py +0 -0
  20. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/_box_.py +0 -0
  21. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/_bubble_.py +0 -0
  22. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/_core_.py +0 -0
  23. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/_graph_.py +0 -0
  24. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/_heat_map_.py +0 -0
  25. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/_kde_.py +0 -0
  26. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/_line_.py +0 -0
  27. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/_pie_.py +0 -0
  28. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/_radar_.py +0 -0
  29. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/_scatter_.py +0 -0
  30. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/_venn_.py +0 -0
  31. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/plot/_violin_.py +0 -0
  32. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/preprocessing/__init__.py +0 -0
  33. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/preprocessing/_anndata_.py +0 -0
  34. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/preprocessing/_gencode_.py +0 -0
  35. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/preprocessing/_gsea_.py +0 -0
  36. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/preprocessing/_scanpy_.py +0 -0
  37. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/preprocessing/_snapatac_.py +0 -0
  38. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/tool/__init__.py +0 -0
  39. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/tool/_algorithm_.py +0 -0
  40. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/tool/_matrix_.py +0 -0
  41. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/tool/_random_walk_.py +0 -0
  42. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/util/__init__.py +0 -0
  43. {sciv-0.0.104 → sciv-0.0.106}/src/sciv/util/_constant_.py +0 -0
  44. {sciv-0.0.104 → sciv-0.0.106}/tests/scivTest/__init__.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: sciv
3
- Version: 0.0.104
3
+ Version: 0.0.106
4
4
  Summary: Unveiling the pivotal cell types involved in variant function regulation at a single-cell resolution
5
5
  Project-URL: github, https://github.com/YuZhengM/sciv
6
6
  Author-email: Zheng-Min Yu <yuzmbio@163.com>
@@ -3,7 +3,7 @@ requires = ["hatchling"]
3
3
  build-backend = "hatchling.build"
4
4
  [project]
5
5
  name = "sciv"
6
- version = "0.0.104"
6
+ version = "0.0.106"
7
7
  authors = [
8
8
  { name = "Zheng-Min Yu", email = "yuzmbio@163.com" },
9
9
  ]
@@ -13,7 +13,7 @@ from torch.cuda import OutOfMemoryError
13
13
 
14
14
  from .. import util as ul
15
15
  from ..tool import umap, tsne
16
- from ..util import path
16
+ from ..util import path, check_gpu_availability
17
17
 
18
18
  __name__: str = "preprocessing_scvi"
19
19
 
@@ -77,53 +77,70 @@ def poisson_vi(
77
77
  scvi.external.POISSONVI.setup_anndata(adata, layer="fragments", batch_key=batch_key)
78
78
  _model_ = scvi.external.POISSONVI(adata)
79
79
 
80
- try:
81
- data_splitter_kwargs = {"drop_dataset_tail": True, "drop_last": False}
82
- with warnings.catch_warnings():
83
- warnings.simplefilter("ignore")
84
- _model_.train(
85
- max_epochs=int(max_epochs),
86
- check_val_every_n_epoch=1,
87
- accelerator="gpu",
88
- devices=-1,
89
- datasplitter_kwargs=data_splitter_kwargs,
90
- strategy="ddp_notebook_find_unused_parameters_true",
91
- lr=lr,
92
- batch_size=int(batch_size),
93
- eps=eps,
94
- early_stopping=early_stopping,
95
- early_stopping_patience=int(early_stopping_patience)
96
- )
97
- except Exception as ex:
80
+ if check_gpu_availability():
98
81
 
99
82
  try:
100
- ul.log(__name__).warning(f"Multiple GPU failed to run, attempting to run on one card.\n {ex}")
101
- with warnings.catch_warnings():
102
- warnings.simplefilter("ignore")
103
- _model_.train(
104
- max_epochs=int(max_epochs),
105
- check_val_every_n_epoch=1,
106
- lr=lr,
107
- batch_size=int(batch_size),
108
- eps=eps,
109
- early_stopping=early_stopping,
110
- early_stopping_patience=int(early_stopping_patience)
111
- )
112
- except Exception as exc:
113
- ul.log(__name__).warning(f"GPU failed to run, try to switch to CPU running.\n {exc}")
83
+ data_splitter_kwargs = {"drop_dataset_tail": True, "drop_last": False}
114
84
  with warnings.catch_warnings():
115
85
  warnings.simplefilter("ignore")
116
- _model_.to_device('cpu')
117
86
  _model_.train(
118
- max_epochs=int(max_epochs),
87
+ max_epochs=max_epochs,
119
88
  check_val_every_n_epoch=1,
89
+ accelerator="gpu",
90
+ devices=-1,
91
+ datasplitter_kwargs=data_splitter_kwargs,
92
+ strategy="ddp_notebook_find_unused_parameters_true",
120
93
  lr=lr,
121
- batch_size=int(batch_size),
94
+ batch_size=batch_size,
122
95
  eps=eps,
123
96
  early_stopping=early_stopping,
124
- early_stopping_patience=int(early_stopping_patience),
125
- accelerator="cpu"
97
+ early_stopping_patience=early_stopping_patience
126
98
  )
99
+ except Exception as ex:
100
+
101
+ try:
102
+ ul.log(__name__).warning(f"Multiple GPU failed to run, attempting to run on one card.\n {ex}")
103
+ with warnings.catch_warnings():
104
+ warnings.simplefilter("ignore")
105
+ _model_.train(
106
+ max_epochs=max_epochs,
107
+ check_val_every_n_epoch=1,
108
+ lr=lr,
109
+ batch_size=batch_size,
110
+ eps=eps,
111
+ early_stopping=early_stopping,
112
+ early_stopping_patience=early_stopping_patience
113
+ )
114
+ except Exception as exc:
115
+ ul.log(__name__).warning(f"GPU failed to run, try to switch to CPU running.\n {exc}")
116
+ with warnings.catch_warnings():
117
+ warnings.simplefilter("ignore")
118
+ _model_.to_device('cpu')
119
+ _model_.train(
120
+ max_epochs=max_epochs,
121
+ check_val_every_n_epoch=1,
122
+ lr=lr,
123
+ batch_size=batch_size,
124
+ eps=eps,
125
+ early_stopping=early_stopping,
126
+ early_stopping_patience=early_stopping_patience,
127
+ accelerator="cpu"
128
+ )
129
+ else:
130
+
131
+ with warnings.catch_warnings():
132
+ warnings.simplefilter("ignore")
133
+ _model_.to_device('cpu')
134
+ _model_.train(
135
+ max_epochs=max_epochs,
136
+ check_val_every_n_epoch=1,
137
+ lr=lr,
138
+ batch_size=batch_size,
139
+ eps=eps,
140
+ early_stopping=early_stopping,
141
+ early_stopping_patience=early_stopping_patience,
142
+ accelerator="cpu"
143
+ )
127
144
 
128
145
  return _model_
129
146
 
@@ -59,7 +59,7 @@ def track_with_memory(interval: float = 60) -> Callable:
59
59
 
60
60
  def decorator(func) -> Callable:
61
61
  @wraps(func)
62
- def wrapper(*args, **kwargs) -> Union[Any, dict]:
62
+ def wrapper(*args, **kwargs) -> dict:
63
63
 
64
64
  process = psutil.Process(os.getpid())
65
65
 
@@ -466,7 +466,7 @@ def add_cluster_info(data: DataFrame, data_ref: DataFrame, cluster: str) -> Data
466
466
  return new_data
467
467
 
468
468
 
469
- def check_gpu_availability(verbose: bool = True) -> bool:
469
+ def check_gpu_availability(verbose: bool = False) -> bool:
470
470
  available = torch.cuda.is_available()
471
471
 
472
472
  if verbose:
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes