sciv-0.0.100.tar.gz → sciv-0.0.102.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. {sciv-0.0.100 → sciv-0.0.102}/PKG-INFO +2 -1
  2. {sciv-0.0.100 → sciv-0.0.102}/pyproject.toml +3 -2
  3. {sciv-0.0.100 → sciv-0.0.102}/requirements.txt +2 -1
  4. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/model/_core_.py +4 -4
  5. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/_bar_.py +21 -4
  6. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/_box_.py +12 -3
  7. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/_core_.py +7 -3
  8. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/_graph_.py +2 -0
  9. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/_kde_.py +5 -5
  10. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/_line_.py +1 -1
  11. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/_pie_.py +2 -1
  12. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/_violin_.py +11 -1
  13. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/preprocessing/_scanpy_.py +3 -2
  14. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/preprocessing/_scvi_.py +2 -2
  15. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/tool/_algorithm_.py +19 -14
  16. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/tool/_random_walk_.py +2 -2
  17. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/util/__init__.py +2 -0
  18. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/util/_core_.py +70 -1
  19. {sciv-0.0.100 → sciv-0.0.102}/.gitignore +0 -0
  20. {sciv-0.0.100 → sciv-0.0.102}/LICENSE +0 -0
  21. {sciv-0.0.100 → sciv-0.0.102}/MANIFEST.in +0 -0
  22. {sciv-0.0.100 → sciv-0.0.102}/README.en.md +0 -0
  23. {sciv-0.0.100 → sciv-0.0.102}/README.md +0 -0
  24. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/__init__.py +0 -0
  25. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/file/__init__.py +0 -0
  26. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/file/_read_.py +0 -0
  27. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/file/_write_.py +0 -0
  28. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/model/__init__.py +0 -0
  29. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/__init__.py +0 -0
  30. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/_barcode_.py +0 -0
  31. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/_bubble_.py +0 -0
  32. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/_heat_map_.py +0 -0
  33. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/_radar_.py +0 -0
  34. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/_scatter_.py +0 -0
  35. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/plot/_venn_.py +0 -0
  36. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/preprocessing/__init__.py +0 -0
  37. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/preprocessing/_anndata_.py +0 -0
  38. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/preprocessing/_gencode_.py +0 -0
  39. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/preprocessing/_gsea_.py +0 -0
  40. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/preprocessing/_snapatac_.py +0 -0
  41. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/tool/__init__.py +0 -0
  42. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/tool/_matrix_.py +0 -0
  43. {sciv-0.0.100 → sciv-0.0.102}/src/sciv/util/_constant_.py +0 -0
  44. {sciv-0.0.100 → sciv-0.0.102}/tests/scivTest/__init__.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: sciv
3
- Version: 0.0.100
3
+ Version: 0.0.102
4
4
  Summary: Unveiling the pivotal cell types involved in variant function regulation at a single-cell resolution
5
5
  Project-URL: github, https://github.com/YuZhengM/sciv
6
6
  Author-email: Zheng-Min Yu <yuzmbio@163.com>
@@ -48,6 +48,7 @@ Requires-Dist: numba==0.60.0
48
48
  Requires-Dist: numpy==1.26.4
49
49
  Requires-Dist: palettable==3.3.3
50
50
  Requires-Dist: pandas==1.5.3
51
+ Requires-Dist: psutil==7.2.1
51
52
  Requires-Dist: pycomplexheatmap==1.6.1
52
53
  Requires-Dist: pynndescent==0.5.13
53
54
  Requires-Dist: scanpy==1.11.5
@@ -3,7 +3,7 @@ requires = ["hatchling"]
3
3
  build-backend = "hatchling.build"
4
4
  [project]
5
5
  name = "sciv"
6
- version = "0.0.100"
6
+ version = "0.0.102"
7
7
  authors = [
8
8
  { name = "Zheng-Min Yu", email = "yuzmbio@163.com" },
9
9
  ]
@@ -48,7 +48,8 @@ dependencies = [
48
48
  "louvain==0.8.2",
49
49
  "numba==0.60.0",
50
50
  "pynndescent==0.5.13",
51
- "fa2-modified==0.4"
51
+ "fa2-modified==0.4",
52
+ "psutil==7.2.1"
52
53
  ]
53
54
  classifiers = [
54
55
  "Programming Language :: Python :: 3",
@@ -28,4 +28,5 @@ louvain==0.8.2
28
28
  snapatac2==2.7.1
29
29
  numba==0.60.0
30
30
  pynndescent==0.5.13
31
- fa2-modified==0.4
31
+ fa2-modified==0.4
32
+ psutil==7.2.1
@@ -21,7 +21,7 @@ __name__: str = "model_core"
21
21
 
22
22
 
23
23
  def _run_random_walk_(random_walk: RandomWalk, is_ablation: bool, is_simple: bool) -> AnnData:
24
- start_time = time.time()
24
+ start_time = time.perf_counter()
25
25
 
26
26
  if not random_walk.is_run_core:
27
27
  random_walk.run_core()
@@ -57,7 +57,7 @@ def _run_random_walk_(random_walk: RandomWalk, is_ablation: bool, is_simple: boo
57
57
  if not random_walk.is_run_en_ablation_m_knn:
58
58
  random_walk.run_en_ablation_m_knn()
59
59
 
60
- random_walk.elapsed_time += time.time() - start_time
60
+ random_walk.elapsed_time += time.perf_counter() - start_time
61
61
 
62
62
  return random_walk.trs_adata
63
63
 
@@ -181,7 +181,7 @@ def core(
181
181
  """
182
182
 
183
183
  # start time
184
- start_time = time.time()
184
+ start_time = time.perf_counter()
185
185
 
186
186
  if len(variants.keys()) == 0:
187
187
  ul.log(__name__).error("The number of mutations is empty.")
@@ -591,7 +591,7 @@ def core(
591
591
  random_walk_time = random_walk.elapsed_time
592
592
 
593
593
  # end time
594
- elapsed_time = time.time() - start_time
594
+ elapsed_time = time.perf_counter() - start_time
595
595
  step_time = poisson_vi_time + overlap_time + init_score_time + smknn_time + random_walk_time
596
596
 
597
597
  if elapsed_time < step_time:
@@ -76,6 +76,7 @@ def two_bar(
76
76
  bottom: float = 0,
77
77
  rotation: float = 65,
78
78
  text_left_move: float = 0.15,
79
+ y_limit: Tuple[float, float] = (0, 1),
79
80
  title: str = None,
80
81
  output: path = None,
81
82
  show: bool = True,
@@ -91,6 +92,9 @@ def two_bar(
91
92
 
92
93
  ax.legend()
93
94
 
95
+ ax.set_ylim(y_limit)
96
+
97
+ ax.set_xticks(range(len(ax_x)))
94
98
  ax.set_xticklabels(labels=list(ax_x), rotation=rotation)
95
99
 
96
100
  # Draw numerical values
@@ -103,6 +107,13 @@ def two_bar(
103
107
  color=text_color
104
108
  )
105
109
 
110
+ for spine in ["top", "left", "right", "bottom"]:
111
+ ax.spines[spine].set_linewidth(1)
112
+
113
+ ax.spines['bottom'].set_linewidth(1)
114
+ ax.grid(axis='y', ls='--', c='gray')
115
+ ax.set_axisbelow(True)
116
+
106
117
  plot_end(fig, title, x_name, y_name, output, show, close)
107
118
 
108
119
 
@@ -123,6 +134,7 @@ def class_bar(
123
134
  rotation: float = 65,
124
135
  title: str = None,
125
136
  text_left_move: float = 0.15,
137
+ y_limit: Tuple[float, float] = (0, 1),
126
138
  output: path = None,
127
139
  show: bool = True,
128
140
  close: bool = False,
@@ -159,6 +171,7 @@ def class_bar(
159
171
  bottom=bottom,
160
172
  rotation=rotation,
161
173
  text_left_move=text_left_move,
174
+ y_limit=y_limit,
162
175
  title=title,
163
176
  output=output,
164
177
  show=show,
@@ -185,6 +198,7 @@ def bar_trait(
185
198
  rotation: float = 65,
186
199
  title: str = None,
187
200
  text_left_move: float = 0.15,
201
+ y_limit: Tuple[float, float] = (0, 1),
188
202
  output: path = None,
189
203
  show: bool = True,
190
204
  close: bool = False,
@@ -216,6 +230,7 @@ def bar_trait(
216
230
  bottom=bottom,
217
231
  rotation=rotation,
218
232
  text_left_move=text_left_move,
233
+ y_limit=y_limit,
219
234
  text_color=text_color,
220
235
  output=os.path.join(output, f"cell_{trait_}_enrichment_bar.pdf") if output is not None else None,
221
236
  show=show,
@@ -224,6 +239,7 @@ def bar_trait(
224
239
  )
225
240
 
226
241
  trait_list = list(set(trait_df[trait_column_name]))
242
+
227
243
  # judge trait
228
244
  if trait_name != "All" and trait_name not in trait_list:
229
245
  ul.log(__name__).error(
@@ -237,8 +253,10 @@ def bar_trait(
237
253
 
238
254
  # plot
239
255
  if trait_name == "All":
256
+
240
257
  for trait in trait_list:
241
258
  trait_plot(trait_=trait, cell_df_=trait_df)
259
+
242
260
  else:
243
261
  trait_plot(trait_name, trait_df)
244
262
 
@@ -376,10 +394,9 @@ def bar_significance(
376
394
  y=y,
377
395
  hue=legend,
378
396
  hue_order=hue_order,
379
- ci=ci,
397
+ errorbar=('ci', ci),
380
398
  capsize=capsize,
381
- errwidth=line_width,
382
- errcolor=errcolor,
399
+ err_kws={'color': errcolor, 'linewidth': line_width},
383
400
  ax=ax,
384
401
  palette=palette,
385
402
  edgecolor=errcolor,
@@ -437,7 +454,7 @@ def bar_significance(
437
454
  ax.set_axisbelow(True)
438
455
 
439
456
  if x_rotation != 0:
440
- ax.set_xticklabels(labels=ax.get_xticklabels(), rotation=x_rotation)
457
+ ax.tick_params(axis='x', rotation=x_rotation)
441
458
 
442
459
  plt.legend(loc='upper left', bbox_to_anchor=(0.0, legend_gap), ncol=2)
443
460
 
@@ -52,7 +52,6 @@ def box_base(
52
52
 
53
53
  if "color" in df_columns:
54
54
  new_df_color: DataFrame = df.groupby(group_columns, as_index=False)["color"].first()
55
-
56
55
  new_df = new_df.merge(new_df_color, how="left", on=clusters)
57
56
 
58
57
  colors: list = []
@@ -61,22 +60,31 @@ def box_base(
61
60
  if is_sort:
62
61
  new_df.sort_values([value], ascending=False, inplace=True)
63
62
  y_names: Union[list, None] = list(new_df[clusters])
63
+
64
64
  if "color" in df_columns:
65
- colors = new_df["color"]
65
+ colors = list(new_df["color"])
66
+
66
67
  else:
67
68
  new_df.index = new_df[clusters]
69
+
68
70
  if order_names is not None:
69
71
  y_names: list = order_names
72
+
70
73
  if "color" in df_columns:
74
+
71
75
  for i in order_names:
76
+
72
77
  for j, c in zip(new_df[clusters], new_df["color"]):
78
+
73
79
  if i == j:
74
80
  colors.append(c)
75
81
  break
82
+
76
83
  else:
77
84
  y_names = new_df[clusters]
85
+
78
86
  if "color" in df_columns:
79
- colors = new_df["color"]
87
+ colors = list(new_df["color"])
80
88
 
81
89
  # scatter
82
90
  sns.boxplot(
@@ -103,6 +111,7 @@ def box_base(
103
111
  line.set_linewidth(line_width)
104
112
 
105
113
  # set coordinate
114
+ ax.set_xticks(range(len(y_names)))
106
115
  ax.set_xticklabels(labels=y_names, rotation=rotation)
107
116
  ax.spines['top'].set_linewidth(line_width)
108
117
  ax.spines['bottom'].set_linewidth(line_width)
@@ -1,7 +1,7 @@
1
1
  # -*- coding: UTF-8 -*-
2
2
 
3
3
  import os
4
- from typing import Tuple, Literal, Optional, Union
4
+ from typing import Tuple, Literal, Optional, Union, Any
5
5
 
6
6
  import pandas as pd
7
7
  from anndata import AnnData
@@ -314,9 +314,11 @@ def rate_bar_plot(
314
314
  rotation: float = 65,
315
315
  title: str = None,
316
316
  text_left_move: float = 0.15,
317
+ y_limit: Tuple[float, float] = (0, 1),
317
318
  plot_output: path = None,
318
319
  show: bool = True,
319
- close: bool = False
320
+ close: bool = False,
321
+ **kwargs: Any
320
322
  ) -> None:
321
323
 
322
324
  if dir_name is not None:
@@ -349,9 +351,11 @@ def rate_bar_plot(
349
351
  bottom=bottom,
350
352
  rotation=rotation,
351
353
  text_left_move=text_left_move,
354
+ y_limit=y_limit,
352
355
  output=new_path if plot_output is not None else None,
353
356
  show=show,
354
- close=close
357
+ close=close,
358
+ **kwargs
355
359
  )
356
360
 
357
361
 
@@ -143,8 +143,10 @@ def communities_graph(
143
143
  partition: list = [0 for _ in range(g.number_of_nodes())]
144
144
 
145
145
  for c_i, nodes in enumerate(communities):
146
+
146
147
  for i in nodes:
147
148
  partition[i] = type_colors[start_color_index + color_index * color_step_size + c_i]
149
+
148
150
  color_index += 1
149
151
 
150
152
  pos = nx.spring_layout(g)
@@ -42,7 +42,7 @@ def kde(
42
42
  # Random sampling
43
43
  if axis == -1:
44
44
  matrix = down_sampling_data(data.X, sample_number)
45
- sns.kdeplot(matrix, shade=True, fill=True)
45
+ sns.kdeplot(matrix, fill=True)
46
46
  elif axis == 0:
47
47
  col_number = data.shape[1]
48
48
  if data.shape[0] * data.shape[1] > sample_number:
@@ -50,10 +50,10 @@ def kde(
50
50
 
51
51
  for i in tqdm(range(col_number)):
52
52
  _vector_ = down_sampling_data(data.X[:, i], row_number)
53
- sns.kdeplot(np.array(_vector_).flatten(), shade=True, fill=True, **kwargs)
53
+ sns.kdeplot(np.array(_vector_).flatten(), fill=True, **kwargs)
54
54
  else:
55
55
  for i in tqdm(range(col_number)):
56
- sns.kdeplot(np.array(data.X[:, i]).flatten(), shade=True, fill=True, **kwargs)
56
+ sns.kdeplot(np.array(data.X[:, i]).flatten(), fill=True, **kwargs)
57
57
 
58
58
  if is_legend:
59
59
  ax.legend(list(adata.var.index))
@@ -65,10 +65,10 @@ def kde(
65
65
 
66
66
  for i in tqdm(range(row_number)):
67
67
  _vector_ = down_sampling_data(data.X[i, :], col_number)
68
- sns.kdeplot(np.array(_vector_).flatten(), shade=True, fill=True, **kwargs)
68
+ sns.kdeplot(np.array(_vector_).flatten(), fill=True, **kwargs)
69
69
  else:
70
70
  for i in tqdm(range(row_number)):
71
- sns.kdeplot(np.array(data.X[i, :]).flatten(), shade=True, fill=True, **kwargs)
71
+ sns.kdeplot(np.array(data.X[i, :]).flatten(), fill=True, **kwargs)
72
72
 
73
73
  if is_legend:
74
74
  ax.legend(list(adata.obs.index))
@@ -119,7 +119,7 @@ def stability_line(
119
119
  locator = mdates.DayLocator(interval=1)
120
120
  chart.xaxis.set_major_locator(locator)
121
121
 
122
- ax.set_xticklabels(ax.get_xticklabels(), rotation=x_name_rotation)
122
+ ax.tick_params(axis='x', rotation=x_name_rotation)
123
123
  else:
124
124
  plt.xticks(x_ticks, rotation=x_name_rotation)
125
125
 
@@ -115,6 +115,7 @@ def pie_label(
115
115
  startangle=90,
116
116
  labeldistance=label_distance,
117
117
  pctdistance=pct_distance,
118
+ wedgeprops=dict(linewidth=0),
118
119
  **kwargs
119
120
  )
120
121
  ax.pie(
@@ -122,7 +123,7 @@ def pie_label(
122
123
  colors=['white'],
123
124
  radius=radius,
124
125
  startangle=90,
125
- wedgeprops=dict(width=radius, edgecolor='w'),
126
+ wedgeprops=dict(width=radius, edgecolor='w', linewidth=0),
126
127
  **kwargs
127
128
  )
128
129
  ax.text(0, 0, "{:.2f}%".format(top_x[0] / top_sum * 100), ha='center', va='center', fontsize=fontsize)
@@ -48,7 +48,8 @@ def violin_base(
48
48
 
49
49
  if hue is not None and hue not in df_columns:
50
50
  ul.log(__name__).error(
51
- f"The `hue` ({hue}) parameter must be in the `df` parameter data column name ({df_columns})")
51
+ f"The `hue` ({hue}) parameter must be in the `df` parameter data column name ({df_columns})"
52
+ )
52
53
  raise ValueError(f"The `hue` ({hue}) parameter must be in the `df` parameter data column name ({df_columns})")
53
54
 
54
55
  fig, ax = plot_start(width, height, bottom, output, show)
@@ -67,20 +68,29 @@ def violin_base(
67
68
  if is_sort:
68
69
  new_df.sort_values([value], ascending=False, inplace=True)
69
70
  y_names: Union[list, None] = list(new_df[clusters])
71
+
70
72
  if "color" in df_columns:
71
73
  colors = list(new_df["color"])
74
+
72
75
  else:
73
76
  new_df.index = new_df[clusters]
77
+
74
78
  if order_names is not None:
75
79
  y_names: list = order_names
80
+
76
81
  if "color" in df_columns:
82
+
77
83
  for i in order_names:
84
+
78
85
  for j, c in zip(new_df[clusters], new_df["color"]):
86
+
79
87
  if i == j:
80
88
  colors.append(c)
81
89
  break
90
+
82
91
  else:
83
92
  y_names = list(new_df[clusters])
93
+
84
94
  if "color" in df_columns:
85
95
  colors = list(new_df["color"])
86
96
 
@@ -1,4 +1,5 @@
1
1
  # -*- coding: UTF-8 -*-
2
+
2
3
  import time
3
4
  import warnings
4
5
  from typing import Optional, Literal
@@ -47,7 +48,7 @@ def filter_data(
47
48
  """
48
49
 
49
50
  # start time
50
- start_time = time.time()
51
+ start_time = time.perf_counter()
51
52
 
52
53
  import scanpy as sc
53
54
 
@@ -109,7 +110,7 @@ def filter_data(
109
110
  )
110
111
  ul.log(__name__).info(f"Size of filtered scATAC-seq data: {filter_adata.shape}")
111
112
  filter_adata.uns["step"] = 0
112
- filter_adata.uns["elapsed_time"] = time.time() - start_time
113
+ filter_adata.uns["elapsed_time"] = time.perf_counter() - start_time
113
114
 
114
115
  return filter_adata
115
116
 
@@ -50,7 +50,7 @@ def poisson_vi(
50
50
  """
51
51
  ul.log(__name__).info("Start PoissonVI")
52
52
 
53
- start_time = time.time()
53
+ start_time = time.perf_counter()
54
54
 
55
55
  import scvi
56
56
  import scanpy as sc
@@ -241,7 +241,7 @@ def poisson_vi(
241
241
  da_peaks_adata.layers["emp_prob1"] = matrix_ep1
242
242
  da_peaks_adata.uns["latent_name"] = latent_name
243
243
  da_peaks_adata.uns["dp_delta"] = dp_delta
244
- da_peaks_adata.uns["elapsed_time"] = time.time() - start_time
244
+ da_peaks_adata.uns["elapsed_time"] = time.perf_counter() - start_time
245
245
 
246
246
  adata.uns["step"] = 1
247
247
 
@@ -455,11 +455,11 @@ def semi_mutual_knn_weight(
455
455
  del data
456
456
  np.fill_diagonal(new_data, 0)
457
457
 
458
- def _knn_k_(_mat: matrix_data, k: int):
458
+ def _knn_k_(_mat: matrix_data, k: int, info: str = "LOG"):
459
459
  n_rows = _mat.shape[0]
460
460
  adj = sparse.lil_matrix((n_rows, n_rows), dtype=np.int8)
461
461
 
462
- ul.log(__name__).info("Calculate the k-nearest neighbors of each node.")
462
+ ul.log(__name__).info(f"Calculate the k-nearest neighbors of each node. ({info})")
463
463
 
464
464
  for i in tqdm(range(n_rows)):
465
465
  row = np.array(_mat[i]).ravel()
@@ -477,7 +477,7 @@ def semi_mutual_knn_weight(
477
477
 
478
478
  return adj.tocsr()
479
479
 
480
- def _knn(_mat: matrix_data, k: int) -> matrix_data:
480
+ def _knn(_mat: matrix_data, k: int, info: str = "LOG") -> matrix_data:
481
481
  """
482
482
  Return k-nearest-neighbor 0/1 adjacency matrix (int8 to save memory).
483
483
  Supports both sparse and dense inputs.
@@ -486,11 +486,11 @@ def semi_mutual_knn_weight(
486
486
  if sparse.issparse(_mat):
487
487
  # Sparse path: sort each row's data to find the k-th largest
488
488
  _mat = _mat.tocsr(copy=False)
489
- return _knn_k_(_mat, k)
489
+ return _knn_k_(_mat, k, info)
490
490
  else:
491
491
 
492
492
  if is_for:
493
- return _knn_k_(_mat, k)
493
+ return _knn_k_(_mat, k, info)
494
494
  else:
495
495
  # Dense path: vectorized thresholding
496
496
  kth_val = np.sort(_mat, axis=1)[:, -(k + 1)]
@@ -499,12 +499,12 @@ def semi_mutual_knn_weight(
499
499
  return adj
500
500
 
501
501
  # Compute adjacency matrices for AND/OR logic
502
- adj_and = _knn(new_data, neighbors)
502
+ adj_and = _knn(new_data, neighbors, "AND")
503
503
 
504
504
  if neighbors == or_neighbors:
505
505
  adj_or = adj_and
506
506
  else:
507
- adj_or = _knn(new_data, or_neighbors)
507
+ adj_or = _knn(new_data, or_neighbors, "OR")
508
508
 
509
509
  # Symmetrize
510
510
  if sparse.issparse(adj_and):
@@ -519,7 +519,12 @@ def semi_mutual_knn_weight(
519
519
 
520
520
  # Ensure full connectivity if required
521
521
  if is_mknn_fully_connected:
522
- adj_1nn = _knn(new_data, 1)
522
+ adj_1nn = _knn(new_data, 1, "ONE")
523
+
524
+ if sparse.issparse(adj_1nn):
525
+ adj_1nn = adj_1nn.maximum(adj_1nn.T)
526
+ else:
527
+ adj_1nn = np.maximum(adj_1nn, adj_1nn.T)
523
528
 
524
529
  if sparse.issparse(adj_and):
525
530
  adj_and = adj_and.maximum(adj_1nn)
@@ -908,7 +913,7 @@ def overlap_sum(regions: AnnData, variants: dict, trait_info: DataFrame, n_jobs:
908
913
  :return: overlap data
909
914
  """
910
915
 
911
- start_time = time.time()
916
+ start_time = time.perf_counter()
912
917
 
913
918
  # Unique feature set
914
919
  label_all = regions.var.index.tolist()
@@ -1020,7 +1025,7 @@ def overlap_sum(regions: AnnData, variants: dict, trait_info: DataFrame, n_jobs:
1020
1025
 
1021
1026
  overlap_adata = AnnData(overlap_sparse, var=trait_info, obs=regions.var)
1022
1027
  overlap_adata.uns["is_overlap"] = True
1023
- overlap_adata.uns["elapsed_time"] = time.time() - start_time
1028
+ overlap_adata.uns["elapsed_time"] = time.perf_counter() - start_time
1024
1029
 
1025
1030
  return overlap_adata
1026
1031
 
@@ -1124,7 +1129,7 @@ def calculate_init_score_weight(
1124
1129
  :return: Initial TRS with weight.
1125
1130
  """
1126
1131
 
1127
- start_time = time.time()
1132
+ start_time = time.perf_counter()
1128
1133
 
1129
1134
  if "is_overlap" not in overlap_adata.uns:
1130
1135
  ul.log(__name__).warning(
@@ -1253,7 +1258,7 @@ def calculate_init_score_weight(
1253
1258
  del _init_trs_ncw_, _cell_type_weight_
1254
1259
 
1255
1260
  init_trs_adata.uns["is_sample"] = is_simple
1256
- init_trs_adata.uns["elapsed_time"] = time.time() - start_time
1261
+ init_trs_adata.uns["elapsed_time"] = time.perf_counter() - start_time
1257
1262
  return init_trs_adata
1258
1263
 
1259
1264
 
@@ -1309,7 +1314,7 @@ def obtain_cell_cell_network(
1309
1314
  :return: Cell similarity data.
1310
1315
  """
1311
1316
 
1312
- start_time = time.time()
1317
+ start_time = time.perf_counter()
1313
1318
 
1314
1319
  from sklearn.metrics.pairwise import laplacian_kernel, rbf_kernel
1315
1320
 
@@ -1368,7 +1373,7 @@ def obtain_cell_cell_network(
1368
1373
  if not is_simple:
1369
1374
  cc_data.layers["cell_mutual_knn"] = to_sparse(cell_mutual_knn)
1370
1375
 
1371
- cc_data.uns["elapsed_time"] = time.time() - start_time
1376
+ cc_data.uns["elapsed_time"] = time.perf_counter() - start_time
1372
1377
 
1373
1378
  return cc_data
1374
1379
 
@@ -282,7 +282,7 @@ class RandomWalk:
282
282
  """
283
283
  ul.log(__name__).info("Random walk with weighted seed cells.")
284
284
 
285
- start_time = time.time()
285
+ start_time = time.perf_counter()
286
286
 
287
287
  # judge length
288
288
  if cc_adata.shape[0] != init_status.shape[0]:
@@ -459,7 +459,7 @@ class RandomWalk:
459
459
  del self.cell_affinity
460
460
  del init_status
461
461
 
462
- self.elapsed_time = time.time() - start_time
462
+ self.elapsed_time = time.perf_counter() - start_time
463
463
 
464
464
  def _random_walk_(
465
465
  self,
@@ -29,6 +29,7 @@ from ._constant_ import (
29
29
  from ._core_ import (
30
30
  file_method,
31
31
  log,
32
+ track_with_memory,
32
33
  to_dense,
33
34
  to_sparse,
34
35
  sum_min_max,
@@ -58,6 +59,7 @@ __all__ = [
58
59
  "log_file_path",
59
60
  "file_method",
60
61
  "log",
62
+ "track_with_memory",
61
63
  "path",
62
64
  "plot_color_types",
63
65
  "sparse_array",
@@ -4,7 +4,11 @@ import math
4
4
  import os
5
5
  import random
6
6
  import string
7
- from typing import Tuple, Union, Literal
7
+ import threading
8
+ import time
9
+ from functools import wraps
10
+ from typing import Tuple, Union, Literal, Callable, Any
11
+ import psutil
8
12
 
9
13
  import numpy as np
10
14
  import pandas as pd
@@ -35,6 +39,71 @@ def log(name: str = None) -> Logger:
35
39
  return Logger(name, log_path=os.path.join(ul.log_file_path, name), is_form_file=ul.is_form_log_file)
36
40
 
37
41
 
42
+ def track_with_memory(is_monitor: bool = False, interval: float = 60) -> Callable:
43
+ """
44
+ Decorator: Records memory usage at fixed intervals during function execution and returns the result, elapsed time, and memory list.
45
+
46
+ Parameters
47
+ ----------
48
+ is_monitor : bool, optional
49
+ Whether to enable memory monitoring, default is False.
50
+ interval : float, optional
51
+ Sampling interval (seconds), default is 60 seconds.
52
+
53
+ Returns
54
+ -------
55
+ Callable
56
+ Decorator function. When ``is_monitor`` is False, the wrapped function's return value is passed through unchanged; when True, calling the wrapped function returns a dictionary containing:
57
+ - 'result': the original function's return value
58
+ - 'time': function execution time (seconds)
59
+ - 'memory': list of memory usage samples (bytes), one taken per ``interval``
60
+ """
61
+
62
+ def decorator(func) -> Callable:
63
+ @wraps(func)
64
+ def wrapper(*args, **kwargs) -> Union[Any, dict]:
65
+
66
+ if not is_monitor:
67
+ return func(*args, **kwargs)
68
+
69
+ process = psutil.Process(os.getpid())
70
+
71
+ stop_monitor = False
72
+
73
+ mem_list = []
74
+
75
+ def monitor():
76
+ nonlocal stop_monitor
77
+
78
+ while not stop_monitor:
79
+ current_mem = process.memory_info().rss
80
+ mem_list.append(current_mem)
81
+
82
+ time.sleep(interval)
83
+
84
+ t = threading.Thread(target=monitor, daemon=True)
85
+ t.start()
86
+
87
+ start_time = time.perf_counter()
88
+ _result_ = func(*args, **kwargs)
89
+ end_time = time.perf_counter()
90
+
91
+ stop_monitor = True
92
+ t.join()
93
+
94
+ exec_time = end_time - start_time
95
+
96
+ return {
97
+ 'result': _result_,
98
+ 'time': exec_time,
99
+ 'memory': mem_list
100
+ }
101
+
102
+ return wrapper
103
+
104
+ return decorator
105
+
106
+
38
107
  def to_dense(sm: matrix_data, is_array: bool = False) -> dense_data:
39
108
  """
40
109
  Convert sparse matrix to dense matrix
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes