mcDETECT 1.0.10__py3-none-any.whl → 1.0.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcDETECT/__init__.py +1 -1
- mcDETECT/model.py +80 -78
- {mcdetect-1.0.10.dist-info → mcdetect-1.0.12.dist-info}/METADATA +1 -1
- mcdetect-1.0.12.dist-info/RECORD +7 -0
- {mcdetect-1.0.10.dist-info → mcdetect-1.0.12.dist-info}/WHEEL +1 -1
- mcdetect-1.0.10.dist-info/RECORD +0 -7
- {mcdetect-1.0.10.dist-info → mcdetect-1.0.12.dist-info}/LICENSE +0 -0
- {mcdetect-1.0.10.dist-info → mcdetect-1.0.12.dist-info}/top_level.txt +0 -0
mcDETECT/__init__.py
CHANGED

@@ -1,2 +1,2 @@
- __version__ = "1.0.10"
+ __version__ = "1.0.12"
  from .model import closest, mcDETECT
mcDETECT/model.py
CHANGED

@@ -68,8 +68,8 @@ class mcDETECT:
  def construct_grid(self, grid_len = None):
  if grid_len is None:
  grid_len = self.grid_len
- x_min, x_max = np.min(self.transcripts[…
- y_min, y_max = np.min(self.transcripts[…
+ x_min, x_max = np.min(self.transcripts["global_x"]), np.max(self.transcripts["global_x"])
+ y_min, y_max = np.min(self.transcripts["global_y"]), np.max(self.transcripts["global_y"])
  x_min = np.floor(x_min / grid_len) * grid_len
  x_max = np.ceil(x_max / grid_len) * grid_len
  y_min = np.floor(y_min / grid_len) * grid_len
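For context on the logic this hunk touches: construct_grid() snaps the transcript coordinate range outward to multiples of grid_len before binning. A minimal standalone sketch with illustrative values (the final np.arange bin-edge construction is an assumption, since that part of the method is not shown in the hunk):

    # Sketch of the bound-snapping in construct_grid(); values are arbitrary.
    import numpy as np

    grid_len = 20.0
    x = np.array([13.7, 55.2, 248.9])                  # example global_x values

    x_min = np.floor(np.min(x) / grid_len) * grid_len  # 0.0
    x_max = np.ceil(np.max(x) / grid_len) * grid_len   # 260.0
    x_bins = np.arange(x_min, x_max + grid_len, grid_len)  # assumed bin-edge construction
    print(x_min, x_max, len(x_bins))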
@@ -82,14 +82,14 @@ class mcDETECT:
  # [INNER] calculate tissue area, input for poisson_select()
  def tissue_area(self):
  x_bins, y_bins = self.construct_grid(grid_len = None)
- hist, _, _ = np.histogram2d(self.transcripts[…
+ hist, _, _ = np.histogram2d(self.transcripts["global_x"], self.transcripts["global_y"], bins = [x_bins, y_bins])
  area = np.count_nonzero(hist) * (self.grid_len ** 2)
  return area


  # [INNER] calculate optimal min_samples, input for dbscan()
  def poisson_select(self, gene_name):
- num_trans = np.sum(self.transcripts[…
+ num_trans = np.sum(self.transcripts["target"] == gene_name)
  bg_density = num_trans / self.tissue_area()
  cutoff_density = poisson.ppf(self.cutoff_prob, mu = self.alpha * bg_density * (np.pi * self.eps ** 2))
  optimal_m = int(max(cutoff_density, self.low_bound))
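poisson_select() turns a gene's background transcript density into a DBSCAN min_samples by taking a Poisson upper quantile of the expected count inside an eps-ball. A self-contained sketch with made-up numbers (eps, alpha, cutoff_prob, and low_bound mirror the attributes used above, but the values here are hypothetical):

    # Sketch: Poisson quantile -> min_samples, mirroring poisson_select().
    import numpy as np
    from scipy.stats import poisson

    eps = 1.5                    # DBSCAN search radius, same units as coordinates
    alpha = 1.0                  # hypothetical density multiplier
    cutoff_prob = 0.95           # upper-tail probability cutoff
    low_bound = 3                # floor on min_samples

    num_trans = 200_000          # transcripts of one gene (made up)
    tissue_area = 4.0e6          # occupied tissue area, square units (made up)

    bg_density = num_trans / tissue_area                 # transcripts per unit area
    mu = alpha * bg_density * (np.pi * eps ** 2)         # expected count in an eps-ball
    min_samples = int(max(poisson.ppf(cutoff_prob, mu=mu), low_bound))
    print(min_samples)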
@@ -97,32 +97,32 @@ class mcDETECT:


  # [INTERMEDIATE] dictionary, low- and high-in-nucleus spheres for each synaptic marker
- def dbscan(self, target_names = None, write_csv = False, write_path = …
+ def dbscan(self, target_names = None, write_csv = False, write_path = "./"):

- if self.type != …
- z_grid = list(np.unique(self.transcripts[…
+ if self.type != "Xenium":
+ z_grid = list(np.unique(self.transcripts["global_z"]))
  z_grid.sort()

  if target_names is None:
  target_names = self.syn_genes
- transcripts = self.transcripts[self.transcripts[…
+ transcripts = self.transcripts[self.transcripts["target"].isin(target_names)]

  num_individual, data_low, data_high = [], {}, {}

  for j in target_names:

  # split transcripts
- target = transcripts[transcripts[…
- others = transcripts[transcripts[…
- tree = make_tree(d1 = np.array(others[…
+ target = transcripts[transcripts["target"] == j]
+ others = transcripts[transcripts["target"] != j]
+ tree = make_tree(d1 = np.array(others["global_x"]), d2 = np.array(others["global_y"]), d3 = np.array(others["global_z"]))

  # 3D DBSCAN
  if self.minspl is None:
  min_spl = self.poisson_select(j)
  else:
  min_spl = self.minspl
- X = np.array(target[[…
- db = DBSCAN(eps = self.eps, min_samples = min_spl, algorithm = …
+ X = np.array(target[["global_x", "global_y", "global_z"]])
+ db = DBSCAN(eps = self.eps, min_samples = min_spl, algorithm = "kd_tree").fit(X)
  labels = db.labels_
  n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
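The clustering call itself is plain scikit-learn DBSCAN on an (n, 3) coordinate array. A minimal sketch on synthetic points, counting clusters while ignoring the noise label (-1) exactly as the hunk does:

    # Sketch of the 3D DBSCAN step on synthetic coordinates.
    import numpy as np
    from sklearn.cluster import DBSCAN

    rng = np.random.default_rng(0)
    cluster = rng.normal(loc=[10, 10, 5], scale=0.3, size=(50, 3))   # one dense blob
    noise = rng.uniform(low=0, high=20, size=(100, 3))               # scattered background
    X = np.vstack([cluster, noise])

    db = DBSCAN(eps=1.0, min_samples=10, algorithm="kd_tree").fit(X)
    labels = db.labels_
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)       # noise label -1 excluded
    print(n_clusters)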
@@ -133,12 +133,12 @@ class mcDETECT:

  # find minimum enclosing spheres
  temp = target[labels == k]
- temp_in_nucleus = np.sum(temp[…
+ temp_in_nucleus = np.sum(temp["overlaps_nucleus"])
  temp_size = temp.shape[0]
- temp = temp[[…
+ temp = temp[["global_x", "global_y", "global_z"]]
  temp = temp.drop_duplicates()
  center, r2 = miniball.get_bounding_ball(np.array(temp), epsilon=1e-8)
- if self.type != …
+ if self.type != "Xenium":
  closest_z = closest(z_grid, center[2])
  else:
  closest_z = center[2]
@@ -146,9 +146,9 @@ class mcDETECT:
  # calculate size, composition, and in-nucleus score
  other_idx = tree.query_ball_point([center[0], center[1], center[2]], np.sqrt(r2))
  other_trans = others.iloc[other_idx]
- other_in_nucleus = np.sum(other_trans[…
+ other_in_nucleus = np.sum(other_trans["overlaps_nucleus"])
  other_size = other_trans.shape[0]
- other_comp = len(np.unique(other_trans[…
+ other_comp = len(np.unique(other_trans["target"]))
  total_size = temp_size + other_size
  total_comp = 1 + other_comp
  local_score = (temp_in_nucleus + other_in_nucleus) / total_size
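These two hunks lean on miniball.get_bounding_ball(), which returns the center and squared radius of the minimum enclosing ball of a point cloud, plus a KD-tree ball query to collect the other transcripts falling inside that sphere. A sketch on synthetic points; scipy's cKDTree stands in here for the package's make_tree() helper, which is an assumption about what that helper wraps:

    # Sketch: minimum enclosing ball + neighbourhood query, on synthetic points.
    import numpy as np
    import miniball
    from scipy.spatial import cKDTree

    rng = np.random.default_rng(1)
    cluster_pts = rng.normal(loc=[0, 0, 0], scale=0.5, size=(30, 3))   # one DBSCAN cluster
    other_pts = rng.uniform(low=-3, high=3, size=(200, 3))             # transcripts of other genes

    center, r2 = miniball.get_bounding_ball(cluster_pts, epsilon=1e-8)  # r2 is the squared radius
    tree = cKDTree(other_pts)
    inside_idx = tree.query_ball_point(center, np.sqrt(r2))             # points inside the sphere
    print(center, np.sqrt(r2), len(inside_idx))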
@@ -165,16 +165,17 @@ class mcDETECT:

  # basic features for all spheres from each synaptic marker
  sphere = pd.DataFrame(list(zip(sphere_x, sphere_y, sphere_z, layer_z, sphere_r, sphere_size, sphere_comp, sphere_score)),
- columns = […
- sphere[…
+ columns = ["sphere_x", "sphere_y", "sphere_z", "layer_z", "sphere_r", "size", "comp", "in_nucleus"])
+ sphere["gene"] = [j] * sphere.shape[0]
+ sphere = sphere.astype({"sphere_x": float, "sphere_y": float, "sphere_z": float, "layer_z": int, "sphere_r": float, "size": float, "comp": float, "in_nucleus": int, "gene": str})

  # split low- and high-in-nucleus spheres
- sphere_low = sphere[(sphere[…
- sphere_high = sphere[(sphere[…
+ sphere_low = sphere[(sphere["sphere_r"] < self.size_thr) & (sphere["in_nucleus"] < self.in_nucleus_thr[0])]
+ sphere_high = sphere[(sphere["sphere_r"] < self.size_thr) & (sphere["in_nucleus"] > self.in_nucleus_thr[1])]

  if write_csv:
- sphere_low.to_csv(write_path + j + …
- sphere_high.to_csv(write_path + j + …
+ sphere_low.to_csv(write_path + j + " sphere.csv", index=0)
+ sphere_high.to_csv(write_path + j + " sphere_high.csv", index=0)

  num_individual.append(sphere_low.shape[0])
  data_low[target_names.index(j)] = sphere_low
@@ -186,16 +187,16 @@ class mcDETECT:

  # [INNER] merge points from two overlapped spheres, input for remove_overlaps()
  def find_points(self, sphere_a, sphere_b):
- transcripts = self.transcripts[self.transcripts[…
- tree_temp = make_tree(d1 = np.array(transcripts[…
- idx_a = tree_temp.query_ball_point([sphere_a[…
+ transcripts = self.transcripts[self.transcripts["target"].isin(self.syn_genes)]
+ tree_temp = make_tree(d1 = np.array(transcripts["global_x"]), d2 = np.array(transcripts["global_y"]), d3 = np.array(transcripts["global_z"]))
+ idx_a = tree_temp.query_ball_point([sphere_a["sphere_x"], sphere_a["sphere_y"], sphere_a["sphere_z"]], sphere_a["sphere_r"])
  points_a = transcripts.iloc[idx_a]
- points_a = points_a[points_a[…
- idx_b = tree_temp.query_ball_point([sphere_b[…
+ points_a = points_a[points_a["target"] == sphere_a["gene"]]
+ idx_b = tree_temp.query_ball_point([sphere_b["sphere_x"], sphere_b["sphere_y"], sphere_b["sphere_z"]], sphere_b["sphere_r"])
  points_b = transcripts.iloc[idx_b]
- points_b = points_b[points_b[…
+ points_b = points_b[points_b["target"] == sphere_b["gene"]]
  points = pd.concat([points_a, points_b])
- points = points[[…
+ points = points[["global_x", "global_y", "global_z"]]
  return points

@@ -239,10 +240,10 @@ class mcDETECT:
  elif not c1 and c2_1: # replace A with new sphere and remove B
  points_union = np.array(self.find_points(sphere_a, sphere_b))
  new_center, new_radius = miniball.get_bounding_ball(points_union, epsilon=1e-8)
- set_a.loc[i, …
- set_a.loc[i, …
- set_a.loc[i, …
- set_a.loc[i, …
+ set_a.loc[i, "sphere_x"] = new_center[0]
+ set_a.loc[i, "sphere_y"] = new_center[1]
+ set_a.loc[i, "sphere_z"] = new_center[2]
+ set_a.loc[i, "sphere_r"] = self.s * new_radius
  set_b.drop(index = j, inplace = True)

  set_a = set_a.reset_index(drop = True)
@@ -268,36 +269,36 @@ class mcDETECT:
  adata_low = self.profile(sphere_low, self.nc_genes)
  adata_high = self.profile(sphere_high, self.nc_genes)
  adata = anndata.concat([adata_low, adata_high], axis = 0, merge = "same")
- adata.var[…
+ adata.var["genes"] = adata.var.index
  adata.obs_keys = list(np.arange(adata.shape[0]))
- adata.obs[…
- adata.obs[…
+ adata.obs["type"] = ["low"] * adata_low.shape[0] + ["high"] * adata_high.shape[0]
+ adata.obs["type"] = pd.Categorical(adata.obs["type"], categories = ["low", "high"], ordered = True)

  # DE analysis of negative control genes
- sc.tl.rank_genes_groups(adata, …
- names = adata.uns[…
+ sc.tl.rank_genes_groups(adata, "type", method = "t-test")
+ names = adata.uns["rank_genes_groups"]["names"]
  names = pd.DataFrame(names)
- logfc = adata.uns[…
+ logfc = adata.uns["rank_genes_groups"]["logfoldchanges"]
  logfc = pd.DataFrame(logfc)
- pvals = adata.uns[…
+ pvals = adata.uns["rank_genes_groups"]["pvals"]
  pvals = pd.DataFrame(pvals)

  # select top upregulated negative control genes
- df = pd.DataFrame({…
- df = df[df[…
- df = df.sort_values(by = […
- nc_genes_final = list(df[…
+ df = pd.DataFrame({"names": names["high"], "logfc": logfc["high"], "pvals": pvals["high"]})
+ df = df[df["logfc"] >= 0]
+ df = df.sort_values(by = ["pvals"], ascending = True)
+ nc_genes_final = list(df["names"].head(self.nc_top))

  # negative control filtering
- nc_transcripts_final = self.transcripts[self.transcripts[…
- tree = make_tree(d1 = np.array(nc_transcripts_final[…
+ nc_transcripts_final = self.transcripts[self.transcripts["target"].isin(nc_genes_final)]
+ tree = make_tree(d1 = np.array(nc_transcripts_final["global_x"]), d2 = np.array(nc_transcripts_final["global_y"]), d3 = np.array(nc_transcripts_final["global_z"]))
  pass_idx = [0] * sphere_low.shape[0]
  for i in range(sphere_low.shape[0]):
  temp = sphere_low.iloc[i]
- nc_idx = tree.query_ball_point([temp[…
+ nc_idx = tree.query_ball_point([temp["sphere_x"], temp["sphere_y"], temp["sphere_z"]], temp["sphere_r"])
  if len(nc_idx) == 0:
  pass_idx[i] = 1
- elif len(nc_idx) / temp[…
+ elif len(nc_idx) / temp["size"] < self.nc_thr:
  pass_idx[i] = 2
  sphere = sphere_low[np.array(pass_idx) != 0]
  sphere = sphere.reset_index(drop = True)
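The negative-control selection in this hunk uses scanpy's rank_genes_groups with a t-test and then reads the names, logfoldchanges, and pvals record arrays for the "high" group out of adata.uns. A toy reproduction on random counts (gene names, group sizes, and counts are fabricated for illustration only):

    # Sketch of the DE step on a tiny synthetic AnnData with a "type" grouping.
    import numpy as np
    import pandas as pd
    import anndata
    import scanpy as sc

    rng = np.random.default_rng(2)
    X = rng.poisson(lam=2.0, size=(60, 5)).astype(float)              # 60 spheres x 5 genes
    obs = pd.DataFrame({"type": ["low"] * 30 + ["high"] * 30})
    adata = anndata.AnnData(X=X, obs=obs)
    adata.var_names = ["nc_gene_{}".format(k) for k in range(5)]      # hypothetical gene names
    adata.obs["type"] = pd.Categorical(adata.obs["type"], categories=["low", "high"], ordered=True)

    sc.tl.rank_genes_groups(adata, "type", method="t-test")
    names = pd.DataFrame(adata.uns["rank_genes_groups"]["names"])
    logfc = pd.DataFrame(adata.uns["rank_genes_groups"]["logfoldchanges"])
    pvals = pd.DataFrame(adata.uns["rank_genes_groups"]["pvals"])
    df = pd.DataFrame({"names": names["high"], "logfc": logfc["high"], "pvals": pvals["high"]})
    print(df.sort_values(by=["pvals"]).head())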
@@ -323,29 +324,30 @@ class mcDETECT:
  def profile(self, synapse, genes = None, print_itr = False):

  if genes is None:
- genes = list(np.unique(self.transcripts[…
+ genes = list(np.unique(self.transcripts["target"]))
  transcripts = self.transcripts
  else:
- transcripts = self.transcripts[self.transcripts[…
- tree = make_tree(d1 = np.array(transcripts[…
+ transcripts = self.transcripts[self.transcripts["target"].isin(genes)]
+ tree = make_tree(d1 = np.array(transcripts["global_x"]), d2 = np.array(transcripts["global_y"]), d3 = np.array(transcripts["global_z"]))

  # construct gene count matrix
  X = np.zeros((len(genes), synapse.shape[0]))
  for i in range(synapse.shape[0]):
  temp = synapse.iloc[i]
- target_idx = tree.query_ball_point([temp[…
+ target_idx = tree.query_ball_point([temp["sphere_x"], temp["sphere_y"], temp["layer_z"]], temp["sphere_r"])
  target_trans = transcripts.iloc[target_idx]
- target_gene = list(target_trans[…
+ target_gene = list(target_trans["target"])
  for j in np.unique(target_gene):
  X[genes.index(j), i] = target_gene.count(j)
  if (print_itr) & (i % 5000 == 0):
- print(…
+ print("{} out of {} synapses profiled!".format(i, synapse.shape[0]))

  # construct spatial transcriptome profile
  adata = anndata.AnnData(X = np.transpose(X), obs = synapse)
- adata.obs[…
- adata.obs…
- adata.…
+ adata.obs["synapse_id"] = ["syn_{}".format(i) for i in range(synapse.shape[0])]
+ adata.obs["synapse_id"] = adata.obs["synapse_id"].astype(str)
+ adata.obs.rename(columns = {"sphere_x": "global_x", "sphere_y": "global_y", "sphere_z": "global_z"}, inplace = True)
+ adata.var["genes"] = genes
  adata.var_names = genes
  adata.var_keys = genes
  return adata
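profile() ends by wrapping the genes-by-spheres count matrix into an AnnData whose obs carries the sphere table and whose var lists the genes. A minimal sketch of that assembly with a tiny random matrix and hypothetical marker names:

    # Sketch of the AnnData assembly at the end of profile(); data are fabricated.
    import numpy as np
    import pandas as pd
    import anndata

    genes = ["Snap25", "Syp", "Dlg4"]   # illustrative marker names, not taken from the package
    synapse = pd.DataFrame({"sphere_x": [1.0, 2.0], "sphere_y": [3.0, 4.0], "sphere_r": [0.7, 0.9]})

    X = np.random.default_rng(5).poisson(1.0, size=(len(genes), synapse.shape[0]))  # genes x spheres
    adata = anndata.AnnData(X=np.transpose(X).astype(float), obs=synapse)           # spheres become obs
    adata.obs["synapse_id"] = ["syn_{}".format(i) for i in range(synapse.shape[0])]
    adata.var["genes"] = genes
    adata.var_names = genes
    print(adata)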
@@ -355,10 +357,10 @@ class mcDETECT:
  def spot_expression(self, grid_len, genes = None):

  if genes is None:
- genes = list(np.unique(self.transcripts[…
+ genes = list(np.unique(self.transcripts["target"]))
  transcripts = self.transcripts
  else:
- transcripts = self.transcripts[self.transcripts[…
+ transcripts = self.transcripts[self.transcripts["target"].isin(genes)]

  # construct bins
  x_bins, y_bins = self.construct_grid(grid_len = grid_len)
@@ -377,8 +379,8 @@ class mcDETECT:

  # count matrix
  for k_idx, k in enumerate(genes):
- target_gene = transcripts[transcripts[…
- count_gene, _, _ = np.histogram2d(target_gene[…
+ target_gene = transcripts[transcripts["target"] == k]
+ count_gene, _, _ = np.histogram2d(target_gene["global_x"], target_gene["global_y"], bins = [x_bins, y_bins])
  X[k_idx, :] = count_gene.flatten()
  if k_idx % 100 == 0:
  print("{} out of {} genes profiled!".format(k_idx, len(genes)))
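spot_expression() builds its count matrix one gene at a time: np.histogram2d counts that gene's transcripts per spatial bin, and flattening the 2D histogram gives one row of the gene-by-spot matrix. A standalone sketch with synthetic coordinates and an arbitrary grid_len:

    # Sketch of the per-gene grid binning used by spot_expression().
    import numpy as np

    rng = np.random.default_rng(3)
    x = rng.uniform(0, 100, size=10_000)    # transcript x coordinates for one gene (synthetic)
    y = rng.uniform(0, 50, size=10_000)     # transcript y coordinates (synthetic)

    grid_len = 10.0
    x_bins = np.arange(0, 100 + grid_len, grid_len)
    y_bins = np.arange(0, 50 + grid_len, grid_len)

    counts, _, _ = np.histogram2d(x, y, bins=[x_bins, y_bins])
    row = counts.flatten()                  # one gene's counts across all spots
    print(counts.shape, row.sum())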
@@ -386,15 +388,15 @@ class mcDETECT:
  # spot id
  spot_id = []
  for i in range(len(global_x)):
- id = …
+ id = "spot_" + str(i)
  spot_id.append(id)

  # assemble data
  adata = anndata.AnnData(X = np.transpose(X))
- adata.obs[…
- adata.obs[…
- adata.obs[…
- adata.var[…
+ adata.obs["spot_id"] = spot_id
+ adata.obs["global_x"] = global_x
+ adata.obs["global_y"] = global_y
+ adata.var["genes"] = genes
  adata.var_names = genes
  adata.var_keys = genes
  return adata
@@ -403,7 +405,7 @@ class mcDETECT:
  # [MAIN] anndata, spot-level synapse metadata
  def spot_synapse(self, synapse, spot):

- x_grid, y_grid = list(np.unique(spot.obs[…
+ x_grid, y_grid = list(np.unique(spot.obs["global_x"])), list(np.unique(spot.obs["global_y"]))
  diameter = x_grid[1] - x_grid[0]

  indicator, synapse_count, synapse_radius, synapse_size, synapse_score = [], [], [], [], []
@@ -413,7 +415,7 @@ class mcDETECT:
  for j in y_grid:
  y_min_temp = j
  y_max_temp = j + diameter
- syn_temp = synapse[(synapse[…
+ syn_temp = synapse[(synapse["sphere_x"] > x_min_temp) & (synapse["sphere_x"] < x_max_temp) & (synapse["sphere_y"] > y_min_temp) & (synapse["sphere_y"] < y_max_temp)]
  indicator.append(int(syn_temp.shape[0] > 0))
  synapse_count.append(syn_temp.shape[0])
  if syn_temp.shape[0] == 0:
@@ -421,13 +423,13 @@ class mcDETECT:
  synapse_size.append(0)
  synapse_score.append(0)
  else:
- synapse_radius.append(np.nanmean(syn_temp[…
- synapse_size.append(np.nanmean(syn_temp[…
- synapse_score.append(np.nanmean(syn_temp[…
+ synapse_radius.append(np.nanmean(syn_temp["sphere_r"]))
+ synapse_size.append(np.nanmean(syn_temp["size"]))
+ synapse_score.append(np.nanmean(syn_temp["in_nucleus"]))

- spot.obs[…
- spot.obs[…
- spot.obs[…
- spot.obs[…
- spot.obs[…
+ spot.obs["indicator"] = indicator
+ spot.obs["syn_count"] = synapse_count
+ spot.obs["syn_radius"] = synapse_radius
+ spot.obs["syn_size"] = synapse_size
+ spot.obs["syn_score"] = synapse_score
  return spot
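spot_synapse() walks the spot grid and, per square bin, counts the spheres whose centers fall inside and averages their per-sphere statistics. A toy version of that loop (tiny hand-made DataFrame; column names follow the sphere table above):

    # Sketch of the per-spot aggregation in spot_synapse(); data are fabricated.
    import numpy as np
    import pandas as pd

    synapse = pd.DataFrame({
        "sphere_x": [3.0, 7.5, 12.0],
        "sphere_y": [4.0, 8.0, 1.0],
        "sphere_r": [0.8, 1.1, 0.9],
    })
    x_grid, y_grid = [0.0, 10.0], [0.0, 10.0]   # lower-left corners of 10x10 spots
    diameter = x_grid[1] - x_grid[0]

    for i in x_grid:
        for j in y_grid:
            inside = synapse[(synapse["sphere_x"] > i) & (synapse["sphere_x"] < i + diameter) &
                             (synapse["sphere_y"] > j) & (synapse["sphere_y"] < j + diameter)]
            count = inside.shape[0]
            mean_r = np.nanmean(inside["sphere_r"]) if count > 0 else 0
            print(i, j, count, mean_r)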
{mcdetect-1.0.10.dist-info → mcdetect-1.0.12.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: mcDETECT
- Version: 1.0.10
+ Version: 1.0.12
  Summary: mcDETECT: Decoding 3D Spatial Synaptic Transcriptomes with Subcellular-Resolution Spatial Transcriptomics
  Home-page: https://github.com/chen-yang-yuan/mcDETECT
  Author: Chenyang Yuan
mcdetect-1.0.12.dist-info/RECORD
ADDED

@@ -0,0 +1,7 @@
+ mcDETECT/__init__.py,sha256=8DC3jJ35kT7b51bP9HtbDsCRc8_vT6nUaXCZBaSM5Tg,59
+ mcDETECT/model.py,sha256=pl6BOByor3Czj1UbxQX7_VzBUyNhz1tG_z7IGz2nR80,21462
+ mcdetect-1.0.12.dist-info/LICENSE,sha256=uxq-shEWOGTIGVnQLmpElILmfCkuUhFZRAMnZUiKvtg,1070
+ mcdetect-1.0.12.dist-info/METADATA,sha256=AJjMolAwV98Px9PioTv0U_iJl0ypTKMFRgSvCQbBkAg,2820
+ mcdetect-1.0.12.dist-info/WHEEL,sha256=beeZ86-EfXScwlR_HKu4SllMC9wUEj_8Z_4FJ3egI2w,91
+ mcdetect-1.0.12.dist-info/top_level.txt,sha256=WwzBojt5U-T2hZ8llO6XgpM9OFIBkWQQldQKu19O8EY,9
+ mcdetect-1.0.12.dist-info/RECORD,,
mcdetect-1.0.10.dist-info/RECORD
DELETED

@@ -1,7 +0,0 @@
- mcDETECT/__init__.py,sha256=DDCNNCllOaq158zLqk0lLeCCY2lsM9Ku0lQOIchh4sQ,59
- mcDETECT/model.py,sha256=L9iQyLuvZzXDxL4Zv4xJGo3o2YgbL49UtLXMmVHhxYY,21201
- mcdetect-1.0.10.dist-info/LICENSE,sha256=uxq-shEWOGTIGVnQLmpElILmfCkuUhFZRAMnZUiKvtg,1070
- mcdetect-1.0.10.dist-info/METADATA,sha256=dDP_ZSa4AdBSHtknxbD1ZK4FxO_VP6FqYXnYmEwxTkg,2820
- mcdetect-1.0.10.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
- mcdetect-1.0.10.dist-info/top_level.txt,sha256=WwzBojt5U-T2hZ8llO6XgpM9OFIBkWQQldQKu19O8EY,9
- mcdetect-1.0.10.dist-info/RECORD,,
{mcdetect-1.0.10.dist-info → mcdetect-1.0.12.dist-info}/LICENSE
File without changes

{mcdetect-1.0.10.dist-info → mcdetect-1.0.12.dist-info}/top_level.txt
File without changes