SURE-tools 2.1.20__tar.gz → 2.1.22__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of SURE-tools might be problematic.
- {sure_tools-2.1.20 → sure_tools-2.1.22}/PKG-INFO +1 -1
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/PerturbFlow.py +8 -13
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/SURE.py +0 -1
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/utils/custom_mlp.py +26 -17
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE_tools.egg-info/PKG-INFO +1 -1
- {sure_tools-2.1.20 → sure_tools-2.1.22}/setup.py +1 -1
- {sure_tools-2.1.20 → sure_tools-2.1.22}/LICENSE +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/README.md +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/__init__.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/assembly/__init__.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/assembly/assembly.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/assembly/atlas.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/atac/__init__.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/atac/utils.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/codebook/__init__.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/codebook/codebook.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/flow/__init__.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/flow/flow_stats.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/flow/plot_quiver.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/perturb/__init__.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/perturb/perturb.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/utils/__init__.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/utils/queue.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE/utils/utils.py +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE_tools.egg-info/SOURCES.txt +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE_tools.egg-info/dependency_links.txt +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE_tools.egg-info/entry_points.txt +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE_tools.egg-info/requires.txt +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/SURE_tools.egg-info/top_level.txt +0 -0
- {sure_tools-2.1.20 → sure_tools-2.1.22}/setup.cfg +0 -0
```diff
--- sure_tools-2.1.20/SURE/PerturbFlow.py
+++ sure_tools-2.1.22/SURE/PerturbFlow.py
@@ -196,20 +196,15 @@ class PerturbFlow(nn.Module):
         if self.cell_factor_size>0:
             self.cell_factor_effect = nn.ModuleList()
             for i in np.arange(self.cell_factor_size):
-                #self.cell_factor_effect.append(MLP(
-                #    [self.latent_dim+1] + hidden_sizes + [self.latent_dim],
-                #    activation=activate_fct,
-                #    output_activation=None,
-                #    post_layer_fct=post_layer_fct,
-                #    post_act_fct=post_act_fct,
-                #    allow_broadcast=self.allow_broadcast,
-                #    use_cuda=self.use_cuda,
-                #    bias=False,
-                #)
-                #)
                 self.cell_factor_effect.append(ZeroBiasMLP(
-                    self.latent_dim+1
-
+                    [self.latent_dim+1] + hidden_sizes + [self.latent_dim],
+                    activation=activate_fct,
+                    output_activation=None,
+                    post_layer_fct=post_layer_fct,
+                    post_act_fct=post_act_fct,
+                    allow_broadcast=self.allow_broadcast,
+                    use_cuda=self.use_cuda,
+                    )
                 )
 
         self.decoder_concentrate = MLP(
```
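Net effect in PerturbFlow.__init__: the long commented-out MLP block is dropped, and each per-factor ZeroBiasMLP is now built from the full [latent_dim+1] → hidden_sizes → latent_dim size list plus the usual keyword arguments, instead of a bare input width. A minimal self-contained sketch of what one such bias-free network computes, with illustrative values for latent_dim and hidden_sizes and plain nn.Linear/nn.ReLU standing in for SURE's own MLP class:

```python
import torch
import torch.nn as nn

# Hypothetical stand-in for one entry of self.cell_factor_effect after the
# patch: a bias-free MLP over [latent_dim + 1] -> hidden_sizes -> [latent_dim].
# latent_dim and hidden_sizes are illustrative values, not taken from the package.
latent_dim, hidden_sizes = 10, [128]
dims = [latent_dim + 1] + hidden_sizes + [latent_dim]

layers = []
for i in range(len(dims) - 1):
    layers.append(nn.Linear(dims[i], dims[i + 1], bias=False))  # bias=False, as in the diff
    if i < len(dims) - 2:
        layers.append(nn.ReLU())
net = nn.Sequential(*layers)

z = torch.randn(4, latent_dim)           # latent state for 4 cells
u = torch.randn(4, 1)                    # one scalar cell-factor value per cell
delta = net(torch.cat([z, u], dim=-1))   # per-factor effect in latent space
print(delta.shape)                       # torch.Size([4, 10])
```

The extra input column is where the scalar cell-factor value enters; the output lives back in the latent space, so each factor's network produces an additive latent-space effect.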
```diff
--- sure_tools-2.1.20/SURE/utils/custom_mlp.py
+++ sure_tools-2.1.22/SURE/utils/custom_mlp.py
@@ -212,24 +212,33 @@ class MLP(nn.Module):
 
 
 
+
 class ZeroBiasMLP(nn.Module):
-    def __init__(
+    def __init__(
+        self,
+        mlp_sizes,
+        activation=nn.ReLU,
+        output_activation=None,
+        post_layer_fct=lambda layer_ix, total_layers, layer: None,
+        post_act_fct=lambda layer_ix, total_layers, layer: None,
+        allow_broadcast=False,
+        use_cuda=False,
+    ):
+        # init the module object
         super().__init__()
-
-
+        self.mlp = MLP(mlp_sizes=mlp_sizes,
+                       activation=activation,
+                       output_activation=output_activation,
+                       post_layer_fct=post_layer_fct,
+                       post_act_fct=post_act_fct,
+                       allow_broadcast=allow_broadcast,
+                       use_cuda=use_cuda,
+                       bias=False)
 
-
-        for i in range(len(dims)-1):
-            linear = nn.Linear(dims[i], dims[i+1])
-            nn.init.zeros_(linear.bias)  # initialize the bias to 0
-            layers.append(linear)
-            if i < len(dims)-2:
-                layers.append(nn.ReLU())  # activation function
-
-        self.net = nn.Sequential(*layers)
-
+    # pass through our sequential for the output!
     def forward(self, x):
-
-
-
-
+        y = self.mlp(x)
+        mask = torch.zeros_like(y)
+        mask[x[1]>0,:] = 1
+        return y*mask
+
```
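The rewritten ZeroBiasMLP no longer builds its own nn.Sequential of zero-initialized Linear layers; it wraps the module's existing MLP with bias=False and then zeroes the output rows of samples whose factor value (x[1] in the diff, apparently the second element of a broadcast input pair) is not positive. A minimal sketch of that gating, assuming the input is a (latent, factor) pair and using a single bias-free nn.Linear in place of the wrapped MLP:

```python
import torch
import torch.nn as nn

# Self-contained sketch of the gating in the new ZeroBiasMLP.forward.
# ZeroBiasMLPSketch is a hypothetical stand-in; the real class forwards its
# broadcast input pair through SURE's MLP (built with bias=False) instead.
class ZeroBiasMLPSketch(nn.Module):
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.mlp = nn.Linear(in_dim, out_dim, bias=False)

    def forward(self, z, factor):
        # factor plays the role of x[1] in the diff: rows whose factor
        # value is <= 0 get their entire output row zeroed out.
        y = self.mlp(torch.cat([z, factor], dim=-1))
        mask = torch.zeros_like(y)
        mask[factor.squeeze(-1) > 0, :] = 1
        return y * mask

z = torch.randn(4, 10)
factor = torch.tensor([[1.0], [0.0], [2.0], [0.0]])
out = ZeroBiasMLPSketch(11, 10)(z, factor)
print(out[1].abs().sum().item(), out[3].abs().sum().item())  # 0.0 0.0
```

Because every layer is bias-free and the mask zeroes unperturbed rows, a cell without a given factor receives exactly zero latent shift from that factor's network, which appears to be the point of the class.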