SURE-tools 2.1.21__tar.gz → 2.1.24__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of SURE-tools might be problematic. See the package registry's advisory page for more details.
- {sure_tools-2.1.21 → sure_tools-2.1.24}/PKG-INFO +1 -1
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/PerturbFlow.py +1 -1
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/SURE.py +2 -2
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/utils/custom_mlp.py +30 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE_tools.egg-info/PKG-INFO +1 -1
- {sure_tools-2.1.21 → sure_tools-2.1.24}/setup.py +1 -1
- {sure_tools-2.1.21 → sure_tools-2.1.24}/LICENSE +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/README.md +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/__init__.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/assembly/__init__.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/assembly/assembly.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/assembly/atlas.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/atac/__init__.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/atac/utils.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/codebook/__init__.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/codebook/codebook.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/flow/__init__.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/flow/flow_stats.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/flow/plot_quiver.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/perturb/__init__.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/perturb/perturb.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/utils/__init__.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/utils/queue.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE/utils/utils.py +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE_tools.egg-info/SOURCES.txt +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE_tools.egg-info/dependency_links.txt +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE_tools.egg-info/entry_points.txt +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE_tools.egg-info/requires.txt +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/SURE_tools.egg-info/top_level.txt +0 -0
- {sure_tools-2.1.21 → sure_tools-2.1.24}/setup.cfg +0 -0
|
@@ -196,7 +196,7 @@ class PerturbFlow(nn.Module):
|
|
|
196
196
|
if self.cell_factor_size>0:
|
|
197
197
|
self.cell_factor_effect = nn.ModuleList()
|
|
198
198
|
for i in np.arange(self.cell_factor_size):
|
|
199
|
-
self.cell_factor_effect.append(
|
|
199
|
+
self.cell_factor_effect.append(ZeroBiasMLP(
|
|
200
200
|
[self.latent_dim+1] + hidden_sizes + [self.latent_dim],
|
|
201
201
|
activation=activate_fct,
|
|
202
202
|
output_activation=None,
|
|
@@ -10,7 +10,7 @@ from torch.distributions.utils import logits_to_probs, probs_to_logits, clamp_pr
|
|
|
10
10
|
from torch.distributions import constraints
|
|
11
11
|
from torch.distributions.transforms import SoftmaxTransform
|
|
12
12
|
|
|
13
|
-
from .utils.custom_mlp import MLP, Exp
|
|
13
|
+
from .utils.custom_mlp import MLP, Exp, ZeroBiasMLP
|
|
14
14
|
from .utils.utils import CustomDataset, CustomDataset2, CustomDataset4, tensor_to_numpy, convert_to_tensor
|
|
15
15
|
|
|
16
16
|
|
|
@@ -232,7 +232,7 @@ class SURE(nn.Module):
|
|
|
232
232
|
)
|
|
233
233
|
|
|
234
234
|
if self.cell_factor_size>0:
|
|
235
|
-
self.cell_factor_effect =
|
|
235
|
+
self.cell_factor_effect = ZeroBiasMLP(
|
|
236
236
|
[self.latent_dim + self.cell_factor_size] + self.decoder_hidden_layers + [self.latent_dim],
|
|
237
237
|
activation=activate_fct,
|
|
238
238
|
output_activation=None,
|
|
@@ -212,3 +212,33 @@ class MLP(nn.Module):
|
|
|
212
212
|
|
|
213
213
|
|
|
214
214
|
|
|
215
|
+
|
|
216
|
+
class ZeroBiasMLP(nn.Module):
    """Bias-free MLP whose output is zeroed per sample by a gating input.

    Wraps ``MLP`` (constructed with ``bias=False``) and, in ``forward``,
    keeps a sample's output row only when the first column of the second
    element of the input (``x[1][:, 0]``) is strictly positive; all other
    rows are set to zero.

    Note: ``forward`` assumes ``x`` is a sequence (e.g. list/tuple of
    tensors) whose second element is a 2-D tensor — TODO confirm against
    callers that pass ``allow_broadcast`` inputs.
    """

    def __init__(
        self,
        mlp_sizes,
        activation=nn.ReLU,
        output_activation=None,
        post_layer_fct=lambda layer_ix, total_layers, layer: None,
        post_act_fct=lambda layer_ix, total_layers, layer: None,
        allow_broadcast=False,
        use_cuda=False,
    ):
        # Initialize the nn.Module machinery before registering submodules.
        super().__init__()
        # Delegate all computation to a bias-free MLP; without bias terms a
        # zero input maps to a zero output, matching the class's contract.
        self.mlp = MLP(
            mlp_sizes=mlp_sizes,
            activation=activation,
            output_activation=output_activation,
            post_layer_fct=post_layer_fct,
            post_act_fct=post_act_fct,
            allow_broadcast=allow_broadcast,
            use_cuda=use_cuda,
            bias=False,
        )

    def forward(self, x):
        """Run the wrapped MLP, then zero rows whose gate is not positive."""
        out = self.mlp(x)
        # Build a 0/1 gate the same shape as the output: rows where the
        # gating column x[1][:, 0] is > 0 pass through, the rest are zeroed.
        gate = torch.zeros_like(out)
        gate[x[1][:, 0] > 0, :] = 1
        return out * gate
|
|
244
|
+
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|