SURE-tools 2.1.15.tar.gz → 2.1.16.tar.gz
This diff reflects the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of SURE-tools might be problematic.
- {sure_tools-2.1.15 → sure_tools-2.1.16}/PKG-INFO +1 -1
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/utils/custom_mlp.py +6 -4
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE_tools.egg-info/PKG-INFO +1 -1
- {sure_tools-2.1.15 → sure_tools-2.1.16}/setup.py +1 -1
- {sure_tools-2.1.15 → sure_tools-2.1.16}/LICENSE +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/README.md +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/PerturbFlow.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/SURE.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/__init__.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/assembly/__init__.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/assembly/assembly.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/assembly/atlas.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/atac/__init__.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/atac/utils.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/codebook/__init__.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/codebook/codebook.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/flow/__init__.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/flow/flow_stats.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/flow/plot_quiver.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/perturb/__init__.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/perturb/perturb.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/utils/__init__.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/utils/queue.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE/utils/utils.py +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE_tools.egg-info/SOURCES.txt +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE_tools.egg-info/dependency_links.txt +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE_tools.egg-info/entry_points.txt +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE_tools.egg-info/requires.txt +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/SURE_tools.egg-info/top_level.txt +0 -0
- {sure_tools-2.1.15 → sure_tools-2.1.16}/setup.cfg +0 -0
SURE/utils/custom_mlp.py

@@ -85,6 +85,7 @@ class MLP(nn.Module):
         post_act_fct=lambda layer_ix, total_layers, layer: None,
         allow_broadcast=False,
         use_cuda=False,
+        bias=True,
     ):
         # init the module object
         super().__init__()
@@ -114,11 +115,12 @@ class MLP(nn.Module):
             assert type(layer_size) == int, "Hidden layer sizes must be ints"
 
             # get our nn layer module (in this case nn.Linear by default)
-            cur_linear_layer = nn.Linear(last_layer_size, layer_size)
+            cur_linear_layer = nn.Linear(last_layer_size, layer_size, bias=bias)
 
             # for numerical stability -- initialize the layer properly
             cur_linear_layer.weight.data.normal_(0, 0.001)
-
+            if bias:
+                cur_linear_layer.bias.data.normal_(0, 0.001)
 
             # use GPUs to share data during training (if available)
             if use_cuda:
@@ -160,7 +162,7 @@ class MLP(nn.Module):
         ), "output_size must be int, list, tuple"
 
         if type(output_size) == int:
-            all_modules.append(nn.Linear(last_layer_size, output_size))
+            all_modules.append(nn.Linear(last_layer_size, output_size, bias=bias))
             if output_activation is not None:
                 all_modules.append(
                     call_nn_op(output_activation)
@@ -179,7 +181,7 @@ class MLP(nn.Module):
                 split_layer = []
 
                 # we have an activation function
-                split_layer.append(nn.Linear(last_layer_size, out_size))
+                split_layer.append(nn.Linear(last_layer_size, out_size, bias=bias))
 
                 # then we get our output activation (either we repeat all or we index into a same sized array)
                 act_out_fct = (
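Aside from the version bump in PKG-INFO and setup.py, the change is the new `bias` keyword: it is forwarded to every `nn.Linear` that MLP constructs, and the small-normal bias initialization only runs when the flag is left on. A minimal usage sketch follows; the positional layer-sizes argument and its values are assumptions based on the Pyro-style custom_mlp constructor and are not confirmed by this diff, only `bias` itself comes from the hunks above.

from SURE.utils.custom_mlp import MLP

# Hypothetical example: everything except `bias` (sizes, other keyword names)
# is assumed from the Pyro-style custom_mlp constructor, not shown in this diff.
mlp = MLP(
    [128, 64, 32],   # input -> hidden -> output sizes (assumed argument)
    use_cuda=False,
    bias=False,      # new in 2.1.16: build bias-free nn.Linear layers
)

# With bias=False, nn.Linear registers no `.bias` parameter, so the
# `cur_linear_layer.bias.data.normal_(0, 0.001)` branch never runs and only
# weight tensors are registered:
print([name for name, _ in mlp.named_parameters()])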