octopi 1.1__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- octopi/__init__.py +1 -0
- octopi/datasets/cached_datset.py +1 -1
- octopi/datasets/generators.py +1 -1
- octopi/datasets/io.py +200 -0
- octopi/datasets/multi_config_generator.py +1 -1
- octopi/entry_points/common.py +5 -5
- octopi/entry_points/create_slurm_submission.py +1 -1
- octopi/entry_points/run_create_targets.py +6 -6
- octopi/entry_points/run_evaluate.py +4 -3
- octopi/entry_points/run_extract_mb_picks.py +5 -5
- octopi/entry_points/run_localize.py +8 -9
- octopi/entry_points/run_optuna.py +7 -7
- octopi/entry_points/run_segment_predict.py +4 -4
- octopi/entry_points/run_train.py +7 -8
- octopi/extract/localize.py +11 -19
- octopi/extract/membranebound_extract.py +11 -10
- octopi/extract/midpoint_extract.py +3 -3
- octopi/models/common.py +1 -1
- octopi/processing/create_targets_from_picks.py +3 -4
- octopi/processing/evaluate.py +24 -11
- octopi/processing/importers.py +4 -4
- octopi/pytorch/hyper_search.py +2 -3
- octopi/pytorch/model_search_submitter.py +4 -4
- octopi/pytorch/segmentation.py +141 -190
- octopi/pytorch/segmentation_multigpu.py +162 -0
- octopi/pytorch/trainer.py +2 -2
- octopi/utils/__init__.py +0 -0
- octopi/utils/config.py +57 -0
- octopi/utils/io.py +128 -0
- octopi/{utils.py → utils/parsers.py} +10 -84
- octopi/{stopping_criteria.py → utils/stopping_criteria.py} +3 -3
- octopi/{visualization_tools.py → utils/visualization_tools.py} +4 -4
- octopi/workflows.py +236 -0
- {octopi-1.1.dist-info → octopi-1.2.0.dist-info}/METADATA +41 -29
- octopi-1.2.0.dist-info/RECORD +62 -0
- {octopi-1.1.dist-info → octopi-1.2.0.dist-info}/WHEEL +1 -1
- octopi-1.2.0.dist-info/entry_points.txt +3 -0
- {octopi-1.1.dist-info → octopi-1.2.0.dist-info/licenses}/LICENSE +3 -3
- octopi/io.py +0 -457
- octopi/processing/my_metrics.py +0 -26
- octopi/processing/writers.py +0 -102
- octopi-1.1.dist-info/RECORD +0 -59
- octopi-1.1.dist-info/entry_points.txt +0 -4
- /octopi/{losses.py → utils/losses.py} +0 -0
- /octopi/{submit_slurm.py → utils/submit_slurm.py} +0 -0
octopi/extract/membranebound_extract.py
CHANGED

@@ -1,5 +1,5 @@
 from scipy.spatial.transform import Rotation as R
-from
+from copick_utils.io import readers
 import scipy.ndimage as ndi
 from typing import Tuple
 import numpy as np
@@ -36,7 +36,7 @@ def process_membrane_bound_extract(run,
     new_session_id = str(int(save_session_id) + 1) # Convert to string after increment
 
     # Need Better Error Handing for Missing Picks
-    coordinates =
+    coordinates = readers.coordinates(
         run,
         picks_info[0], picks_info[1], picks_info[2],
         voxel_size,

@@ -54,12 +54,13 @@ def process_membrane_bound_extract(run,
     if membrane_info is None:
         # Flag to distinguish between organelle and membrane segmentation
         membranes_provided = False
-        seg =
-
-
-
-
-
+        seg = readers.segmentation(
+            run,
+            voxel_size,
+            organelle_info[0],
+            user_id=organelle_info[1],
+            session_id=organelle_info[2],
+            raise_error=False)
     # If No Segmentation is Found, Return
     if seg is None: return
     elif nPoints == 0 or np.unique(seg).max() == 0:

@@ -68,7 +69,7 @@
     else:
         # Read both Organelle and Membrane Segmentations
         membranes_provided = True
-        seg =
+        seg = readers.segmentation(
             run,
             voxel_size,
             membrane_info[0],

@@ -76,7 +77,7 @@
             session_id=membrane_info[2],
             raise_error=False)
 
-        organelle_seg =
+        organelle_seg = readers.segmentation(
             run,
             voxel_size,
             organelle_info[0],
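Taken together, these hunks swap octopi's internal readers for copick_utils.io. Below is a minimal sketch of the new segmentation read path; the config path, run name, object/user/session names, and voxel size are hypothetical, and only the readers.segmentation call shape is taken from the diff.

import copick
from copick_utils.io import readers

root = copick.from_file('config.json')   # hypothetical copick project config
run = root.get_run('Run_001')            # hypothetical run name

# Call shape as used above: name is positional, user/session are keywords,
# and raise_error=False makes a missing segmentation return None.
seg = readers.segmentation(run, 10.0, 'organelle',
                           user_id='curator', session_id='1',
                           raise_error=False)
if seg is None:
    print('No organelle segmentation found for this run.')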
octopi/extract/midpoint_extract.py
CHANGED

@@ -1,6 +1,6 @@
 from octopi.extract import membranebound_extract as extract
 from scipy.spatial.transform import Rotation as R
-from
+from copick_utils.io import readers
 from scipy.spatial import cKDTree
 from typing import Tuple
 import numpy as np

@@ -28,7 +28,7 @@ def process_midpoint_extract(
     """
 
     # Pull Picks that Are used for Midpoint Extraction
-    coordinates =
+    coordinates = readers.coordinates(
         run,
         picks_info[0], picks_info[1], picks_info[2],
         voxel_size

@@ -40,7 +40,7 @@
     save_picks_info[2] = save_session_id
 
     # Get Organelle Segmentation
-    seg =
+    seg = readers.segmentation(
         run,
         voxel_size,
         organelle_info[0],
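The same migration applies to pick reading. A hedged sketch of readers.coordinates as called above; the object name, user id, session id, and voxel size are placeholders.

import copick
from copick_utils.io import readers

root = copick.from_file('config.json')   # hypothetical copick project config
run = root.get_run('Run_001')            # hypothetical run name

# Argument order as in the diff: run, object name, user id, session id,
# voxel size; evaluate.py additionally passes raise_error=False.
coordinates = readers.coordinates(run, 'ribosome', 'curator', '1', 10.0)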
octopi/processing/create_targets_from_picks.py
CHANGED

@@ -1,6 +1,5 @@
 from octopi.processing.segmentation_from_picks import from_picks
-
-from octopi import io
+from copick_utils.io import readers, writers
 from typing import List
 from tqdm import tqdm
 import numpy as np

@@ -56,7 +55,7 @@ def generate_targets(
         run = root.get_run(runID)
 
         # Get Tomogram
-        tomo =
+        tomo = readers.tomogram(run, voxel_size, tomo_algorithm)
 
         # Initialize Target Volume
         target = np.zeros(tomo.shape, dtype=np.uint8)

@@ -107,7 +106,7 @@
         # Write Segmentation for non-empty targets
         if target.max() > 0 and numPicks > 0:
             tqdm.write(f'Annotating {numPicks} picks in {runID}...')
-
+            writers.segmentation(run, target, target_user_name,
                 name = target_segmentation_name, session_id= target_session_id,
                 voxel_size = voxel_size)
     print('Creation of targets complete!')
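On the write side, the new writers module takes the volume plus copick metadata directly. A sketch with a toy target volume; the project config, run name, user name, segmentation name, and voxel size are hypothetical, while the keyword layout follows the hunk above.

import numpy as np
import copick
from copick_utils.io import writers

root = copick.from_file('config.json')   # hypothetical copick project config
run = root.get_run('Run_001')            # hypothetical run name

target = np.zeros((64, 128, 128), dtype=np.uint8)
target[32, 64, 64] = 1                   # toy non-empty target

writers.segmentation(run, target, 'octopi',
                     name='targets', session_id='1',
                     voxel_size=10.0)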
octopi/processing/evaluate.py
CHANGED
@@ -1,7 +1,7 @@
-from
+from copick_utils.io import readers
 from scipy.spatial import distance
+import copick, json, os, yaml
 from typing import List
-import copick, json, os
 import numpy as np
 
 class evaluator:

@@ -95,12 +95,12 @@ class evaluator:
         for name, radius in self.objects:
 
             # Get Ground Truth and Predicted Coordinates
-            gt_coordinates =
+            gt_coordinates = readers.coordinates(
                 run, name,
                 self.ground_truth_user_id, self.ground_truth_session_id,
                 self.voxel_size, raise_error=False
             )
-            pred_coordinates =
+            pred_coordinates = readers.coordinates(
                 run, name,
                 self.prediction_user_id, self.predict_session_id,
                 self.voxel_size, raise_error=False

@@ -202,14 +202,27 @@
         }
 
         os.makedirs(save_path, exist_ok=True)
-        summary_metrics = { "input": self.input_params,
-
-
-
-
+        summary_metrics = { "input": self.input_params,
+                            "final_fbeta_score": final_fbeta,
+                            "aggregated_particle_scores": { # Optionally add per-particle details
+                                name: {
+                                    "tp": counts['total_tp'],
+                                    "fp": counts['total_fp'],
+                                    "fn": counts['total_fn'],
+                                    "weight": self.weights.get(name, 1)
+                                } for name, counts in aggregated_counts.items()
+                            },
+                            "summary_metrics": final_summary_metrics,
+                            "parameters": self.parameters, }
+
+        # Save average metrics to YAML file
+        with open(os.path.join(save_path, 'average_metrics.yaml'), 'w') as f:
+            yaml.dump(summary_metrics, f, indent=4, default_flow_style=False, sort_keys=False)
+        print(f'\nAverage Metrics saved to {os.path.join(save_path, "average_metrics.yaml")}')
 
-        detailed_metrics = { "input": self.input_params,
-
+        detailed_metrics = { "input": self.input_params,
+                             "metrics": metrics,
+                             "parameters": self.parameters, }
         with open(os.path.join(save_path, 'metrics.json'), 'w') as f:
             json.dump(detailed_metrics, f, indent=4)
         print(f'Metrics saved to {os.path.join(save_path, "metrics.json")}')
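The new YAML export mirrors the nested dictionary built above. A standalone sketch with hypothetical scores and counts shows the file layout yaml.dump produces with these options (block style, insertion order preserved):

import os, yaml

summary_metrics = {
    'input': {'config': 'config.json'},   # hypothetical input parameters
    'final_fbeta_score': 0.91,            # hypothetical score
    'aggregated_particle_scores': {
        'ribosome': {'tp': 120, 'fp': 8, 'fn': 15, 'weight': 1},
    },
}

save_path = 'results'                     # hypothetical output directory
os.makedirs(save_path, exist_ok=True)
with open(os.path.join(save_path, 'average_metrics.yaml'), 'w') as f:
    yaml.dump(summary_metrics, f, indent=4, default_flow_style=False, sort_keys=False)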
octopi/processing/importers.py
CHANGED
@@ -1,7 +1,7 @@
 from octopi.processing.downsample import FourierRescale
 import copick, argparse, mrcfile, glob, os
-import octopi.processing.writers as write
 from octopi.entry_points import common
+from copick_utils.io import writers
 from tqdm import tqdm
 
 def from_dataportal(

@@ -57,10 +57,10 @@ def from_dataportal(
 
     # If we want to save the tomograms at a different voxel size, we need to rescale the tomograms
     if output_voxel_size is None:
-
+        writers.tomogram(run, vol, input_voxel_size, target_tomo_type)
     else:
         vol = rescale.run(vol)
-
+        writers.tomogram(run, vol, output_voxel_size, target_tomo_type)
 
     print(f'Downloading Complete!! Downloaded {len(root.runs)} runs')
 

@@ -168,7 +168,7 @@ def from_mrcs(
     voxel_size_to_write = input_voxel_size
 
     # Write the tomogram
-
+    writers.tomogram(run, vol, voxel_size_to_write, target_tomo_type)
     print(f"Processed {len(mrc_files)} files from {mrcs_path}")
 
 
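Tomogram writing follows the same pattern. A sketch of the writers.tomogram call from these hunks, with a placeholder volume; the voxel size and tomogram type ('wbp') are assumptions, not values from the diff.

import numpy as np
import copick
from copick_utils.io import writers

root = copick.from_file('config.json')            # hypothetical copick project config
run = root.get_run('Run_001')                     # hypothetical run name

vol = np.zeros((64, 128, 128), dtype=np.float32)  # placeholder tomogram volume
writers.tomogram(run, vol, 10.0, 'wbp')           # voxel size and tomo type assumed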
octopi/pytorch/hyper_search.py
CHANGED
@@ -1,10 +1,9 @@
-from monai.losses import FocalLoss, TverskyLoss
 from monai.metrics import ConfusionMatrixMetric
 from octopi.pytorch import trainer
 from mlflow.tracking import MlflowClient
 from octopi.models import common
-from octopi import io, losses
 import torch, mlflow, optuna, gc
+from octopi.utils import io
 
 class BayesianModelSearch:
 

@@ -207,7 +206,7 @@ class BayesianModelSearch:
         if score > best_score_so_far:
             torch.save(model_trainer.model_weights, f'{self.results_dir}/best_model.pth')
             io.save_parameters_to_yaml(self.model_builder, model_trainer, self.data_generator,
-                                       f'{self.results_dir}/
+                                       f'{self.results_dir}/model_config.yaml')
 
     def get_best_score(self, trial):
         """Retrieve the best score from the trial."""
octopi/pytorch/model_search_submitter.py
CHANGED

@@ -1,7 +1,7 @@
 from octopi.datasets import generators, multi_config_generator
+from octopi.utils import config, parsers
 from octopi.pytorch import hyper_search
 import torch, mlflow, optuna
-from octopi import utils
 from typing import List
 import pandas as pd
 

@@ -75,7 +75,7 @@ class ModelSearchSubmit:
         self.data_generator = None
 
         # Set random seed for reproducibility
-
+        config.set_seed(self.random_seed)
 
         # Initialize dataset generator
         self._initialize_data_generator()

@@ -108,7 +108,7 @@
         )
 
         # Split datasets into training and validation
-        ratios =
+        ratios = parsers.parse_data_split(self.data_split)
         self.data_generator.get_data_splits(
             trainRunIDs=self.trainRunIDs,
             validateRunIDs=self.validateRunIDs,

@@ -134,7 +134,7 @@
 
         # Set up MLflow tracking
         try:
-            tracking_uri =
+            tracking_uri = config.mlflow_setup()
             mlflow.set_tracking_uri(tracking_uri)
         except Exception as e:
             print(f'Failed to set up MLflow tracking: {e}')
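Finally, the helpers split out of the old utils.py are now namespaced under octopi.utils. A hedged sketch of the three calls adopted above; the seed value and the split-string format passed to parse_data_split are assumptions, not documented behavior.

import mlflow
from octopi.utils import config, parsers

config.set_seed(42)                        # seeds RNGs for reproducibility (value assumed)
ratios = parsers.parse_data_split('0.8,0.1,0.1')  # assumed "train,val,test" string format

try:
    tracking_uri = config.mlflow_setup()   # returns a tracking URI, per the diff
    mlflow.set_tracking_uri(tracking_uri)
except Exception as e:
    print(f'Failed to set up MLflow tracking: {e}')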