octopi-1.1-py3-none-any.whl → octopi-1.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of octopi might be problematic.

Files changed (45)
  1. octopi/__init__.py +1 -0
  2. octopi/datasets/cached_datset.py +1 -1
  3. octopi/datasets/generators.py +1 -1
  4. octopi/datasets/io.py +200 -0
  5. octopi/datasets/multi_config_generator.py +1 -1
  6. octopi/entry_points/common.py +5 -5
  7. octopi/entry_points/create_slurm_submission.py +1 -1
  8. octopi/entry_points/run_create_targets.py +6 -6
  9. octopi/entry_points/run_evaluate.py +4 -3
  10. octopi/entry_points/run_extract_mb_picks.py +5 -5
  11. octopi/entry_points/run_localize.py +8 -9
  12. octopi/entry_points/run_optuna.py +7 -7
  13. octopi/entry_points/run_segment_predict.py +4 -4
  14. octopi/entry_points/run_train.py +7 -8
  15. octopi/extract/localize.py +11 -19
  16. octopi/extract/membranebound_extract.py +11 -10
  17. octopi/extract/midpoint_extract.py +3 -3
  18. octopi/models/common.py +1 -1
  19. octopi/processing/create_targets_from_picks.py +3 -4
  20. octopi/processing/evaluate.py +24 -11
  21. octopi/processing/importers.py +4 -4
  22. octopi/pytorch/hyper_search.py +2 -3
  23. octopi/pytorch/model_search_submitter.py +4 -4
  24. octopi/pytorch/segmentation.py +141 -190
  25. octopi/pytorch/segmentation_multigpu.py +162 -0
  26. octopi/pytorch/trainer.py +2 -2
  27. octopi/utils/__init__.py +0 -0
  28. octopi/utils/config.py +57 -0
  29. octopi/utils/io.py +128 -0
  30. octopi/{utils.py → utils/parsers.py} +10 -84
  31. octopi/{stopping_criteria.py → utils/stopping_criteria.py} +3 -3
  32. octopi/{visualization_tools.py → utils/visualization_tools.py} +4 -4
  33. octopi/workflows.py +236 -0
  34. {octopi-1.1.dist-info → octopi-1.2.0.dist-info}/METADATA +41 -29
  35. octopi-1.2.0.dist-info/RECORD +62 -0
  36. {octopi-1.1.dist-info → octopi-1.2.0.dist-info}/WHEEL +1 -1
  37. octopi-1.2.0.dist-info/entry_points.txt +3 -0
  38. {octopi-1.1.dist-info → octopi-1.2.0.dist-info/licenses}/LICENSE +3 -3
  39. octopi/io.py +0 -457
  40. octopi/processing/my_metrics.py +0 -26
  41. octopi/processing/writers.py +0 -102
  42. octopi-1.1.dist-info/RECORD +0 -59
  43. octopi-1.1.dist-info/entry_points.txt +0 -4
  44. /octopi/{losses.py → utils/losses.py} +0 -0
  45. /octopi/{submit_slurm.py → utils/submit_slurm.py} +0 -0
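
The common thread in the hunks below is the retirement of octopi's own I/O layer (octopi/io.py and octopi/processing/writers.py, both deleted above) in favor of copick_utils.io, together with the reorganization of the former top-level helpers into an octopi.utils package. The sketch below summarizes the call-site translation; the reader/writer names and argument orders are inferred from the hunks that follow, not from copick_utils documentation, so treat it as illustrative rather than authoritative.

```python
# Illustrative sketch of the octopi.io -> copick_utils.io migration.
# Signatures are inferred from the call sites in the hunks below; they are not
# taken from copick_utils documentation.
from copick_utils.io import readers, writers


def migration_examples(run, voxel_size, tomo_algorithm, name, user_id, session_id,
                       target, target_user_name, seg_name):
    # old: io.get_tomogram_array(run, voxel_size, tomo_algorithm)
    tomo = readers.tomogram(run, voxel_size, tomo_algorithm)

    # old: io.get_copick_coordinates(run, name, user_id, session_id, voxel_size)
    coords = readers.coordinates(run, name, user_id, session_id,
                                 voxel_size, raise_error=False)

    # old: io.get_segmentation_array(run, voxel_size, name, user_id=..., session_id=...)
    seg = readers.segmentation(run, voxel_size, name,
                               user_id=user_id, session_id=session_id,
                               raise_error=False)

    # old: write.segmentation(...) from octopi.processing.writers
    writers.segmentation(run, target, target_user_name,
                         name=seg_name, session_id=session_id,
                         voxel_size=voxel_size)
    return tomo, coords, seg
```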
octopi/extract/membranebound_extract.py CHANGED
@@ -1,5 +1,5 @@
  from scipy.spatial.transform import Rotation as R
- from octopi import utils, io
+ from copick_utils.io import readers
  import scipy.ndimage as ndi
  from typing import Tuple
  import numpy as np
@@ -36,7 +36,7 @@ def process_membrane_bound_extract(run,
  new_session_id = str(int(save_session_id) + 1) # Convert to string after increment

  # Need Better Error Handing for Missing Picks
- coordinates = io.get_copick_coordinates(
+ coordinates = readers.coordinates(
  run,
  picks_info[0], picks_info[1], picks_info[2],
  voxel_size,
@@ -54,12 +54,13 @@ def process_membrane_bound_extract(run,
  if membrane_info is None:
  # Flag to distinguish between organelle and membrane segmentation
  membranes_provided = False
- seg = io.get_segmentation_array(run,
- voxel_size,
- organelle_info[0],
- user_id=organelle_info[1],
- session_id=organelle_info[2],
- raise_error=False)
+ seg = readers.segmentation(
+ run,
+ voxel_size,
+ organelle_info[0],
+ user_id=organelle_info[1],
+ session_id=organelle_info[2],
+ raise_error=False)
  # If No Segmentation is Found, Return
  if seg is None: return
  elif nPoints == 0 or np.unique(seg).max() == 0:
@@ -68,7 +69,7 @@ def process_membrane_bound_extract(run,
  else:
  # Read both Organelle and Membrane Segmentations
  membranes_provided = True
- seg = io.get_segmentation_array(
+ seg = readers.segmentation(
  run,
  voxel_size,
  membrane_info[0],
@@ -76,7 +77,7 @@ def process_membrane_bound_extract(run,
  session_id=membrane_info[2],
  raise_error=False)

- organelle_seg = io.get_segmentation_array(
+ organelle_seg = readers.segmentation(
  run,
  voxel_size,
  organelle_info[0],
octopi/extract/midpoint_extract.py CHANGED
@@ -1,6 +1,6 @@
  from octopi.extract import membranebound_extract as extract
  from scipy.spatial.transform import Rotation as R
- from octopi import io
+ from copick_utils.io import readers
  from scipy.spatial import cKDTree
  from typing import Tuple
  import numpy as np
@@ -28,7 +28,7 @@ def process_midpoint_extract(
  """

  # Pull Picks that Are used for Midpoint Extraction
- coordinates = io.get_copick_coordinates(
+ coordinates = readers.coordinates(
  run,
  picks_info[0], picks_info[1], picks_info[2],
  voxel_size
@@ -40,7 +40,7 @@ def process_midpoint_extract(
  save_picks_info[2] = save_session_id

  # Get Organelle Segmentation
- seg = io.get_segmentation_array(
+ seg = readers.segmentation(
  run,
  voxel_size,
  organelle_info[0],
octopi/models/common.py CHANGED
@@ -1,5 +1,5 @@
  from monai.losses import FocalLoss, TverskyLoss
- from octopi import losses
+ from octopi.utils import losses
  from octopi.models import (
  Unet, AttentionUnet, MedNeXt, SegResNet
  )
octopi/processing/create_targets_from_picks.py CHANGED
@@ -1,6 +1,5 @@
  from octopi.processing.segmentation_from_picks import from_picks
- import octopi.processing.writers as write
- from octopi import io
+ from copick_utils.io import readers, writers
  from typing import List
  from tqdm import tqdm
  import numpy as np
@@ -56,7 +55,7 @@ def generate_targets(
  run = root.get_run(runID)

  # Get Tomogram
- tomo = io.get_tomogram_array(run, voxel_size, tomo_algorithm)
+ tomo = readers.tomogram(run, voxel_size, tomo_algorithm)

  # Initialize Target Volume
  target = np.zeros(tomo.shape, dtype=np.uint8)
@@ -107,7 +106,7 @@ def generate_targets(
  # Write Segmentation for non-empty targets
  if target.max() > 0 and numPicks > 0:
  tqdm.write(f'Annotating {numPicks} picks in {runID}...')
- write.segmentation(run, target, target_user_name,
+ writers.segmentation(run, target, target_user_name,
  name = target_segmentation_name, session_id= target_session_id,
  voxel_size = voxel_size)
  print('Creation of targets complete!')
octopi/processing/evaluate.py CHANGED
@@ -1,7 +1,7 @@
- from octopi import utils, io
+ from copick_utils.io import readers
  from scipy.spatial import distance
+ import copick, json, os, yaml
  from typing import List
- import copick, json, os
  import numpy as np

  class evaluator:
@@ -95,12 +95,12 @@ class evaluator:
  for name, radius in self.objects:

  # Get Ground Truth and Predicted Coordinates
- gt_coordinates = io.get_copick_coordinates(
+ gt_coordinates = readers.coordinates(
  run, name,
  self.ground_truth_user_id, self.ground_truth_session_id,
  self.voxel_size, raise_error=False
  )
- pred_coordinates = io.get_copick_coordinates(
+ pred_coordinates = readers.coordinates(
  run, name,
  self.prediction_user_id, self.predict_session_id,
  self.voxel_size, raise_error=False
@@ -202,14 +202,27 @@ class evaluator:
  }

  os.makedirs(save_path, exist_ok=True)
- summary_metrics = { "input": self.input_params, "parameters": self.parameters,
- "summary_metrics": final_summary_metrics }
- with open(os.path.join(save_path, 'average_metrics.json'), 'w') as f:
- json.dump(summary_metrics, f, indent=4)
- print(f'\nAverage Metrics saved to {os.path.join(save_path, "average_metrics.json")}')
+ summary_metrics = { "input": self.input_params,
+ "final_fbeta_score": final_fbeta,
+ "aggregated_particle_scores": { # Optionally add per-particle details
+ name: {
+ "tp": counts['total_tp'],
+ "fp": counts['total_fp'],
+ "fn": counts['total_fn'],
+ "weight": self.weights.get(name, 1)
+ } for name, counts in aggregated_counts.items()
+ },
+ "summary_metrics": final_summary_metrics,
+ "parameters": self.parameters, }
+
+ # Save average metrics to YAML file
+ with open(os.path.join(save_path, 'average_metrics.yaml'), 'w') as f:
+ yaml.dump(summary_metrics, f, indent=4, default_flow_style=False, sort_keys=False)
+ print(f'\nAverage Metrics saved to {os.path.join(save_path, "average_metrics.yaml")}')

- detailed_metrics = { "input": self.input_params, "parameters": self.parameters,
- "metrics": metrics }
+ detailed_metrics = { "input": self.input_params,
+ "metrics": metrics,
+ "parameters": self.parameters, }
  with open(os.path.join(save_path, 'metrics.json'), 'w') as f:
  json.dump(detailed_metrics, f, indent=4)
  print(f'Metrics saved to {os.path.join(save_path, "metrics.json")}')
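
A behavioral note on the evaluate.py change above: the aggregate report moves from average_metrics.json to average_metrics.yaml and now carries the final F-beta score plus per-particle TP/FP/FN counts and weights, while per-run details stay in metrics.json. Below is a minimal sketch of reading the new YAML output, assuming only the keys visible in the hunk; the save_path value is a hypothetical example.

```python
# Minimal sketch: consuming the evaluator's new YAML summary.
# Keys are those written in the hunk above; `save_path` is a hypothetical example.
import os
import yaml

save_path = "evaluation_results"
with open(os.path.join(save_path, "average_metrics.yaml")) as f:
    summary = yaml.safe_load(f)

print("F-beta:", summary["final_fbeta_score"])
for particle, counts in summary["aggregated_particle_scores"].items():
    print(particle, counts["tp"], counts["fp"], counts["fn"], "weight:", counts["weight"])
```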
octopi/processing/importers.py CHANGED
@@ -1,7 +1,7 @@
  from octopi.processing.downsample import FourierRescale
  import copick, argparse, mrcfile, glob, os
- import octopi.processing.writers as write
  from octopi.entry_points import common
+ from copick_utils.io import writers
  from tqdm import tqdm

  def from_dataportal(
@@ -57,10 +57,10 @@ def from_dataportal(

  # If we want to save the tomograms at a different voxel size, we need to rescale the tomograms
  if output_voxel_size is None:
- write.tomogram(run, vol, input_voxel_size, target_tomo_type)
+ writers.tomogram(run, vol, input_voxel_size, target_tomo_type)
  else:
  vol = rescale.run(vol)
- write.tomogram(run, vol, output_voxel_size, target_tomo_type)
+ writers.tomogram(run, vol, output_voxel_size, target_tomo_type)

  print(f'Downloading Complete!! Downloaded {len(root.runs)} runs')

@@ -168,7 +168,7 @@ def from_mrcs(
  voxel_size_to_write = input_voxel_size

  # Write the tomogram
- write.tomogram(run, vol, voxel_size_to_write, target_tomo_type)
+ writers.tomogram(run, vol, voxel_size_to_write, target_tomo_type)
  print(f"Processed {len(mrc_files)} files from {mrcs_path}")

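
For importers.py, the write path keeps its logic but now goes through the copick_utils writers: tomograms are stored at the input voxel size unless output_voxel_size is set, in which case the volume is Fourier-rescaled first. Below is a small sketch of that branch mirroring the hunk above; the rescale object stands for the FourierRescale instance built elsewhere in importers.py, whose construction is not part of this diff.

```python
# Sketch of the branch shown in the hunk above; `rescale` is the FourierRescale
# instance constructed elsewhere in importers.py (not shown in this diff).
from copick_utils.io import writers


def write_downloaded_tomogram(run, vol, rescale, input_voxel_size,
                              output_voxel_size, target_tomo_type):
    if output_voxel_size is None:
        # keep the tomogram at its original sampling
        writers.tomogram(run, vol, input_voxel_size, target_tomo_type)
    else:
        # rescale to the requested voxel size before writing
        vol = rescale.run(vol)
        writers.tomogram(run, vol, output_voxel_size, target_tomo_type)
```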
octopi/pytorch/hyper_search.py CHANGED
@@ -1,10 +1,9 @@
- from monai.losses import FocalLoss, TverskyLoss
  from monai.metrics import ConfusionMatrixMetric
  from octopi.pytorch import trainer
  from mlflow.tracking import MlflowClient
  from octopi.models import common
- from octopi import io, losses
  import torch, mlflow, optuna, gc
+ from octopi.utils import io

  class BayesianModelSearch:

@@ -207,7 +206,7 @@ class BayesianModelSearch:
  if score > best_score_so_far:
  torch.save(model_trainer.model_weights, f'{self.results_dir}/best_model.pth')
  io.save_parameters_to_yaml(self.model_builder, model_trainer, self.data_generator,
- f'{self.results_dir}/best_model_config.yaml')
+ f'{self.results_dir}/model_config.yaml')

  def get_best_score(self, trial):
  """Retrieve the best score from the trial."""
octopi/pytorch/model_search_submitter.py CHANGED
@@ -1,7 +1,7 @@
  from octopi.datasets import generators, multi_config_generator
+ from octopi.utils import config, parsers
  from octopi.pytorch import hyper_search
  import torch, mlflow, optuna
- from octopi import utils
  from typing import List
  import pandas as pd

@@ -75,7 +75,7 @@ class ModelSearchSubmit:
  self.data_generator = None

  # Set random seed for reproducibility
- utils.set_seed(self.random_seed)
+ config.set_seed(self.random_seed)

  # Initialize dataset generator
  self._initialize_data_generator()
@@ -108,7 +108,7 @@ class ModelSearchSubmit:
  )

  # Split datasets into training and validation
- ratios = utils.parse_data_split(self.data_split)
+ ratios = parsers.parse_data_split(self.data_split)
  self.data_generator.get_data_splits(
  trainRunIDs=self.trainRunIDs,
  validateRunIDs=self.validateRunIDs,
@@ -134,7 +134,7 @@ class ModelSearchSubmit:

  # Set up MLflow tracking
  try:
- tracking_uri = utils.mlflow_setup()
+ tracking_uri = config.mlflow_setup()
  mlflow.set_tracking_uri(tracking_uri)
  except Exception as e:
  print(f'Failed to set up MLflow tracking: {e}')
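
The other half of the refactor, visible in hyper_search.py and model_search_submitter.py above, is that the old top-level octopi.utils module is now a package: seeding and MLflow helpers live in octopi.utils.config, argument-parsing helpers in octopi.utils.parsers, and YAML parameter I/O in octopi.utils.io. Below is a minimal sketch of the updated call sites as they appear in these hunks; the data-split string is a hypothetical example value, since its real format is defined by parsers.parse_data_split.

```python
# Updated octopi.utils call sites, matching the hunks above.
# The data-split string is a hypothetical example; its real format is defined
# by octopi.utils.parsers.parse_data_split.
import mlflow
from octopi.utils import config, parsers

config.set_seed(42)                                # was: utils.set_seed(...)
ratios = parsers.parse_data_split("0.8,0.1,0.1")   # was: utils.parse_data_split(...)

tracking_uri = config.mlflow_setup()               # was: utils.mlflow_setup()
mlflow.set_tracking_uri(tracking_uri)
```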