octopi 1.0__py3-none-any.whl → 1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of octopi has been flagged as a potentially problematic release.

@@ -8,9 +8,9 @@ def add_model_parameters(parser, octopi = False):
 
     # Add U-Net model parameters
     parser.add_argument("--Nclass", type=int, required=False, default=3, help="Number of prediction classes in the model")
-    parser.add_argument("--channels", type=utils.parse_int_list, required=False, default='32,64,128,128', help="List of channel sizes")
+    parser.add_argument("--channels", type=utils.parse_int_list, required=False, default='32,64,96,96', help="List of channel sizes")
     parser.add_argument("--strides", type=utils.parse_int_list, required=False, default='2,2,1', help="List of stride sizes")
-    parser.add_argument("--res-units", type=int, required=False, default=2, help="Number of residual units in the UNet")
+    parser.add_argument("--res-units", type=int, required=False, default=1, help="Number of residual units in the UNet")
     parser.add_argument("--dim-in", type=int, required=False, default=96, help="Input dimension for the UNet model")
 
 def inference_model_parameters(parser):
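The `--channels` and `--strides` flags rely on `utils.parse_int_list`, which is not shown in this diff. A plausible sketch of such an argparse `type=` callable (hypothetical, for illustration only):

```python
# Hypothetical sketch of a parse_int_list-style argparse type; the real
# utils.parse_int_list is not shown in this diff.
import argparse

def parse_int_list(value: str) -> list:
    """Convert a comma-separated string such as '32,64,96,96' into [32, 64, 96, 96]."""
    try:
        return [int(item) for item in value.split(",") if item.strip()]
    except ValueError as exc:
        raise argparse.ArgumentTypeError(f"expected comma-separated integers, got {value!r}") from exc

parser = argparse.ArgumentParser()
parser.add_argument("--channels", type=parse_int_list, default="32,64,96,96")
print(parser.parse_args(["--channels", "16,32,64"]).channels)  # [16, 32, 64]
print(parser.parse_args([]).channels)                          # [32, 64, 96, 96]
```

Note that argparse also runs string defaults through the `type` callable, so the default `'32,64,96,96'` arrives in the code as a list of ints, not a string.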
@@ -24,7 +24,7 @@ def add_train_parameters(parser, octopi = False):
     """
     Add training parameters to the parser.
     """
-    parser.add_argument("--num-epochs", type=int, required=False, default=100, help="Number of training epochs")
+    parser.add_argument("--num-epochs", type=int, required=False, default=1000, help="Number of training epochs")
     parser.add_argument("--val-interval", type=int, required=False, default=10, help="Interval for validation metric calculations")
     parser.add_argument("--tomo-batch-size", type=int, required=False, default=15, help="Number of tomograms to load per epoch for training")
     parser.add_argument("--best-metric", type=str, default='avg_f1', required=False, help="Metric to Monitor for Determining Best Model. To track fBetaN, use fBetaN with N as the beta-value.")
@@ -32,8 +32,8 @@ def add_train_parameters(parser, octopi = False):
     if not octopi:
         parser.add_argument("--num-tomo-crops", type=int, required=False, default=16, help="Number of tomogram crops to use per patch")
         parser.add_argument("--lr", type=float, required=False, default=1e-3, help="Learning rate for the optimizer")
-        parser.add_argument("--tversky-alpha", type=float, required=False, default=0.5, help="Alpha parameter for the Tversky loss")
-        parser.add_argument("--model-save-path", required=True, help="Path to model save directory")
+        parser.add_argument("--tversky-alpha", type=float, required=False, default=0.3, help="Alpha parameter for the Tversky loss")
+        parser.add_argument("--model-save-path", required=False, default='results', help="Path to model save directory")
     else:
         parser.add_argument("--num-trials", type=int, default=10, required=False, help="Number of trials for architecture search (default: 10).")
 
@@ -16,19 +16,27 @@ def create_train_script(args):
 
     command = f"""
     octopi train \\
+        {strconfigs} \\
         --model-save-path {args.model_save_path} \\
-        --target-info {args.target_info} \\
-        --voxel-size {args.voxel_size} --tomo-algorithm {args.tomo_algorithm} --Nclass {args.Nclass} \\
-        --best-metric {args.best_metric} --num-epochs {args.num_epochs} --val-interval {args.val_interval} \\
+        --target-info {','.join(args.target_info)} \\
+        --voxel-size {args.voxel_size} --tomo-alg {args.tomo_alg} --Nclass {args.Nclass} \\
         --tomo-batch-size {args.tomo_batch_size} --num-tomo-crops {args.num_tomo_crops} \\
-        {strconfigs}
-    """
+        --best-metric {args.best_metric} --num-epochs {args.num_epochs} --val-interval {args.val_interval} \\
+    """
 
     # If a model config is provided, use it to build the model
     if args.model_config is not None:
         command += f" --model-config {args.model_config}"
     else:
-        command += f" --tversky-alpha {args.tversky_alpha} --channels {args.channels} --strides {args.strides} --dim-in {args.dim_in} --res-units {args.res_units}"
+        channels = ",".join(map(str, args.channels))
+        strides = ",".join(map(str, args.strides))
+        command += (
+            f" --tversky-alpha {args.tversky_alpha}"
+            f" --channels {channels}"
+            f" --strides {strides}"
+            f" --dim-in {args.dim_in}"
+            f" --res-units {args.res_units}"
+        )
 
     # If Model Weights are provided, use them to initialize the model
     if args.model_weights is not None and args.model_config is not None:
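The hunk above switches from interpolating raw Python lists into the command string to comma-joining them first. A stand-alone illustration of why, with hypothetical argument values in place of the parsed argparse namespace:

```python
# Stand-alone illustration of the command-building change above, with
# hypothetical argument values in place of a parsed argparse namespace.
from types import SimpleNamespace

args = SimpleNamespace(
    target_info=["remotetargets", "train-octopi", "1"],
    channels=[32, 64, 96, 96],
    strides=[2, 2, 1],
)

# Lists must be comma-joined before interpolation; f"{args.channels}" would
# otherwise emit '[32, 64, 96, 96]', which the CLI cannot re-parse.
target_info = ",".join(args.target_info)
channels = ",".join(map(str, args.channels))
strides = ",".join(map(str, args.strides))

command = f"octopi train --target-info {target_info} --channels {channels} --strides {strides}"
print(command)
```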
@@ -240,4 +248,4 @@ def download_dataportal_slurm():
     """
     parser_description = "Create a SLURM script for downloading tomograms from the Dataportal"
     args = cli_dataportal_parser(parser_description, add_slurm=True)
-    create_download_dataportal_script(args)
+    create_download_dataportal_script(args)
@@ -30,46 +30,23 @@ def extract_membrane_bound_picks(
     if n_procs is None:
         n_procs = min(mp.cpu_count(), n_run_ids)
     print(f"Using {n_procs} processes to parallelize across {n_run_ids} run IDs.")
-
-    # Initialize tqdm progress bar
-    with tqdm(total=n_run_ids, desc="Membrane-Protein Isolation", unit="run") as pbar:
-        for _iz in range(0, n_run_ids, n_procs):
-
-            start_idx = _iz
-            end_idx = min(_iz + n_procs, n_run_ids)  # Ensure end_idx does not exceed n_run_ids
-            print(f"\nProcessing runIDs from {start_idx} -> {end_idx} (out of {n_run_ids})")
-
-            processes = []
-            for _in in range(n_procs):
-                _iz_this = _iz + _in
-                if _iz_this >= n_run_ids:
-                    break
-                run_id = run_ids[_iz_this]
-                run = root.get_run(run_id)
-                p = mp.Process(
-                    target=extract.process_membrane_bound_extract,
-                    args=(run,
-                          voxel_size,
-                          picks_info,
-                          membrane_info,
-                          organelle_info,
-                          save_user_id,
-                          save_session_id,
-                          distance_threshold),
-                )
-                processes.append(p)
-
-            for p in processes:
-                p.start()
-
-            for p in processes:
-                p.join()
-
-            for p in processes:
-                p.close()
-
-            # Update tqdm progress bar
-            pbar.update(len(processes))
+
+    # Run Membrane-Protein Isolation - Main Parallelization Loop
+    with mp.Pool(processes=n_procs) as pool:
+        with tqdm(total=n_run_ids, desc="Membrane-Protein Isolation", unit="run") as pbar:
+            worker_func = lambda run_id: extract.process_membrane_bound_extract(
+                root.get_run(run_id),
+                voxel_size,
+                picks_info,
+                membrane_info,
+                organelle_info,
+                save_user_id,
+                save_session_id,
+                distance_threshold
+            )
+
+            for _ in pool.imap_unordered(worker_func, run_ids, chunksize=1):
+                pbar.update(1)
 
     print('Extraction of Membrane-Bound Proteins Complete!')
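This hunk replaces hand-rolled `mp.Process` batches, where each batch waits on its slowest member, with a `Pool` whose `imap_unordered` yields results as each task finishes, so the progress bar advances per run. A generic, runnable sketch of the same pattern with a stand-in worker:

```python
# Generic sketch of the refactor above: a Pool plus imap_unordered advances
# a progress bar as each task completes instead of per fixed-size batch.
# Uses `multiprocess` (as the diff imports), a dill-based multiprocessing fork.
import multiprocess as mp
from tqdm import tqdm

def process_one(run_id):
    return run_id * run_id  # stand-in for per-run extraction work

if __name__ == "__main__":
    run_ids = list(range(20))
    n_procs = min(mp.cpu_count(), len(run_ids))
    with mp.Pool(processes=n_procs) as pool:
        with tqdm(total=len(run_ids), desc="Runs", unit="run") as pbar:
            for _ in pool.imap_unordered(process_one, run_ids, chunksize=1):
                pbar.update(1)
```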
 
@@ -5,6 +5,7 @@ import copick, argparse, pprint
 from typing import List, Tuple
 import multiprocess as mp
 from tqdm import tqdm
+import os
 
 def pick_particles(
     copick_config_path: str,
@@ -40,56 +41,39 @@ def pick_particles(
     print(', '.join([f'{obj[0]} (Label: {obj[1]})' for obj in objects]) + '\n')
 
     # Either Specify Input RunIDs or Run on All RunIDs
-    if runIDs: print('Running Localization on the Following RunIDs: ' + ', '.join(runIDs) + '\n')
-    run_ids = runIDs if runIDs else [run.name for run in root.runs]
+    if runIDs:
+        print('Running Localization on the Following RunIDs: ' + ', '.join(runIDs) + '\n')
+        run_ids = runIDs
+    else:
+        run_ids = [run.name for run in root.runs if run.get_voxel_spacing(voxel_size) is not None]
+        skipped_run_ids = [run.name for run in root.runs if run.get_voxel_spacing(voxel_size) is None]
+
+        if skipped_run_ids:
+            print(f"Warning: skipping runs with no voxel spacing {voxel_size}: {skipped_run_ids}")
+
+    # The number of processes shouldn't exceed the available cores or the number of available runs
     n_run_ids = len(run_ids)
+    n_procs = min(mp.cpu_count(), n_procs, n_run_ids)
 
-    # Determine the number of processes to use
-    if n_procs is None:
-        n_procs = min(int(mp.cpu_count()//4), n_run_ids)
+    # Run Localization - Main Parallelization Loop
     print(f"Using {n_procs} processes to parallelize across {n_run_ids} run IDs.")
-
-    # Initialize tqdm progress bar
-    with tqdm(total=n_run_ids, desc="Localization", unit="run") as pbar:
-        for _iz in range(0, n_run_ids, n_procs):
-
-            start_idx = _iz
-            end_idx = min(_iz + n_procs, n_run_ids)  # Ensure end_idx does not exceed n_run_ids
-            print(f"\nProcessing runIDs from {start_idx} -> {end_idx} (out of {n_run_ids})")
-
-            processes = []
-            for _in in range(n_procs):
-                _iz_this = _iz + _in
-                if _iz_this >= n_run_ids:
-                    break
-                run_id = run_ids[_iz_this]
-                run = root.get_run(run_id)
-                p = mp.Process(
-                    target=localize.processs_localization,
-                    args=(run,
-                          objects,
-                          seg_info,
-                          method,
-                          voxel_size,
-                          filter_size,
-                          radius_min_scale,
-                          radius_max_scale,
-                          pick_session_id,
-                          pick_user_id),
-                )
-                processes.append(p)
-
-            for p in processes:
-                p.start()
-
-            for p in processes:
-                p.join()
-
-            for p in processes:
-                p.close()
-
-            # Update tqdm progress bar
-            pbar.update(len(processes))
+    with mp.Pool(processes=n_procs) as pool:
+        with tqdm(total=n_run_ids, desc="Localization", unit="run") as pbar:
+            worker_func = lambda run_id: localize.processs_localization(
+                root.get_run(run_id),
+                objects,
+                seg_info,
+                method,
+                voxel_size,
+                filter_size,
+                radius_min_scale,
+                radius_max_scale,
+                pick_session_id,
+                pick_user_id
+            )
+
+            for _ in pool.imap_unordered(worker_func, run_ids, chunksize=1):
+                pbar.update(1)
 
     print('Localization Complete!')
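The same `Pool` refactor is applied here. One subtlety: the lambda worker only survives pickling because this module imports `multiprocess`, which serializes with dill; with the standard library's `multiprocessing`, the equivalent would need a top-level function, for example via `functools.partial`:

```python
# Sketch: the stdlib-safe equivalent of the lambda worker above. Lambdas and
# closures are not picklable by multiprocessing, but a functools.partial over
# a top-level function is.
import functools
import multiprocessing as std_mp

def process_run(run_id, scale):
    return run_id * scale  # stand-in for per-run localization work

if __name__ == "__main__":
    worker = functools.partial(process_run, scale=10)  # picklable, unlike a lambda
    with std_mp.Pool(processes=4) as pool:
        print(sorted(pool.imap_unordered(worker, range(8))))
```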
 
@@ -110,7 +94,7 @@ def localize_parser(parser_description, add_slurm: bool = False):
     localize_group.add_argument("--radius-max-scale", type=float, default=1.0, required=False, help="Maximum radius scale for particles.")
     localize_group.add_argument("--filter-size", type=int, default=10, required=False, help="Filter size for localization.")
     localize_group.add_argument("--pick-objects", type=utils.parse_list, default=None, required=False, help="Specific Objects to Find Picks for.")
-    localize_group.add_argument("--n-procs", type=int, default=None, required=False, help="Number of CPU processes to parallelize runs across. Defaults to the max number of cores available or available runs.")
+    localize_group.add_argument("--n-procs", type=int, default=8, required=False, help="Number of CPU processes to parallelize runs across. Defaults to the max number of cores available or available runs.")
 
     output_group = parser.add_argument_group("Output Arguments")
     output_group.add_argument("--pick-session-id", type=str, default='1', required=False, help="Session ID for the particle picks.")
@@ -9,7 +9,7 @@ from octopi import io
 import scipy.ndimage as ndi
 from tqdm import tqdm
 import numpy as np
-import math
+import gc
 
 def processs_localization(run,
                           objects,
@@ -107,7 +107,7 @@ def extract_particle_centroids_via_watershed(
     max_particle_size = (4 / 3) * np.pi * (max_particle_radius ** 3)
 
     # Create a binary mask for the specific segmentation label
-    binary_mask = (segmentation == segmentation_idx).astype(int)
+    binary_mask = (segmentation == segmentation_idx).astype(np.uint8)
 
     # Skip if the segmentation label is not present
     if np.sum(binary_mask) == 0:
@@ -117,7 +117,12 @@ def extract_particle_centroids_via_watershed(
     # Structuring element for erosion and dilation
     struct_elem = ball(1)
     eroded = binary_erosion(binary_mask, struct_elem)
+    del binary_mask
+    gc.collect()
+
     dilated = binary_dilation(eroded, struct_elem)
+    del eroded
+    gc.collect()
 
     # Distance transform and local maxima detection
     distance = ndi.distance_transform_edt(dilated)
@@ -125,7 +130,14 @@ def extract_particle_centroids_via_watershed(
 
     # Watershed segmentation
     markers, _ = ndi.label(local_max)
+    del local_max
+    markers = markers.astype(np.uint8)
+    gc.collect()
+
     watershed_labels = watershed(-distance, markers, mask=dilated)
+    del distance, markers, dilated
+    watershed_labels = watershed_labels.astype(np.uint8)
+    gc.collect()
 
     # Extract region properties and filter based on particle size
     all_centroids = []
@@ -135,6 +147,9 @@ def extract_particle_centroids_via_watershed(
         # Option 1: Use all centroids
         all_centroids.append(region.centroid)
 
+    del watershed_labels
+    gc.collect()
+
     return all_centroids
 
 def extract_particle_centroids_via_com(
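These hunks thread `del`/`gc.collect()` through the watershed pipeline to cap peak memory on large tomograms. A self-contained sketch of the overall pipeline on a toy volume (marker selection simplified relative to the real code):

```python
# Sketch of the watershed centroid pipeline being tuned above: mask -> erode/
# dilate -> distance transform -> markers -> watershed -> region centroids,
# dropping each large array as soon as it has been consumed.
import gc
import numpy as np
import scipy.ndimage as ndi
from skimage.morphology import ball, binary_erosion, binary_dilation
from skimage.segmentation import watershed
from skimage.measure import regionprops

segmentation = np.zeros((32, 32, 32), dtype=np.uint8)
segmentation[10:20, 10:20, 10:20] = 2                  # one toy "particle"

binary_mask = (segmentation == 2).astype(np.uint8)
struct_elem = ball(1)
eroded = binary_erosion(binary_mask, struct_elem)
del binary_mask; gc.collect()
dilated = binary_dilation(eroded, struct_elem)
del eroded; gc.collect()

distance = ndi.distance_transform_edt(dilated)
markers, _ = ndi.label(distance == distance.max())     # simplified marker choice
watershed_labels = watershed(-distance, markers, mask=dilated)
del distance, markers, dilated; gc.collect()

centroids = [region.centroid for region in regionprops(watershed_labels)]
print(centroids)
```

Note that the `astype(np.uint8)` casts in the hunk save memory but wrap above 255 labels, which is presumably acceptable for the marker counts expected per segmentation class.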
octopi/io.py CHANGED
@@ -137,7 +137,7 @@ def get_segmentation_array(run,
     # No Segmentations Are Available, Result in Error
     if len(seg) == 0:
         # Get all available segmentations with their metadata
-        available_segs = run.get_segmentations(voxel_size=voxel_spacing)
+        available_segs = run.get_segmentations(voxel_size=float(voxel_spacing))
         seg_info = [(s.name, s.user_id, s.session_id) for s in available_segs]
 
         # Format the information for display
octopi/main.py CHANGED
@@ -33,7 +33,7 @@ def cli_main():
         "create-targets": (create_targets, "Generate segmentation targets from coordinates."),
         "train": (train_model, "Train a single U-Net model."),
         "model-explore": (model_explore, "Explore model architectures with Optuna / Bayesian Optimization."),
-        "inference": (inference, "Perform segmentation inference on tomograms."),
+        "segment": (inference, "Perform segmentation inference on tomograms."),
         "localize": (localize, "Perform localization of particles in tomograms."),
         "extract-mb-picks": (extract_mb_picks, "Extract MB Picks from tomograms."),
         "evaluate": (evaluate, "Evaluate the performance of a model."),
@@ -42,7 +42,11 @@ def generate_targets(
 
     # If runIDs are not provided, load all runs
     if run_ids is None:
-        run_ids = [run.name for run in root.runs]
+        run_ids = [run.name for run in root.runs if run.get_voxel_spacing(voxel_size) is not None]
+        skipped_run_ids = [run.name for run in root.runs if run.get_voxel_spacing(voxel_size) is None]
+
+        if skipped_run_ids:
+            print(f"Warning: skipping runs with no voxel spacing {voxel_size}: {skipped_run_ids}")
 
     # Iterate Over All Runs
     for runID in tqdm(run_ids):
@@ -87,6 +91,9 @@ def generate_targets(
             session_id=train_targets[target_name]["session_id"],
         )
 
+        # Filter out empty picks
+        query = [pick for pick in query if pick.points is not None]
+
         # Add Picks to Target
         for pick in query:
            numPicks += len(pick.points)
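The skip-and-warn logic above (also added in `pick_particles` and `Predictor`) traverses `root.runs` twice. An equivalent single-pass partition, sketched with a stand-in `Run` type since the copick objects are not defined in this diff:

```python
# Single-pass variant of the two list comprehensions above; Run is a stand-in
# for the copick run object, which exposes get_voxel_spacing().
from dataclasses import dataclass
from typing import Optional

@dataclass
class Run:
    name: str
    spacings: dict

    def get_voxel_spacing(self, voxel_size) -> Optional[object]:
        return self.spacings.get(voxel_size)

runs = [Run("TS_1", {10: "vs"}), Run("TS_2", {}), Run("TS_3", {10: "vs"})]
voxel_size = 10

run_ids, skipped_run_ids = [], []
for run in runs:
    (run_ids if run.get_voxel_spacing(voxel_size) is not None else skipped_run_ids).append(run.name)

if skipped_run_ids:
    print(f"Warning: skipping runs with no voxel spacing {voxel_size}: {skipped_run_ids}")
print(run_ids)  # ['TS_1', 'TS_3']
```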
@@ -102,11 +102,6 @@ class FourierRescale:
     """
     in_depth, in_height, in_width = volume.shape[-3:]
 
-    # Check if dimensions are odd
-    d_is_odd = in_depth % 2
-    h_is_odd = in_height % 2
-    w_is_odd = in_width % 2
-
     # Calculate new dimensions
     extent_depth = in_depth * self.input_voxel_size[0]
     extent_height = in_height * self.input_voxel_size[1]
@@ -121,9 +116,10 @@ class FourierRescale:
     new_height = new_height - (new_height % 2)
     new_width = new_width - (new_width % 2)
 
-    # Calculate starting points with odd/even correction
-    start_d = (in_depth - new_depth) // 2 + (d_is_odd)
-    start_h = (in_height - new_height) // 2 + (h_is_odd)
-    start_w = (in_width - new_width) // 2 + (w_is_odd)
+    # Calculate starting points - properly centered around DC component
+    # No odd/even correction needed - just center the crop
+    start_d = (in_depth - new_depth) // 2
+    start_h = (in_height - new_height) // 2
+    start_w = (in_width - new_width) // 2
 
-    return start_d, start_h, start_w, new_depth, new_height, new_width
+    return start_d, start_h, start_w, new_depth, new_height, new_width
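The odd/even correction is dropped because, after `fftshift`, the DC component of an N-point transform sits at index `N // 2` for both parities, so `(in_size - new_size) // 2` already centers an even-sized crop on it. A sketch of the centered Fourier-crop idea on a toy volume (the normalization choice here is illustrative):

```python
# Centered Fourier crop: downsample by keeping only the low-frequency window
# around the DC component of the shifted spectrum.
import numpy as np

volume = np.random.rand(9, 12, 15).astype(np.float32)
in_depth, in_height, in_width = volume.shape
new_depth, new_height, new_width = 6, 8, 10          # even target sizes

spectrum = np.fft.fftshift(np.fft.fftn(volume))
start_d = (in_depth - new_depth) // 2                # DC at N // 2 stays centered
start_h = (in_height - new_height) // 2
start_w = (in_width - new_width) // 2
cropped = spectrum[start_d:start_d + new_depth,
                   start_h:start_h + new_height,
                   start_w:start_w + new_width]
rescaled = np.fft.ifftn(np.fft.ifftshift(cropped)).real
rescaled *= rescaled.size / volume.size              # preserve the mean intensity
print(rescaled.shape)                                # (6, 8, 10)
```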
@@ -16,16 +16,16 @@ class ModelSearchSubmit:
         voxel_size: float,
         Nclass: int,
         model_type: str,
-        mlflow_experiment_name: str,
-        random_seed: int,
-        num_epochs: int,
-        num_trials: int,
-        tomo_batch_size: int,
-        best_metric: str,
-        val_interval: int,
-        trainRunIDs: List[str],
-        validateRunIDs: List[str],
-        data_split: str
+        best_metric: str = 'avg_f1',
+        num_epochs: int = 1000,
+        num_trials: int = 100,
+        data_split: str = 0.8,
+        random_seed: int = 42,
+        val_interval: int = 10,
+        tomo_batch_size: int = 15,
+        trainRunIDs: List[str] = None,
+        validateRunIDs: List[str] = None,
+        mlflow_experiment_name: str = 'explore',
     ):
         """
         Initialize the ModelSearch class for architecture search with Optuna.
@@ -207,7 +207,7 @@ class ModelSearchSubmit:
         # Run multi-GPU optimization
         study = self.get_optuna_study()
         study.optimize(
-            lambda trial: BayesianModelSearch(self.data_generator, self.model_type).multi_gpu_objective(
+            lambda trial: hyper_search.BayesianModelSearch(self.data_generator, self.model_type).multi_gpu_objective(
                 parent_run, trial,
                 self.num_epochs,
                 best_metric=self.best_metric,
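The fix qualifies `BayesianModelSearch` with its `hyper_search` module so the name resolves. For reference, the surrounding Optuna pattern, a study whose objective is a lambda closing over extra arguments, looks like this in isolation (toy objective):

```python
# Minimal Optuna pattern matching the call above: wrap the objective in a
# lambda so arguments beyond `trial` can be threaded through.
import optuna

def objective(trial, num_epochs):
    lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
    return (lr - 1e-3) ** 2 * num_epochs   # toy loss to minimize

study = optuna.create_study(direction="minimize")
study.optimize(lambda trial: objective(trial, num_epochs=10), n_trials=20)
print(study.best_params)
```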
@@ -193,8 +193,12 @@ class Predictor:
 
     # If runIDs are not provided, load all runs
     if runIDs is None:
-        runIDs = [run.name for run in self.root.runs]
-
+        runIDs = [run.name for run in self.root.runs if run.get_voxel_spacing(voxel_spacing) is not None]
+        skippedRunIDs = [run.name for run in self.root.runs if run.get_voxel_spacing(voxel_spacing) is None]
+
+        if skippedRunIDs:
+            print(f"Warning: skipping runs with no voxel spacing {voxel_spacing}: {skippedRunIDs}")
+
     # Iterate over batches of runIDs
     for i in range(0, len(runIDs), num_tomos_per_batch):
 
@@ -227,9 +231,9 @@ class Predictor:
         lambda x: torch.rot90(x, k=1, dims=(3, 4)),  # 90° rotation
         lambda x: torch.rot90(x, k=2, dims=(3, 4)),  # 180° rotation
         lambda x: torch.rot90(x, k=3, dims=(3, 4)),  # 270° rotation
-        # Flip(spatial_axis=0),  # Flip along x-axis (depth)
-        # Flip(spatial_axis=1),  # Flip along y-axis (height)
-        # Flip(spatial_axis=2),  # Flip along z-axis (width)
+        # lambda x: torch.flip(x, dims=(3,)),  # Flip along height (spatial_axis=1)
+        # lambda x: torch.flip(x, dims=(4,)),  # Flip along width (spatial_axis=2)
+        # lambda x: torch.flip(x, dims=(3, 4)),  # Flip along both height and width
     ]
 
     # Define inverse transformations (flip back to original orientation)
@@ -238,9 +242,9 @@ class Predictor:
         lambda x: torch.rot90(x, k=-1, dims=(2, 3)),  # Inverse of 90° (i.e. -90°)
         lambda x: torch.rot90(x, k=-2, dims=(2, 3)),  # Inverse of 180° (i.e. -180°)
         lambda x: torch.rot90(x, k=-3, dims=(2, 3)),  # Inverse of 270° (i.e. -270°)
-        # Flip(spatial_axis=0),  # Undo Flip along x-axis
-        # Flip(spatial_axis=1),  # Undo Flip along y-axis
-        # Flip(spatial_axis=2),  # Undo Flip along z-axis
+        # lambda x: torch.flip(x, dims=(2,)),  # Same as forward
+        # lambda x: torch.flip(x, dims=(3,)),  # Same as forward
+        # lambda x: torch.flip(x, dims=(2, 3)),  # Same as forward
    ]
 
    ###################################################################################################################################################
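The commented-out MONAI `Flip` transforms become `torch.flip` lambdas, which are their own inverses; the inverse list rotates over `dims=(2, 3)` rather than `(3, 4)`, suggesting the predictions at that point carry one fewer leading dimension than the model input. A minimal sketch of this rotation-based test-time augmentation with a linear stand-in model:

```python
# Rotation TTA sketch: rotate the input, run the model, undo the rotation on
# the prediction, then average the aligned predictions.
import torch

model = lambda x: x * 2.0                       # stand-in for the network
x = torch.rand(1, 1, 4, 8, 8)                   # (batch, channel, depth, h, w)

forward = [lambda t, k=k: torch.rot90(t, k=k, dims=(3, 4)) for k in range(4)]
inverse = [lambda t, k=k: torch.rot90(t, k=-k, dims=(3, 4)) for k in range(4)]

preds = [inv(model(fwd(x))) for fwd, inv in zip(forward, inverse)]
avg_pred = torch.stack(preds).mean(dim=0)
print(torch.allclose(avg_pred, model(x)))       # True for this linear stand-in
```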
octopi/pytorch/trainer.py CHANGED
@@ -101,6 +101,9 @@ class ModelTrainer:
                 device=self.device
             )
 
+            del val_inputs
+            torch.cuda.empty_cache()
+
             # Compute the loss for this batch
             loss = self.loss_function(val_outputs, val_labels)  # Assuming self.loss_function is defined
             val_loss += loss.item()  # Accumulate the loss
@@ -112,6 +115,9 @@ class ModelTrainer:
             # Compute metrics
             self.metrics_function(y_pred=metric_val_outputs, y=metric_val_labels)
 
+            del val_labels, val_outputs, metric_val_outputs, metric_val_labels
+            torch.cuda.empty_cache()
+
             # # Contains recall, precision, and f1 for each class
             metric_values = self.metrics_function.aggregate(reduction='mean_batch')
 
@@ -435,4 +441,4 @@ class ModelTrainer:
         best_metric = 'avg_f1'
 
     return best_metric
-
+
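The trainer hunks drop references to validation tensors as soon as they are consumed and call `torch.cuda.empty_cache()` to return cached blocks, trading a little speed for lower peak GPU memory. The pattern in isolation (a sketch; `empty_cache` is internally guarded, so it is harmless on CPU-only machines):

```python
# Validation-loop memory hygiene: free large tensors once consumed, then
# release PyTorch's cached allocator blocks back to the driver.
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.nn.Conv3d(1, 2, kernel_size=3, padding=1).to(device)

with torch.no_grad():
    for _ in range(3):
        val_inputs = torch.rand(1, 1, 16, 16, 16, device=device)
        val_outputs = model(val_inputs)
        del val_inputs                    # inputs no longer needed
        if torch.cuda.is_available():
            torch.cuda.empty_cache()      # return cached blocks to the driver

        loss = val_outputs.mean()         # stand-in for the loss function
        del val_outputs
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        print(float(loss))
```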
@@ -0,0 +1,108 @@
+Metadata-Version: 2.3
+Name: octopi
+Version: 1.1
+Summary: Model architecture exploration for cryoET particle picking
+License: MIT
+Author: Jonathan Schwartz
+Requires-Python: >=3.9,<4.0
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Dist: copick
+Requires-Dist: ipywidgets
+Requires-Dist: kaleido
+Requires-Dist: matplotlib
+Requires-Dist: mlflow (==2.17.0)
+Requires-Dist: monai-weekly (==1.5.dev2448)
+Requires-Dist: mrcfile
+Requires-Dist: multiprocess
+Requires-Dist: nibabel
+Requires-Dist: optuna (==4.0.0)
+Requires-Dist: optuna-integration[botorch,pytorch-lightning]
+Requires-Dist: pandas
+Requires-Dist: plotly
+Requires-Dist: python-dotenv
+Requires-Dist: pytorch-lightning (==2.4.0)
+Requires-Dist: requests (>=2.25.1,<3.0.0)
+Requires-Dist: seaborn
+Requires-Dist: torch-ema
+Requires-Dist: tqdm
+Project-URL: Documentation, https://chanzuckerberg.github.io/octopi/
+Project-URL: Homepage, https://github.com/chanzuckerberg/octopi
+Project-URL: Issues, https://github.com/chanzuckerberg/octopi/issues
+Description-Content-Type: text/markdown
+
+# OCTOPI 🐙🐙🐙
+
+[![License](https://img.shields.io/pypi/l/octopi.svg?color=green)](https://github.com/chanzuckerberg/octopi/raw/main/LICENSE)
+[![PyPI](https://img.shields.io/pypi/v/octopi.svg?color=green)](https://pypi.org/project/octopi)
+[![Python Version](https://img.shields.io/pypi/pyversions/octopi.svg?color=green)](https://www.python.org/)
+
+**O**bject dete**CT**ion **O**f **P**rote**I**ns. A deep learning framework for Cryo-ET 3D particle picking with autonomous model exploration capabilities.
+
+## 🚀 Introduction
+
+octopi addresses a critical bottleneck in cryo-electron tomography (cryo-ET) research: the efficient identification and extraction of proteins within complex cellular environments. As advances in cryo-ET enable the collection of thousands of tomograms, the need for automated, accurate particle picking has become increasingly urgent.
+
+Our deep learning-based pipeline streamlines the training and execution of 3D autoencoder models specifically designed for cryo-ET particle picking. Built on [copick](https://github.com/copick/copick), a storage-agnostic API, octopi seamlessly accesses tomograms and segmentations across local and remote environments.
+
+## 🧩 Core Features
+
+- **3D U-Net Training**: Train and evaluate custom 3D U-Net models for particle segmentation
+- **Automatic Architecture Search**: Explore optimal model configurations using Bayesian optimization via Optuna
+- **Flexible Data Access**: Seamlessly work with tomograms from local storage or remote data portals
+- **HPC Ready**: Built-in support for SLURM-based clusters
+- **Experiment Tracking**: Integrated MLflow support for monitoring training and optimization
+- **Dual Interface**: Use via command-line or Python API
+
+## 🚀 Quick Start
+
+### Installation
+
+```bash
+pip install octopi
+```
+
+### Basic Usage
+
+octopi provides two main command-line interfaces:
+
+```bash
+# Main CLI for training, inference, and data processing
+octopi --help
+```
+
+The main `octopi` command provides subcommands for:
+- Data import and preprocessing
+- Training label preparation
+- Model training and exploration
+- Inference and particle localization
+
+```bash
+# HPC-specific CLI for submitting jobs to SLURM clusters
+octopi-slurm --help
+```
+
+The `octopi-slurm` command provides utilities for:
+- Submitting training jobs to SLURM clusters
+- Managing distributed inference tasks
+- Handling batch processing on HPC systems
+
+## 📚 Documentation
+
+For detailed documentation, tutorials, CLI and API reference, visit our [documentation](https://chanzuckerberg.github.io/octopi/).
+
+## 🤝 Contributing
+
+This project adheres to the Contributor Covenant code of conduct. By participating, you are expected to uphold this code. Please report unacceptable behavior to opensource@chanzuckerberg.com.
+
+## 🔒 Security
+
+If you believe you have found a security issue, please responsibly disclose by contacting us at security@chanzuckerberg.com.
+
+
+
@@ -7,23 +7,23 @@ octopi/datasets/generators.py,sha256=aqsIhipkG6bBzwpUlvP_N5m2Je5vs4Vq7gQN1z2uKPc
 octopi/datasets/mixup.py,sha256=BJUAmM7ItZWFChs8glnd8RNSXR5qGW7DHscbcVc3TsU,1575
 octopi/datasets/multi_config_generator.py,sha256=SIYqz3Xps4gyWgUo02W1KbObM4ye14dIHJi25XlIIRc,10805
 octopi/entry_points/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-octopi/entry_points/common.py,sha256=lh2mPNkhMtqMoSqPPajQ-saiGXiOR-vMiFuuolJWWvI,5332
-octopi/entry_points/create_slurm_submission.py,sha256=h3pllxmPnG6QUujfop4CNE8xYNsfWrAEkavRCi9UGzg,8720
+octopi/entry_points/common.py,sha256=o1H_rDkiflyOMkJr85aUgso31X4ib4wFAfqfh-CZqho,5351
+octopi/entry_points/create_slurm_submission.py,sha256=Fk9Bhn81SAf0SZW5ZkoOdV6afycw8aLk_tjkgYymEGk,8908
 octopi/entry_points/run_create_targets.py,sha256=IacDeL9k3sCRbeVuyn32IffjrdQe31VAbS4CxTWpHFk,11226
 octopi/entry_points/run_evaluate.py,sha256=I90kP_GOtAO7zbuEY9ptZ8Y-g1wZa41N9a2ZGbijEcs,2826
-octopi/entry_points/run_extract_mb_picks.py,sha256=iULXtjoL06SApeychQGty5lWAINeNt_1D-zGeJIXT6k,5583
+octopi/entry_points/run_extract_mb_picks.py,sha256=8TFShlDUyFgDwFTmPeMyJPAMBsmuwNhG155d3bVaRgU,4854
 octopi/entry_points/run_extract_midpoint.py,sha256=O6GdkSD7oXIpSVqizOc6PHhv9nunz3j0RucmYQ2yryM,5742
-octopi/entry_points/run_localize.py,sha256=0TKaZD7Uk8ud2ZIlcsB9-6S6vPDlOKIdtCxe_sWnzY4,8678
+octopi/entry_points/run_localize.py,sha256=V_BhugnvvoTHf9mTZdQIbTLVKEwfGnuUAsykuGiSgkA,8233
 octopi/entry_points/run_optuna.py,sha256=LQXQ6W3v8MhJ9EoKbtFc84Muy3ofjzo-0SyUQaMAaMs,5743
 octopi/entry_points/run_segment_predict.py,sha256=46CfxDWxbKAZT8o2xr4JiYAwLxVpxXLwGylZU8WXgMc,5442
 octopi/entry_points/run_train.py,sha256=zdeZxtwK0fJytex9TzZBXxpQxF8b3ljpWeYdBsuOR8k,8094
 octopi/extract/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-octopi/extract/localize.py,sha256=jvVDl2b6cQZyuxUfLaB4WgMLY53ENCKq2K2CVw1FYUU,9515
+octopi/extract/localize.py,sha256=7kPblW-Hzkxkrca4bEL0LJoAdKJTCL28lzzDhTGff3g,9815
 octopi/extract/membranebound_extract.py,sha256=VdlsiytoaKBk4wdhORJU8yRSOARFN2zvnF6PZpJkGmo,11208
 octopi/extract/midpoint_extract.py,sha256=W8sVIAweqkQcF18ie7tvvvOeBzRTBdH95FzPtjMcWjc,7033
-octopi/io.py,sha256=c9ZLhFzhoXx2XJ3GmQM-Dx2CUkCOfHj-HWOq3QQvzJU,19558
+octopi/io.py,sha256=EuUTrLArNiErjB-ad0EWEljXcTcpNqEHqPS4goa495s,19565
 octopi/losses.py,sha256=fs9yR80Hs-hL07WgVMkRy5N81vzP5m9XBCvzO76bIPU,3097
-octopi/main.py,sha256=03CqyB9iKCxpek1a2A-vMsIsEjWjW1Q9XbDqqDnWOW0,4866
+octopi/main.py,sha256=ef_zpvopl6JiN4gOT_x2QghRJriqg4iwRzBnvuiBeTo,4864
 octopi/models/AttentionUnet.py,sha256=r185aXRtfXhN-n8FxA-Sjz18nqpxHH_2t2uadrH-Mgs,1991
 octopi/models/MedNeXt.py,sha256=9q0FsyrqTx211hCbDv0Lm2XflzXL_bGA4-76BscziGk,4875
 octopi/models/ModelTemplate.py,sha256=X80EOXwSovCjmVb7x-0_JmRjHfDfLByDdd60MrgFTyw,1084
@@ -33,8 +33,8 @@ octopi/models/UnetPlusPlus.py,sha256=fnV-SvJV8B432KJXQAtdwLy8Va6DJ4fRB_7a1mZiqTU
 octopi/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 octopi/models/common.py,sha256=kXE0GcQSdfbiP0PDaJBkAJClBqTqCamP-2bHkbe0uBg,2307
 octopi/processing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-octopi/processing/create_targets_from_picks.py,sha256=D7JlPIaq3ClzCKvK-Z8F-eUpUbUd7zGysJ2Z0AYQzzE,4010
-octopi/processing/downsample.py,sha256=zlZAgrKiPEltFZCLYgcAcVVtoKb_qMzV_eUv1UaBy4k,5498
+octopi/processing/create_targets_from_picks.py,sha256=wcCUCbw_fbSZuOIR32TgrADPHVDuH4RTdmiSTR_aDUw,4400
+octopi/processing/downsample.py,sha256=u3V2HULdKTLEthzI-MqJmI-NqpiyX0Vg61i77-j2MKQ,5392
 octopi/processing/evaluate.py,sha256=0w1iqmD8EXRdce-ctSoEJu5lAwYk1CjL4Qd_dpZt7Yw,13364
 octopi/processing/importers.py,sha256=TBgPlleGGOW8tJJLArrAFWxzvxH1qE-id_PTLRscgEs,8876
 octopi/processing/my_metrics.py,sha256=7ZhCEiSYkqbSpoTntAkCrr7y83u6-jI8k8d3P9TlrQA,1040
@@ -42,9 +42,9 @@ octopi/processing/segmentation_from_picks.py,sha256=jah1gAXEn09LIok1Cb8IeVN-fT3j
 octopi/processing/writers.py,sha256=YB6-0mwJ9sc_eJ3G_WItcqlBjFOpggyNhoRswKOZA6Q,3301
 octopi/pytorch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 octopi/pytorch/hyper_search.py,sha256=xCJ_8qYaK80UMb2JrBvU31tEZ8-L7MBGBE9pb63-8a0,9431
-octopi/pytorch/model_search_submitter.py,sha256=2sgOrrQFdvvSrnfVgpt0uQmZ0H1MjudNyaDvLXSBSKY,11162
-octopi/pytorch/segmentation.py,sha256=hS9mUBAmHqTk7zB0lyCHerzir5IvTbXtOA7lRIOJk7w,13689
-octopi/pytorch/trainer.py,sha256=wP384rmpuXna1Ou3wksPXHGe2n3bZNfIUZWynbzLHGU,17549
+octopi/pytorch/model_search_submitter.py,sha256=gpXI5RtXwC3T3wvAERWl0GNuhTGvzqW2guG6podgw6A,11247
+octopi/pytorch/segmentation.py,sha256=1EWkOUb419U3Wwhxzc7L3BKUqH4NbzAW8othqT44TqA,14063
+octopi/pytorch/trainer.py,sha256=VLDp1hiCKC_l1YfNFcnTx8alKjXggpXRTo-ZtjhLoSg,17748
 octopi/pytorch_lightning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 octopi/pytorch_lightning/optuna_pl_ddp.py,sha256=ynD5i81IP-awr6OD9GDurjrQK-5Kc079qPaukphTHnA,11924
 octopi/pytorch_lightning/train_pl.py,sha256=igOHzU_mUdZRQGhoOGW5vmxJcHFcw5fAPHfVCIZ0eG4,10220
@@ -52,8 +52,8 @@ octopi/stopping_criteria.py,sha256=Fib1fiFqPQPLDxZQNUTgQInY8RWGNBzXkpqF_WJ2PSo,6
 octopi/submit_slurm.py,sha256=cRbJTESbPFCt6Cq4Hat2uPOQKFYMPcQxuNs0jc1ygUA,1945
 octopi/utils.py,sha256=iezl5ui2E_Qs00_HS4uPPh008pdyY6IXI9euLfkUq4s,9008
 octopi/visualization_tools.py,sha256=80Kj8yX09LEXJe3QqeithhlwEdoz9wOYRk7T1RFRmw4,7368
-octopi-1.0.dist-info/LICENSE,sha256=zYaYdrEn2O4KTO8sLySIhHOKerzA69toCj2ywuvHT7Q,1816
-octopi-1.0.dist-info/METADATA,sha256=lOJwRTgwySLUtMLeaaTegM20fCxZMqtu73tVyMXhqS4,9483
-octopi-1.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-octopi-1.0.dist-info/entry_points.txt,sha256=FnDGURPvbr2Hl7n0LPN-5jHtvrKA87MNvhCFO-BwAfg,87
-octopi-1.0.dist-info/RECORD,,
+octopi-1.1.dist-info/LICENSE,sha256=zYaYdrEn2O4KTO8sLySIhHOKerzA69toCj2ywuvHT7Q,1816
+octopi-1.1.dist-info/METADATA,sha256=8G-sdRk50HjWdu4Y_2alWs95IQYQvTiFPcn0EvkqdtA,4270
+octopi-1.1.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+octopi-1.1.dist-info/entry_points.txt,sha256=FnDGURPvbr2Hl7n0LPN-5jHtvrKA87MNvhCFO-BwAfg,87
+octopi-1.1.dist-info/RECORD,,
@@ -1,209 +0,0 @@
-Metadata-Version: 2.3
-Name: octopi
-Version: 1.0
-Summary: Model architecture exploration for cryoET particle picking
-License: MIT
-Author: Jonathan Schwartz
-Requires-Python: >=3.9,<4.0
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
-Requires-Dist: copick
-Requires-Dist: ipywidgets
-Requires-Dist: kaleido
-Requires-Dist: matplotlib
-Requires-Dist: mlflow (==2.17.0)
-Requires-Dist: monai-weekly (==1.5.dev2448)
-Requires-Dist: mrcfile
-Requires-Dist: multiprocess
-Requires-Dist: nibabel
-Requires-Dist: optuna (==4.0.0)
-Requires-Dist: optuna-integration[botorch,pytorch-lightning]
-Requires-Dist: pandas
-Requires-Dist: plotly
-Requires-Dist: python-dotenv
-Requires-Dist: pytorch-lightning (==2.4.0)
-Requires-Dist: requests (>=2.25.1,<3.0.0)
-Requires-Dist: seaborn
-Requires-Dist: torch-ema
-Requires-Dist: tqdm
-Description-Content-Type: text/markdown
-
-# OCTOPI 🐙🐙🐙
-**O**bject dete**CT**ion **O**f **P**rote**I**ns. A deep learning framework for Cryo-ET 3D particle picking with autonomous model exploration capabilities.
-
-## 🚀 Introduction
-
-octopi addresses a critical bottleneck in cryo-electron tomography (cryo-ET) research: the efficient identification and extraction of proteins within complex cellular environments. As advances in cryo-ET enable the collection of thousands of tomograms, the need for automated, accurate particle picking has become increasingly urgent.
-
-Our deep learning-based pipeline streamlines the training and execution of 3D autoencoder models specifically designed for cryo-ET particle picking. Built on [copick](https://github.com/copick/copick), a storage-agnostic API, octopi seamlessly accesses tomograms and segmentations across local and remote environments.
-
-## 🧩 Features
-
-octopi offers a modular, deep learning-driven pipeline for:
-* Training and evaluating custom 3D U-Net models for particle segmentation.
-* Automatically exploring model architectures using Bayesian optimization via Optuna.
-* Performing inference for both semantic segmentation and particle localization.
-
-octopi empowers researchers to navigate the dense, intricate landscapes of cryo-ET datasets with unprecedented precision and efficiency without manual trial and error.
-
-## Getting Started
-### Installation
-
-*Octopi* is available on PyPI.
-```
-pip install octopi
-```
-
-## 📚 Usage
-
-octopi provides a clean, scriptable command-line interface. Run the following command to view all available subcommands:
-```
-octopi --help
-```
-Each subcommand supports its own --help flag for detailed usage. To see practical examples of how to interface directly with the octopi API, explore the notebooks/ folder.
-
-If you're running octopi on an HPC cluster, several SLURM-compatible submission commands are available. You can view them by running:
-```
-octopi-slurm --help
-```
-This provides utilities for submitting training, inference, and localization jobs in SLURM-based environments.
-
-### 📥 Data Import & Preprocessing
-
-To train or run inference with octopi, your tomograms must be organized inside a CoPick project. octopi supports two primary methods for data ingestion, both of which include optional Fourier cropping to reduce resolution and accelerate downstream processing.
-
-If your tomograms are already processed and stored locally in .mrc format (e.g., from Warp, IMOD, or AreTomo), you can import them into a new or existing CoPick project using:
-
-```
-octopi import-mrc-volumes \
-    --input-folder /path/to/mrc/files --config /path/to/config.json \
-    --target-tomo-type denoised --input-voxel-size --output-voxel-size 10
-```
-
-octopi can also process tomograms that are hosted on the data portal. Users can download tomograms onto their own remote machine, especially if they would like to downsample the tomograms to a lower resolution for speed and memory. You can download and process the tomograms using:
-```
-octopi download-dataportal \
-    --config /path/to/config.json --datasetID 10445 --overlay-path path/to/saved/zarrs \
-    --input-voxel-size 5 --output-voxel-size 10 \
-    --dataportal-name wbp --target-tomotype wbp
-```
-
-### 📁 Training Labels Preparation
-
-Use `octopi create-targets` to create semantic masks for proteins of interest using annotation metadata. In this example, let's generate picks segmentations for dataset 10439 from the CZ cryoET Dataportal (this step only needs to be run once).
-```
-octopi create-targets \
-    --config config.json \
-    --target apoferritin --target beta-galactosidase,slabpick,1 \
-    --target ribosome,pytom,0 --target virus-like-particle,pytom,0 \
-    --seg-target membrane \
-    --tomo-alg wbp --voxel-size 10 \
-    --target-session-id 1 --target-segmentation-name remotetargets \
-    --target-user-id train-octopi
-```
-
-### 🧠 Training a single 3D U-Net model
-Train a 3D U-Net model on the prepared target segmentations. We can use tomograms derived from multiple copick projects.
-```
-octopi train-model \
-    --config experiment,config1.json \
-    --config simulation,config2.json \
-    --voxel-size 10 --tomo-alg wbp --Nclass 8 \
-    --tomo-batch-size 50 --num-epochs 100 --val-interval 10 \
-    --target-info remotetargets,train-octopi,1
-```
-Outputs will include model weights (.pth), logs, and training metrics.
-
-### 🔍 Model exploration with Optuna
-
-octopi🐙 supports automatic neural architecture search using Optuna, enabling efficient discovery of optimal 3D U-Net configurations through Bayesian optimization. This allows users to maximize segmentation accuracy without manual tuning.
-
-To launch a model exploration job:
-```
-octopi model-explore \
-    --config experiment,/mnt/dataportal/ml_challenge/config.json \
-    --config simulation,/mnt/dataportal/synthetic_ml_challenge/config.json \
-    --voxel-size 10 --tomo-alg wbp --Nclass 8 \
-    --model-save-path train_results
-```
-Each trial evaluates a different architecture and logs:
-• Segmentation performance metrics
-• Model weights and configs
-• Training curves and validation loss
-
-🔬 Trials are automatically tracked with MLflow and saved under the specified `--model-save-path`.
-
-#### Optuna Dashboard
-
-To quickly assess the exploration results and observe which trials yield the best architectures, Optuna provides a dashboard that summarizes all the information. The instructions to access the dashboard are available here - https://optuna-dashboard.readthedocs.io/en/latest/getting-started.html; it is recommended to use either the VS Code extension or the CLI.
-
-#### 📊 MLflow experiment tracking
-
-To use the CZI cloud MLflow tracker, add a `.env` in the root directory like below. You can get a CZI MLflow access token from [here](https://mlflow.cw.use4-prod.si.czi.technology/api/2.0/mlflow/users/access-token) (note that a new token will be generated every time you open this site).
-```
-MLFLOW_TRACKING_USERNAME = <Your_CZ_email>
-MLFLOW_TRACKING_PASSWORD = <Your_mlflow_access_token>
-```
-
-octopi supports MLflow for logging and visualizing model training and hyperparameter search results, including:
-• Training loss/validation metrics over time
-• Model hyperparameters and architecture details
-• Trial comparison (e.g., best performing model)
-
-You can use either a local MLflow instance, a remote (HPC) instance, or the CZI cloud server:
-
-#### 🧪 Local MLflow Dashboard
-
-To inspect results locally: `mlflow ui` and open http://localhost:5000 in your browser.
-
-#### 🖥️ HPC Cluster MLflow Access (Remote via SSH tunnel)
-
-If running octopi on a remote cluster (e.g., Biohub Bruno), forward the MLflow port.
-On your local machine:
-`ssh -L 5000:localhost:5000 remote_username@remote_host` (in the case of Bruno the remote would be `login01.czbiohub.org`).
-
-Then on the remote terminal (login node): `mlflow ui --host 0.0.0.0 --port 5000` to launch the MLflow dashboard in your local browser.
-
-#### ☁️ CZI coreweave cluster
-
-For the CZI coreweave cluster, MLflow is already hosted. Go to the CZI [mlflow server](https://mlflow.cw.use4-prod.si.czi.technology/).
-
-🔐 A .env file is required to authenticate (see Getting Started section).
-📁 Be sure to register your project name in MLflow before launching runs.
-
-### 🔮 Segmentation
-Generate segmentation prediction masks for tomograms in a given copick project.
-```
-octopi inference \
-    --config config.json \
-    --seg-info predict,unet,1 \
-    --model-config train_results/best_model_config.yaml \
-    --model-weights train_results/best_model.pth \
-    --voxel-size 10 --tomo-alg wbp --tomo-batch-size 25
-```
-Output masks will be saved to the corresponding copick project under the `seg-info` input.
-
-### 📍 Localization
-Convert the segmentation masks into particle coordinates.
-```
-octopi localize \
-    --config config.json \
-    --pick-session-id 1 --pick-user-id unet \
-    --seg-info predict,unet,1
-```
-
-## Contributing
-
-This project adheres to the Contributor Covenant code of conduct. By participating, you are expected to uphold this code. Please report unacceptable behavior to opensource@chanzuckerberg.com.
-
-## Reporting Security Issues
-
-Please note: If you believe you have found a security issue, please responsibly disclose by contacting us at security@chanzuckerberg.com.
-
-
-