Rhapso 0.1.92__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101) hide show
  1. Rhapso/__init__.py +1 -0
  2. Rhapso/data_prep/__init__.py +2 -0
  3. Rhapso/data_prep/n5_reader.py +188 -0
  4. Rhapso/data_prep/s3_big_stitcher_reader.py +55 -0
  5. Rhapso/data_prep/xml_to_dataframe.py +215 -0
  6. Rhapso/detection/__init__.py +5 -0
  7. Rhapso/detection/advanced_refinement.py +203 -0
  8. Rhapso/detection/difference_of_gaussian.py +324 -0
  9. Rhapso/detection/image_reader.py +117 -0
  10. Rhapso/detection/metadata_builder.py +130 -0
  11. Rhapso/detection/overlap_detection.py +327 -0
  12. Rhapso/detection/points_validation.py +49 -0
  13. Rhapso/detection/save_interest_points.py +265 -0
  14. Rhapso/detection/view_transform_models.py +67 -0
  15. Rhapso/fusion/__init__.py +0 -0
  16. Rhapso/fusion/affine_fusion/__init__.py +2 -0
  17. Rhapso/fusion/affine_fusion/blend.py +289 -0
  18. Rhapso/fusion/affine_fusion/fusion.py +601 -0
  19. Rhapso/fusion/affine_fusion/geometry.py +159 -0
  20. Rhapso/fusion/affine_fusion/io.py +546 -0
  21. Rhapso/fusion/affine_fusion/script_utils.py +111 -0
  22. Rhapso/fusion/affine_fusion/setup.py +4 -0
  23. Rhapso/fusion/affine_fusion_worker.py +234 -0
  24. Rhapso/fusion/multiscale/__init__.py +0 -0
  25. Rhapso/fusion/multiscale/aind_hcr_data_transformation/__init__.py +19 -0
  26. Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/__init__.py +3 -0
  27. Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/czi_to_zarr.py +698 -0
  28. Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/zarr_writer.py +265 -0
  29. Rhapso/fusion/multiscale/aind_hcr_data_transformation/models.py +81 -0
  30. Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/__init__.py +3 -0
  31. Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/utils.py +526 -0
  32. Rhapso/fusion/multiscale/aind_hcr_data_transformation/zeiss_job.py +249 -0
  33. Rhapso/fusion/multiscale/aind_z1_radial_correction/__init__.py +21 -0
  34. Rhapso/fusion/multiscale/aind_z1_radial_correction/array_to_zarr.py +257 -0
  35. Rhapso/fusion/multiscale/aind_z1_radial_correction/radial_correction.py +557 -0
  36. Rhapso/fusion/multiscale/aind_z1_radial_correction/run_capsule.py +98 -0
  37. Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/__init__.py +3 -0
  38. Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/utils.py +266 -0
  39. Rhapso/fusion/multiscale/aind_z1_radial_correction/worker.py +89 -0
  40. Rhapso/fusion/multiscale_worker.py +113 -0
  41. Rhapso/fusion/neuroglancer_link_gen/__init__.py +8 -0
  42. Rhapso/fusion/neuroglancer_link_gen/dispim_link.py +235 -0
  43. Rhapso/fusion/neuroglancer_link_gen/exaspim_link.py +127 -0
  44. Rhapso/fusion/neuroglancer_link_gen/hcr_link.py +368 -0
  45. Rhapso/fusion/neuroglancer_link_gen/iSPIM_top.py +47 -0
  46. Rhapso/fusion/neuroglancer_link_gen/link_utils.py +239 -0
  47. Rhapso/fusion/neuroglancer_link_gen/main.py +299 -0
  48. Rhapso/fusion/neuroglancer_link_gen/ng_layer.py +1434 -0
  49. Rhapso/fusion/neuroglancer_link_gen/ng_state.py +1123 -0
  50. Rhapso/fusion/neuroglancer_link_gen/parsers.py +336 -0
  51. Rhapso/fusion/neuroglancer_link_gen/raw_link.py +116 -0
  52. Rhapso/fusion/neuroglancer_link_gen/utils/__init__.py +4 -0
  53. Rhapso/fusion/neuroglancer_link_gen/utils/shader_utils.py +85 -0
  54. Rhapso/fusion/neuroglancer_link_gen/utils/transfer.py +43 -0
  55. Rhapso/fusion/neuroglancer_link_gen/utils/utils.py +303 -0
  56. Rhapso/fusion/neuroglancer_link_gen_worker.py +30 -0
  57. Rhapso/matching/__init__.py +0 -0
  58. Rhapso/matching/load_and_transform_points.py +458 -0
  59. Rhapso/matching/ransac_matching.py +544 -0
  60. Rhapso/matching/save_matches.py +120 -0
  61. Rhapso/matching/xml_parser.py +302 -0
  62. Rhapso/pipelines/__init__.py +0 -0
  63. Rhapso/pipelines/ray/__init__.py +0 -0
  64. Rhapso/pipelines/ray/aws/__init__.py +0 -0
  65. Rhapso/pipelines/ray/aws/alignment_pipeline.py +227 -0
  66. Rhapso/pipelines/ray/aws/config/__init__.py +0 -0
  67. Rhapso/pipelines/ray/evaluation.py +71 -0
  68. Rhapso/pipelines/ray/interest_point_detection.py +137 -0
  69. Rhapso/pipelines/ray/interest_point_matching.py +110 -0
  70. Rhapso/pipelines/ray/local/__init__.py +0 -0
  71. Rhapso/pipelines/ray/local/alignment_pipeline.py +167 -0
  72. Rhapso/pipelines/ray/matching_stats.py +104 -0
  73. Rhapso/pipelines/ray/param/__init__.py +0 -0
  74. Rhapso/pipelines/ray/solver.py +120 -0
  75. Rhapso/pipelines/ray/split_dataset.py +78 -0
  76. Rhapso/solver/__init__.py +0 -0
  77. Rhapso/solver/compute_tiles.py +562 -0
  78. Rhapso/solver/concatenate_models.py +116 -0
  79. Rhapso/solver/connected_graphs.py +111 -0
  80. Rhapso/solver/data_prep.py +181 -0
  81. Rhapso/solver/global_optimization.py +410 -0
  82. Rhapso/solver/model_and_tile_setup.py +109 -0
  83. Rhapso/solver/pre_align_tiles.py +323 -0
  84. Rhapso/solver/save_results.py +97 -0
  85. Rhapso/solver/view_transforms.py +75 -0
  86. Rhapso/solver/xml_to_dataframe_solver.py +213 -0
  87. Rhapso/split_dataset/__init__.py +0 -0
  88. Rhapso/split_dataset/compute_grid_rules.py +78 -0
  89. Rhapso/split_dataset/save_points.py +101 -0
  90. Rhapso/split_dataset/save_xml.py +377 -0
  91. Rhapso/split_dataset/split_images.py +537 -0
  92. Rhapso/split_dataset/xml_to_dataframe_split.py +219 -0
  93. rhapso-0.1.92.dist-info/METADATA +39 -0
  94. rhapso-0.1.92.dist-info/RECORD +101 -0
  95. rhapso-0.1.92.dist-info/WHEEL +5 -0
  96. rhapso-0.1.92.dist-info/licenses/LICENSE +21 -0
  97. rhapso-0.1.92.dist-info/top_level.txt +2 -0
  98. tests/__init__.py +1 -0
  99. tests/test_detection.py +17 -0
  100. tests/test_matching.py +21 -0
  101. tests/test_solving.py +21 -0
@@ -0,0 +1,537 @@
1
+ from scipy.spatial import cKDTree
2
+ from copy import deepcopy
3
+ import time
4
+ import random
5
+ import numpy as np
6
+ import zarr
7
+ import s3fs
8
+ import math
9
+
10
class SplitImages:
    """Split oversized view setups into smaller, overlapping sub-images and
    redistribute their interest points (BigStitcher-style image splitting).
    """

    def __init__(self, target_image_size, target_overlap, min_step_size, data_gloabl, n5_path, point_density, min_points, max_points,
                 error, excludeRadius):
        """Capture split geometry, global metadata tables and fake-point parameters.

        ``data_gloabl`` (sic — misspelled parameter name kept for caller
        compatibility) is a mapping holding the 'image_loader', 'view_setups',
        'view_registrations' and 'view_interest_points' dataframes.
        """
        # Geometry of the split (per-dimension lists).
        self.target_image_size = target_image_size
        self.target_overlap = target_overlap
        self.min_step_size = min_step_size
        # Global metadata tables.
        self.data_global = data_gloabl
        self.image_loader_df = data_gloabl['image_loader']
        self.view_setups_df = data_gloabl['view_setups']
        self.view_registrations_df = data_gloabl['view_registrations']
        self.view_interest_points_df = data_gloabl['view_interest_points']
        # Source container and fake-point generation parameters.
        self.n5_path = n5_path
        self.point_density = point_density
        self.min_points = min_points
        self.max_points = max_points
        self.error = error
        self.exclude_radius = excludeRadius
        # Filled by split_images(): one record per generated sub-view.
        self.setup_definition = []
28
+
29
+ def intersect(self, interval, other_interval):
30
+ n = len(interval[0])
31
+ mins = [max(interval[0][d], other_interval[0][d]) for d in range(n)]
32
+ maxs = [min(interval[1][d], other_interval[1][d]) for d in range(n)]
33
+
34
+ return (mins, maxs)
35
+
36
+ def create_models(self, transform_list):
37
+ M = np.eye(4)
38
+ for tr in transform_list:
39
+ A = np.fromstring(tr["affine"].replace(",", " "), sep=" ").reshape(3, 4)
40
+ T = np.vstack([A, [0, 0, 0, 1]])
41
+ M = M @ T
42
+
43
+ vals = M[:3, :].ravel()
44
+ m00,m01,m02,m03, m10,m11,m12,m13, m20,m21,m22,m23 = map(float, vals)
45
+
46
+ model = {
47
+ "type": "AffineTransform3D",
48
+ "string": "3d-affine: (" + ", ".join(format(v, ".16g") for v in vals) + ")",
49
+ "a": {
50
+ "type": "AffineTransform3D$AffineMatrix3D",
51
+ "m00": m00, "m01": m01, "m02": m02, "m03": m03,
52
+ "m10": m10, "m11": m11, "m12": m12, "m13": m13,
53
+ "m20": m20, "m21": m21, "m22": m22, "m23": m23,
54
+ "m": [[m00, m01, m02, m03],
55
+ [m10, m11, m12, m13],
56
+ [m20, m21, m22, m23]],
57
+ },
58
+ "d0": {"type": "RealPoint", "string": f"({format(m00,'.16g')},{format(m10,'.16g')},{format(m20,'.16g')})",
59
+ "n": 3, "position": [m00, m10, m20]},
60
+ "d1": {"type": "RealPoint", "string": f"({format(m01,'.16g')},{format(m11,'.16g')},{format(m21,'.16g')})",
61
+ "n": 3, "position": [m01, m11, m21]},
62
+ "d2": {"type": "RealPoint", "string": f"({format(m02,'.16g')},{format(m12,'.16g')},{format(m22,'.16g')})",
63
+ "n": 3, "position": [m02, m12, m22]},
64
+ "ds": [[m00, m10, m20], [m01, m11, m21], [m02, m12, m22]],
65
+ }
66
+
67
+ return model
68
+
69
+ def localizing_zero_min_interval_iterator(self, dimensions):
70
+ dims = [int(d) for d in dimensions]
71
+ n = len(dims)
72
+ mn = [0] * n
73
+ mx = [d - 1 for d in dims]
74
+ steps = [1] * n
75
+ for d in range(1, n):
76
+ steps[d] = steps[d - 1] * dims[d - 1]
77
+ last_index = (steps[-1] * dims[-1] - 1) if n else -1
78
+ pos = mn.copy()
79
+ if n:
80
+ pos[0] = mn[0] - 1
81
+
82
+ return {
83
+ "dimensions": dims,
84
+ "index": -1,
85
+ "last_index": last_index,
86
+ "max": mx,
87
+ "min": mn,
88
+ "n": n,
89
+ "position": pos,
90
+ "steps": steps,
91
+ }
92
+
93
+ def split_dims(self, input, i, final_size, overlap):
94
+ dim_intervals = []
95
+ input_min = [0, 0, 0]
96
+ to_val = 0
97
+ from_val = input_min[i]
98
+
99
+ while to_val < input[i]:
100
+ to_val = min(input[i], from_val + final_size - 1)
101
+ dim_intervals.append((from_val, to_val))
102
+ from_val = to_val - overlap + 1
103
+
104
+ return dim_intervals
105
+
106
+ def last_image_size(self, l, s, o):
107
+ num = l - 2 * (s - o) - o
108
+ den = s - o
109
+ rem = num % den if num >= 0 else -((-num) % den)
110
+ size = o + rem
111
+ if size < 0:
112
+ size = l + size
113
+ return size
114
+
115
    def distribute_intervals_fixed_overlap(self, input):
        """Split an image of size *input* (an "x y z" string) into overlapping
        per-dimension intervals and return their cartesian product as a list
        of ``(min, max)`` pairs.

        Mirrors BigStitcher's fixed-overlap splitting: the target tile size is
        tuned in ``min_step_size`` increments so the trailing tile along each
        axis does not become degenerately small.

        Raises RuntimeError when target size or overlap is not a multiple of
        ``min_step_size`` for some dimension.
        """
        input = list(map(int, input.split()))

        # Validate that the tiling parameters sit on the min-step grid.
        for i in range(len(input)):
            if self.target_image_size[i] % self.min_step_size[i] != 0:
                raise RuntimeError(f"target size {self.target_image_size[i]} not divisible by min step size {self.min_step_size[i]} for dim {i}")
            elif self.target_overlap[i] % self.min_step_size[i] != 0:
                raise RuntimeError(f"overlap {self.target_overlap[i]} not divisible by min step size {self.min_step_size[i]} for dim {i}")

        interval_basis = []
        for i in range(len(input)):
            dim_intervals = []
            length = input[i]

            if length <= self.target_image_size[i]:
                # Axis already fits in a single tile: no sub-intervals.
                # NOTE(review): this leaves dim_intervals empty, which makes
                # the final cartesian product empty for the whole view —
                # confirm whether a single full-axis interval was intended.
                pass

            else:
                l = length
                s = self.target_image_size[i]
                o = self.target_overlap[i]
                last_image_size = self.last_image_size(l, s, o)

                final_size = 0
                if last_image_size != s:
                    last_size = s
                    delta = 0
                    current_last_image_size = 0

                    if last_image_size <= s // 2:
                        # Trailing tile too small: grow the tile size until
                        # the trailing tile stops shrinking.
                        while True:
                            last_size += self.min_step_size[i]
                            current_last_image_size = self.last_image_size(l, last_size, o)
                            delta = last_image_size - current_last_image_size
                            last_image_size = current_last_image_size
                            if delta <= 0: break

                        final_size = last_size

                    else:
                        # Trailing tile large: shrink the tile size until the
                        # trailing tile stops growing, then step back once.
                        while True:
                            last_size -= self.min_step_size[i]
                            current_last_image_size = self.last_image_size(l, last_size, o)
                            delta = last_image_size - current_last_image_size
                            last_image_size = current_last_image_size
                            if delta >= 0: break

                        final_size = last_size + self.min_step_size[i]

                else:
                    # Trailing tile already exactly the target size.
                    final_size = s

                split_dims = self.split_dims(input, i, final_size, self.target_overlap[i])
                dim_intervals.extend(split_dims)

            interval_basis.append(dim_intervals)

        # Number of sub-intervals along each dimension.
        num_intervals = []
        for i in range(len(input)):
            num_intervals.append(len(interval_basis[i]))

        # Walk the cartesian product of interval indices with an
        # ImgLib2-style localizing cursor (first dimension varies fastest).
        cursor = self.localizing_zero_min_interval_iterator(num_intervals)
        interval_list = []
        current_interval = [0, 0, 0]

        while cursor['index'] < cursor['last_index']:

            # fwd: advance the flat index and the n-dim position with carry.
            cursor['index'] = cursor['index'] + 1
            for i in range(cursor['n']):
                cursor['position'][i] = cursor['position'][i] + 1
                if cursor['position'][i] > cursor['max'][i]:
                    cursor['position'][i] = 0
                else:
                    break

            # localize: copy the cursor position into current_interval.
            for i in range(cursor['n']):
                current_interval[i] = cursor['position'][i]

            # Assemble the n-dimensional (min, max) box for this combination.
            min_val = [0, 0, 0]
            max_val = [0, 0, 0]

            for i in range(len(input)):
                min_max = interval_basis[i][current_interval[i]]
                min_val[i] = min_max[0]
                max_val[i] = min_max[1]

            interval_list.append((min_val, max_val))

        return interval_list
206
+
207
+ def max_interval_spread(self, old_setups_df):
208
+ max_val = 1
209
+ for _, row in old_setups_df.iterrows():
210
+ input = row['size']
211
+ intervals = self.distribute_intervals_fixed_overlap(input)
212
+ max_val = max(len(intervals), max_val)
213
+
214
+ return max_val
215
+
216
+ def is_empty(self, interval):
217
+ if interval is None:
218
+ return True
219
+ mins, maxs = interval
220
+ return any(mn > mx for mn, mx in zip(mins, maxs))
221
+
222
+ def contains(self, ip, interval):
223
+ for i in range(len(ip)):
224
+ if ip[i] < interval[0][i] or ip[i] > interval[1][i]:
225
+ return False
226
+
227
+ return True
228
+
229
    def split_images(self, timepoints, interest_points, fake_label):
        """Split every view setup into its sub-intervals.

        For each (interval, timepoint) pair this produces a new view setup, a
        new registration (the old transform chain plus an 'Image Splitting'
        translation to the interval's min corner), the original interest
        points that fall inside the interval (label 'beads_split'), and
        randomly generated "fake" points (label *fake_label*) shared with
        every earlier overlapping interval so the splits can later be matched.

        Also appends one record per new sub-view to ``self.setup_definition``.
        Returns the mapping from new view-id keys to interest-point lists.
        """
        old_setups_df = deepcopy(self.view_setups_df)
        old_registrations_df = deepcopy(self.view_registrations_df)

        new_to_old_setup_id = {}
        new_setup_id_to_interval = {}
        new_setups = []
        new_registrations = {}
        new_interest_points = {}

        new_id = 0
        max_interval_spread = self.max_interval_spread(old_setups_df)
        # Fixed seed: fake-point generation is deterministic across runs.
        rnd = random.Random(23424459)

        for _, row in old_setups_df.iterrows():
            old_id = row['id']
            angle = row['angle']
            channel = row['channel']
            vox_dim = row['voxel_size']
            vox_unit = row['voxel_unit']
            illumination = row['illumination']
            input = row['size']
            local_new_tile_id = 0

            intervals = self.distribute_intervals_fixed_overlap(input)

            interval_to_view_setup = {}
            for i in range(len(intervals)):
                interval = intervals[i]
                new_to_old_setup_id[new_id] = old_id
                new_setup_id_to_interval[new_id] = interval

                # Size of the sub-image (intervals are inclusive).
                size = [0, 0, 0]

                for j in range(3):
                    size[j] = interval[1][j] - interval[0][j] + 1

                new_dim = deepcopy(size)

                # Location of the sub-image = interval min corner.
                location = [0, 0, 0]
                for j in range(len(interval[0])):
                    location[j] += interval[0][j]

                # Stride by max_interval_spread so tile ids from different
                # old setups cannot collide.
                new_tile_id = int(old_id) * max_interval_spread + local_new_tile_id
                local_new_tile_id += 1

                new_tile = {
                    'id': new_tile_id,
                    'location': location,
                    'name': str(new_tile_id)
                }

                # The old setup id is preserved as the illumination attribute.
                # NOTE(review): "old_tile_" + old_id requires old_id to be a
                # str — confirm the 'id' column dtype in view_setups.
                new_illum = {
                    'id': old_id,
                    'name': "old_tile_" + old_id
                }

                new_setup = {
                    'angle': str(angle),
                    'attributes': {
                        'illumination': new_illum,
                        'channel': channel,
                        'tile': new_tile,
                        'angle': angle
                    },
                    'channel': str(channel),
                    'id': new_tile_id,
                    'illumination': new_illum,
                    'name': None,
                    'size': new_dim,
                    'tile': new_tile,
                    'voxelSize': {
                        'dimensions': vox_dim,
                        'unit': vox_unit
                    }
                }

                new_setups.append(new_setup)
                interval_key = (tuple(interval[0]), tuple(interval[1]))
                interval_to_view_setup[interval_key] = new_setup

                for t in timepoints:
                    old_view_id = f"timepoint: {t}, setup: {old_id}"
                    # Collect the old transform chain for this view.
                    old_vr = (old_registrations_df['timepoint'] == str(t)) & (old_registrations_df['setup'] == str(old_id))
                    transform_list = old_registrations_df.loc[old_vr, ['name', 'type', 'affine']].to_dict('records')

                    # Append a translation that moves the sub-image to its
                    # place inside the original image.
                    mn, _ = interval
                    translation = f"1, 0, 0, {mn[0]}, 0, 1, 0, {mn[1]}, 0, 0, 1, {mn[2]}"

                    transform = {
                        'name': 'Image Splitting',
                        'affine': translation
                    }
                    transform_list.append(transform)

                    new_view_id = {
                        'setup': new_id,
                        'timepoint': t
                    }

                    new_view_id_key = f"timepoint: {t}, setup: {new_view_id['setup']}"

                    model = self.create_models(transform_list)

                    new_view_registration = {
                        'model': model,
                        'setup': new_view_id['setup'],
                        'timepoint': t,
                        'transformList': transform_list
                    }

                    new_registrations[(new_view_id_key)] = new_view_registration

                    new_v_ip_l = []

                    old_v_ip_l = {
                        'points': interest_points[old_view_id],
                        'setup': old_id,
                        'timepoint': t,
                    }

                    # Keep the original interest points that fall inside this
                    # interval, re-expressed in interval-local coordinates.
                    id = 0
                    new_ip1 = []
                    old_ip_l1 = old_v_ip_l['points']
                    old_ip_1 = deepcopy(old_ip_l1['points'])

                    for ip in old_ip_1:
                        if self.contains(ip, interval):
                            l = deepcopy(ip)
                            for j in range(len(interval[0])):
                                l[j] -= interval[0][j]

                            new_ip1.append((id, l))
                            id += 1

                    new_ip_l1 = {
                        'base_directory': old_ip_l1['base_path'],
                        'corresponding_interest_points': None,
                        'interest_points': new_ip1,
                        'modified_corresponding_interest_points': None,
                        'modified_interest_points': None,
                        'n5_path': f"interestpoints.n5/tpId_{t}_viewSetupId_{new_view_id['setup']}/beads_split",
                        'xml_n5_path': f"tpId_{t}_viewSetupId_{new_view_id['setup']}/{fake_label}",
                        "parameters": old_ip_l1['parameters_split']
                    }

                    new_v_ip_l.append({
                        'label': "beads_split",
                        'ip_list': new_ip_l1
                    })

                    # Generate shared "fake" points in the overlap with every
                    # earlier interval of the same old setup.
                    new_ip = []
                    id = 0

                    for j in range(i):
                        other_interval = intervals[j]
                        intersection = self.intersect(interval, other_interval)

                        if not self.is_empty(intersection):
                            other_setup = interval_to_view_setup[(tuple(other_interval[0]), tuple(other_interval[1]))]
                            # NOTE(review): other_setup['id'] is a tile id
                            # (old_id * spread + local index) while
                            # new_interest_points is keyed by the running
                            # new_id counter — these only coincide when every
                            # setup yields exactly max_interval_spread
                            # intervals and ids are consecutive; verify.
                            other_view_id = f"timepoint: {t}, setup: {other_setup['id']}"
                            other_ip_list = new_interest_points[other_view_id]

                            n = len(interval[0])
                            num_pixels = 1

                            for k in range(n):
                                num_pixels *= (intersection[1][k] - intersection[0][k] + 1)

                            # Target count scales with overlap volume
                            # (point_density is per 100^3 voxels), clamped to
                            # [min_points, max_points].
                            num_points = min(self.max_points, max(self.min_points, math.ceil(self.point_density * num_pixels / (100.0*100.0*100.0))))
                            other_points = (next((x for x in other_ip_list if x.get("label") == fake_label), {"ip_list": {}})["ip_list"].get("interest_points") or [])
                            other_id = len(other_points)

                            tree2 = None
                            search2 = None

                            if self.exclude_radius > 0:
                                # Build a kD-tree over the other view's fake
                                # points in GLOBAL coordinates so nearby
                                # duplicates can be rejected.
                                other_ip_global = []

                                for k, ip in enumerate(other_points):
                                    l = deepcopy(ip[1])

                                    for m in range(n):
                                        l[m] = l[m] + other_interval[0][m]

                                    other_ip_global.append((k, l))

                                if len(other_ip_global) > 0:
                                    coords = np.vstack([l for _, l in other_ip_global])
                                    tree2 = cKDTree(coords)

                                    def search2(q_point_global, radius=self.exclude_radius):
                                        # All existing fake points within
                                        # `radius` of the query (global coords).
                                        idxs = tree2.query_ball_point(np.asarray(q_point_global, float), radius)
                                        return [other_ip_global[k] for k in idxs]
                                else:
                                    tree2 = None
                                    search2 = None

                            else:
                                tree2 = None
                                search2 = None

                            tmp = [0.0] * n

                            for k in range(num_points):
                                p = [0.0] * n
                                op = [0.0] * n

                                # Draw a random global point in the overlap;
                                # jitter each view's local copy by +/- error/2
                                # to simulate detection noise.
                                for d in range(n):
                                    l = rnd.random() * (intersection[1][d] - intersection[0][d] + 1) + intersection[0][d]
                                    p[d] = (l + (rnd.random() - 0.5) * self.error) - interval[0][d]
                                    op[d] = (l + (rnd.random() - 0.5) * self.error) - other_interval[0][d]
                                    tmp[d] = l

                                # Reject the candidate if it lands within
                                # exclude_radius of an existing fake point.
                                num_neighbors = 0
                                if self.exclude_radius > 0:
                                    tmp_ip = (0, np.asarray(tmp, dtype=float))

                                    if search2 is not None:
                                        neighbors = search2(tmp_ip[1], self.exclude_radius)
                                        num_neighbors += len(neighbors)

                                if num_neighbors == 0:
                                    new_ip.append((id, p))
                                    other_points.append((other_id, op))
                                    id += 1
                                    other_id += 1

                            # Write the augmented point list back into the
                            # other view's fake-label entry.
                            next(x for x in other_ip_list if x.get("label") == fake_label)["ip_list"]["interest_points"] = other_points

                    new_ip_l = {
                        'base_directory': old_ip_l1['base_path'],
                        'corresponding_interest_points': None,
                        'interest_points': new_ip,
                        'modified_corresponding_interest_points': None,
                        'modified_interest_points': None,
                        'n5_path': f"interestpoints.n5/tpId_{t}_viewSetupId_{new_view_id['setup']}/{fake_label}",
                        'xml_n5_path': f"tpId_{t}_viewSetupId_{new_view_id['setup']}/{fake_label}",
                        "parameters": old_ip_l1['parameters_fake']
                    }

                    new_v_ip_l.append({
                        'label': fake_label,
                        'ip_list': new_ip_l
                    })

                    # Record how this sub-view maps back to its source view.
                    self.setup_definition.append({
                        'interval': interval,
                        'old_view': (t, old_id),
                        'new_view': (t, new_id),
                        'voxel_dim': vox_dim,
                        'voxel_unit': vox_unit,
                        'angle': angle,
                        'channel': channel,
                        'illumination': illumination,
                        'old_models': transform_list
                    })

                    new_interest_points[new_view_id_key] = new_v_ip_l
                new_id += 1

        return new_interest_points
491
+
492
+ def load_interest_points(self, fake_label):
493
+ full_path = self.n5_path + "interestpoints.n5"
494
+ interest_points = {}
495
+
496
+ if full_path.startswith("s3://"):
497
+ path = full_path.rstrip("/")
498
+ s3 = s3fs.S3FileSystem(anon=False)
499
+ store = s3fs.S3Map(root=path, s3=s3, check=False)
500
+ root = zarr.open(store, mode="r")
501
+
502
+ else:
503
+ store = zarr.N5Store(full_path)
504
+ root = zarr.open(store, mode="r")
505
+
506
+ for _, row in self.view_interest_points_df.iterrows():
507
+ view_id = f"timepoint: {row['timepoint']}, setup: {row['setup']}"
508
+ interestpoints_prefix = f"{row['path']}/interestpoints/loc/"
509
+ fake_path = f"tpId_{row['timepoint']}_viewSetupId_{row['setup']}/{fake_label}"
510
+ split_path = f"tpId_{row['timepoint']}_viewSetupId_{row['setup']}/beads_split"
511
+ overlap_px = f"[{self.target_overlap[0]}, {self.target_overlap[1]}, {self.target_overlap[2]}]"
512
+
513
+ group = root[interestpoints_prefix]
514
+ data = group[:]
515
+
516
+ interest_points[view_id] = {
517
+ 'points': data,
518
+ 'n5_path_old': row['path'],
519
+ 'base_path': full_path,
520
+ 'n5_path_split_points': split_path,
521
+ 'n5_path_fake_points': fake_path,
522
+ 'parameters_split': row['params'],
523
+ 'parameters_fake': f"Fake points for image splitting: overlapPx={overlap_px}, targetSize={self.target_image_size}, minStepSize={self.min_step_size}, optimize=true, pointDensity={self.point_density}, minPoints={self.min_points}, maxPoints={self.max_points}, error={self.error}, excludeRadius={self.exclude_radius}"
524
+ }
525
+
526
+ return interest_points
527
+
528
+ def run(self):
529
+ timepoints = set()
530
+ for _, row in self.image_loader_df.iterrows():
531
+ timepoints.add(row['timepoint'])
532
+
533
+ fake_label = f"splitPoints_{int(time.time() * 1000)}"
534
+ interest_points = self.load_interest_points(fake_label)
535
+ new_split_interest_points = self.split_images(timepoints, interest_points, fake_label)
536
+
537
+ return new_split_interest_points, self.setup_definition