pyme_extra-1.0.4.post0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. PYMEcs/Acquire/Actions/__init__.py +0 -0
  2. PYMEcs/Acquire/Actions/custom.py +167 -0
  3. PYMEcs/Acquire/Hardware/LPthreadedSimple.py +248 -0
  4. PYMEcs/Acquire/Hardware/LPthreadedSimpleSim.py +246 -0
  5. PYMEcs/Acquire/Hardware/NikonTiFlaskServer.py +45 -0
  6. PYMEcs/Acquire/Hardware/NikonTiFlaskServerT.py +59 -0
  7. PYMEcs/Acquire/Hardware/NikonTiRESTClient.py +73 -0
  8. PYMEcs/Acquire/Hardware/NikonTiSim.py +35 -0
  9. PYMEcs/Acquire/Hardware/__init__.py +0 -0
  10. PYMEcs/Acquire/Hardware/driftTrackGUI.py +329 -0
  11. PYMEcs/Acquire/Hardware/driftTrackGUI_n.py +472 -0
  12. PYMEcs/Acquire/Hardware/driftTracking.py +424 -0
  13. PYMEcs/Acquire/Hardware/driftTracking_n.py +433 -0
  14. PYMEcs/Acquire/Hardware/fakeCamX.py +15 -0
  15. PYMEcs/Acquire/Hardware/offsetPiezoRESTCorrelLog.py +38 -0
  16. PYMEcs/Acquire/__init__.py +0 -0
  17. PYMEcs/Analysis/MBMcollection.py +552 -0
  18. PYMEcs/Analysis/MINFLUX.py +280 -0
  19. PYMEcs/Analysis/MapUtils.py +77 -0
  20. PYMEcs/Analysis/NPC.py +1176 -0
  21. PYMEcs/Analysis/Paraflux.py +218 -0
  22. PYMEcs/Analysis/Simpler.py +81 -0
  23. PYMEcs/Analysis/Sofi.py +140 -0
  24. PYMEcs/Analysis/__init__.py +0 -0
  25. PYMEcs/Analysis/decSofi.py +211 -0
  26. PYMEcs/Analysis/eventProperties.py +50 -0
  27. PYMEcs/Analysis/fitDarkTimes.py +569 -0
  28. PYMEcs/Analysis/objectVolumes.py +20 -0
  29. PYMEcs/Analysis/offlineTracker.py +130 -0
  30. PYMEcs/Analysis/stackTracker.py +180 -0
  31. PYMEcs/Analysis/timeSeries.py +63 -0
  32. PYMEcs/Analysis/trackFiducials.py +186 -0
  33. PYMEcs/Analysis/zerocross.py +91 -0
  34. PYMEcs/IO/MINFLUX.py +851 -0
  35. PYMEcs/IO/NPC.py +117 -0
  36. PYMEcs/IO/__init__.py +0 -0
  37. PYMEcs/IO/darkTimes.py +19 -0
  38. PYMEcs/IO/picasso.py +219 -0
  39. PYMEcs/IO/tabular.py +11 -0
  40. PYMEcs/__init__.py +0 -0
  41. PYMEcs/experimental/CalcZfactor.py +51 -0
  42. PYMEcs/experimental/FRC.py +338 -0
  43. PYMEcs/experimental/ImageJROItools.py +49 -0
  44. PYMEcs/experimental/MINFLUX.py +1537 -0
  45. PYMEcs/experimental/NPCcalcLM.py +560 -0
  46. PYMEcs/experimental/Simpler.py +369 -0
  47. PYMEcs/experimental/Sofi.py +78 -0
  48. PYMEcs/experimental/__init__.py +0 -0
  49. PYMEcs/experimental/binEventProperty.py +187 -0
  50. PYMEcs/experimental/chaining.py +23 -0
  51. PYMEcs/experimental/clusterTrack.py +179 -0
  52. PYMEcs/experimental/combine_maps.py +104 -0
  53. PYMEcs/experimental/eventProcessing.py +93 -0
  54. PYMEcs/experimental/fiducials.py +323 -0
  55. PYMEcs/experimental/fiducialsNew.py +402 -0
  56. PYMEcs/experimental/mapTools.py +271 -0
  57. PYMEcs/experimental/meas2DplotDh5view.py +107 -0
  58. PYMEcs/experimental/mortensen.py +131 -0
  59. PYMEcs/experimental/ncsDenoise.py +158 -0
  60. PYMEcs/experimental/onTimes.py +295 -0
  61. PYMEcs/experimental/procPoints.py +77 -0
  62. PYMEcs/experimental/pyme2caml.py +73 -0
  63. PYMEcs/experimental/qPAINT.py +965 -0
  64. PYMEcs/experimental/randMap.py +188 -0
  65. PYMEcs/experimental/regExtraCmaps.py +11 -0
  66. PYMEcs/experimental/selectROIfilterTable.py +72 -0
  67. PYMEcs/experimental/showErrs.py +51 -0
  68. PYMEcs/experimental/showErrsDh5view.py +58 -0
  69. PYMEcs/experimental/showShiftMap.py +56 -0
  70. PYMEcs/experimental/snrEvents.py +188 -0
  71. PYMEcs/experimental/specLabeling.py +51 -0
  72. PYMEcs/experimental/splitRender.py +246 -0
  73. PYMEcs/experimental/testChannelByName.py +36 -0
  74. PYMEcs/experimental/timedSpecies.py +28 -0
  75. PYMEcs/experimental/utils.py +31 -0
  76. PYMEcs/misc/ExtraCmaps.py +177 -0
  77. PYMEcs/misc/__init__.py +0 -0
  78. PYMEcs/misc/configUtils.py +169 -0
  79. PYMEcs/misc/guiMsgBoxes.py +27 -0
  80. PYMEcs/misc/mapUtils.py +230 -0
  81. PYMEcs/misc/matplotlib.py +136 -0
  82. PYMEcs/misc/rectsFromSVG.py +182 -0
  83. PYMEcs/misc/shellutils.py +1110 -0
  84. PYMEcs/misc/utils.py +205 -0
  85. PYMEcs/misc/versionCheck.py +20 -0
  86. PYMEcs/misc/zcInfo.py +90 -0
  87. PYMEcs/pyme_warnings.py +4 -0
  88. PYMEcs/recipes/__init__.py +0 -0
  89. PYMEcs/recipes/base.py +75 -0
  90. PYMEcs/recipes/localisations.py +2380 -0
  91. PYMEcs/recipes/manipulate_yaml.py +83 -0
  92. PYMEcs/recipes/output.py +177 -0
  93. PYMEcs/recipes/processing.py +247 -0
  94. PYMEcs/recipes/simpler.py +290 -0
  95. PYMEcs/version.py +2 -0
  96. pyme_extra-1.0.4.post0.dist-info/METADATA +114 -0
  97. pyme_extra-1.0.4.post0.dist-info/RECORD +101 -0
  98. pyme_extra-1.0.4.post0.dist-info/WHEEL +5 -0
  99. pyme_extra-1.0.4.post0.dist-info/entry_points.txt +3 -0
  100. pyme_extra-1.0.4.post0.dist-info/licenses/LICENSE +674 -0
  101. pyme_extra-1.0.4.post0.dist-info/top_level.txt +1 -0
PYMEcs/IO/MINFLUX.py ADDED
@@ -0,0 +1,851 @@
# here we provide a few routines to translate MINFLUX-provided data structures
# that are read from NPY files

# we translate the NPY-based data structure into a PYME-compatible data structure
# that we hold in a pandas dataframe

# currently, for reading into PYME, we provide the functionality to write out a CSV
# from the pandas dataframe; PYME can parse the generated CSV pretty well upon reading

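# a minimal usage sketch (hypothetical file name; assumes this module is importable as
# PYMEcs.IO.MINFLUX):
#
#   from PYMEcs.IO.MINFLUX import minflux_npy2pyme
#   import pandas as pd
#   rec = minflux_npy2pyme('my_minflux_export.npy')  # NUMPY recarray with PYME-style columns
#   pd.DataFrame(rec).to_csv('my_minflux_export.csv', index=False)  # CSV that PYME can read
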
from scipy.stats import binned_statistic
import pandas as pd
import numpy as np
import os

import PYME.config
# foreshortening factor estimate, see also
# Gwosch, K. C. et al. MINFLUX nanoscopy delivers 3D multicolor nanometer
# resolution in cells. Nature Methods 17, 217–224 (2020), who use 0.7.
foreshortening = PYME.config.get('MINFLUX-foreshortening',0.72)
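# note: the 0.72 default above can be overridden via the PYME config system, typically by
# adding a line like "MINFLUX-foreshortening: 0.7" to one of your PYME config YAML files
# (see the PYME.config documentation for the config file locations that are searched)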

warning_msg = ""

def get_stddev_property(ids, prop, statistic='std'):
    maxid = int(ids.max())
    edges = -0.5+np.arange(maxid+2)
    idrange = (0,maxid)

    propstd, bin_edge, binno = binned_statistic(ids, prop, statistic=statistic,
                                                bins=edges, range=idrange)
    propstd[np.isnan(propstd)] = 1000.0 # (mark as huge error)
    std_events = propstd[ids]
    return std_events

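# a small worked example of the per-event expansion above (toy values, for illustration only):
#
#   ids  = np.array([1, 1, 2])                # two traces/clumps
#   prop = np.array([0., 2., 5.])
#   get_stddev_property(ids, prop)            # -> array([1., 1., 0.])
#   get_stddev_property(ids, prop, 'count')   # -> array([2., 2., 1.])
#
# the std of trace 1 (= 1.0) is broadcast to both of its events; a single-event trace yields
# std 0.0, which the callers below replace with a "large error" flag; ids that never occur
# give empty bins (NaN) and are flagged with 1000.0 here
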
from PYMEcs.pyme_warnings import warn

def npy_is_minflux_data(filename, warning=False, return_msg=False):
    data = np.load(filename)
    valid = True
    msg = None
    if data.dtype.fields is None:
        valid = False
        msg = 'no fields in NPY data, likely not a MINFLUX data set'
    else:
        for field in ['itr','tim','tid','vld']:
            if field not in data.dtype.fields:
                valid = False
                msg = 'no "%s" field in NPY data, likely not a MINFLUX data set' % field
                break

    if not valid and warning:
        if msg is not None:
            warn(msg)

    if return_msg:
        return (valid,msg)
    else:
        return valid

def zip_is_minflux_zarr_data(filename, warning=False, return_msg=False): # currently just a placeholder
    valid = True
    msg = None

    if not valid and warning:
        if msg is not None:
            warn(msg)

    if return_msg:
        return (valid,msg)
    else:
        return valid

def minflux_npy_new_format(data):
    return 'fnl' in data.dtype.fields

# wrapper around legacy vs new format IO
def minflux_npy2pyme(fname,return_original_array=False,make_clump_index=True,with_cfr_std=False):
    data = np.load(fname)

    if minflux_npy_new_format(data):
        pymedf = minflux_npy2pyme_new(data,
                                      make_clump_index=make_clump_index,with_cfr_std=with_cfr_std)
    else:
        pymedf = minflux_npy2pyme_legacy(data,
                                         make_clump_index=make_clump_index,with_cfr_std=with_cfr_std)

    pyme_recArray = pymedf.to_records(index=False) # convert into NUMPY recarray
    if return_original_array:
        return (pyme_recArray,data)
    else:
        return pyme_recArray

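# typical use (hypothetical file name), returning a PYME-style recarray with columns such as
# 'x', 'y', 'error_x' and 'clumpIndex':
#
#   rec = minflux_npy2pyme('sample.npy')
#   rec, raw = minflux_npy2pyme('sample.npy', return_original_array=True)
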
def minflux_zarr2pyme(archz,return_original_array=False,make_clump_index=True,with_cfr_std=False):
    # make data array
    mfx = archz['mfx']
    mfxv = mfx[:][mfx['vld'] == 1]
    seqidsm, incseqs = mk_seqids_maxpos(mfxv)
    data = mfxv[np.logical_not(np.isin(seqidsm,incseqs))] # remove any incomplete sequences
    pymedf = minflux_npy2pyme_new(data,
                                  make_clump_index=make_clump_index,with_cfr_std=with_cfr_std)

    pyme_recArray = pymedf.to_records(index=False) # convert into NUMPY recarray
    if return_original_array:
        return (pyme_recArray,data)
    else:
        return pyme_recArray


###############################
### MINFLUX property checks ###
###############################

# here we check the itr array size: 5 or 6 columns implies 2D, 10 or 11 columns implies 3D;
# any other size raises an error
def minflux_npy_detect_3D_legacy(data):
    if data['itr'].shape[1] == 10 or data['itr'].shape[1] == 11:
        return True # 3D
    elif data['itr'].shape[1] == 5 or data['itr'].shape[1] == 6:
        return False # 2D
    else:
        raise RuntimeError('unknown size of itr array, neither 5/6 (2D) nor 10/11 (3D), is actually: %d' %
                           (data['itr'].shape[1]))

def minflux_check_poperties(data): # this aims to become a one-stop check of MINFLUX file/dataset properties
    props = {}
    props['Is3D'] = minflux_npy_detect_3D_new(data)
    props['Tracking'] = minflux_npy_detect_2Dtracking_new(data)
    if minflux_npy_new_format(data):
        props['Format'] = 'RevAutumn2024'
    else:
        props['Format'] = 'Legacy'
    return props

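# example of the returned dict for a new-format 3D localisation data set:
#
#   minflux_check_poperties(data)
#   # -> {'Is3D': True, 'Tracking': False, 'Format': 'RevAutumn2024'}
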
def minflux_npy_detect_3D_new(data):
    dfin = data[data['fnl'] == True]
    if dfin['itr'][0] == 9:
        if not np.all(dfin['itr'] == 9):
            raise RuntimeError('3D detected but some "last iterations" have an index different from 9, giving up')
        return True # 3D
    elif dfin['itr'][0] == 4:
        if not np.all(dfin['itr'] == 4):
            raise RuntimeError('2D detected but some "last iterations" have an index different from 4, giving up')
        return False # 2D
    elif dfin['itr'][0] == 3: # 2D tracking
        if not np.all(dfin['itr'] == 3):
            raise RuntimeError('2D tracking detected but some "last iterations" have an index different from 3, giving up')
        return False # 2D
    else:
        raise RuntimeError('unknown number of final iteration, neither 3 (2D tracking), 4 (2D) nor 9 (3D), is actually: %d' %
                           (dfin['itr'][0]))

def minflux_npy_detect_2Dtracking_new(data):
    dfin = data[data['fnl'] == True]
    if np.all(dfin['itr'] == 3):
        return True
    else:
        return False

def minflux_npy_has_extra_iter_legacy(data):
    if data['itr'].shape[1] == 6 or data['itr'].shape[1] == 11:
        return True # has a spare empty starting position
    else:
        return False


##################
### LEGACY IO ####
##################


# this one should be able to deal with both 2D and 3D
def minflux_npy2pyme_legacy(data,make_clump_index=True,with_cfr_std=False):

    if minflux_npy_detect_3D_legacy(data):
        is_3D = True
        iterno_loc = 9 # we pick up the most precise localisation from this iteration, also fbg
        iterno_other = 9 # we pick up dcr, efo from this iteration
        iterno_cfr = 6
    else:
        is_3D = False
        iterno_loc = 4
        iterno_other = 4
        iterno_cfr = 3

    # NOTE CS 3/2024: latest data with MBM active seems to generate an "empty" iteration (at position 0)
    # that has NaNs or zeros in the relevant properties;
    # we seem to be able to deal with this by just moving our pointers into the iterations one position up;
    # this is subject to confirmation
    if minflux_npy_has_extra_iter_legacy(data):
        has_extra_iter = True
        iterno_loc += 1
        iterno_other += 1
        iterno_cfr += 1
    else:
        has_extra_iter = False


    posnm = 1e9*data['itr']['loc'][:,iterno_loc] # we keep all distances in units of nm
    posnm[:,2] *= foreshortening
    if 'lnc' in data['itr'].dtype.fields:
        posnm_nc = 1e9*data['itr']['lnc'][:,iterno_loc]
        posnm_nc[:,2] *= foreshortening
        has_lnc = True
    else:
        has_lnc = False

    pymedct = {}

    # this way we ensure that the valid vs invalid portions of the same trace get separate ids;
    # this becomes important for calculating std devs for traces which are otherwise contaminated by NaNs
    # from the invalid part of a trace
    rawids = 2*data['tid'] + data['vld']

    if make_clump_index:
        # we replace the non-sequential trace ids from MINFLUX data with a set of sequential ids;
        # this works better for clumpIndex assumptions in the end
        uids,revids = np.unique(rawids,return_inverse=True)
        ids = np.arange(1,uids.size+1,dtype='int32')[revids]
        counts = get_stddev_property(ids,posnm[:,0],statistic='count')
        posinid = mk_posinid(ids)
        pymedct.update({'clumpIndex': ids,
                        'clumpSize' : counts,
                        'posInClump': posinid,
                        })
    else:
        ids = rawids

    stdx = get_stddev_property(ids,posnm[:,0])
    # we expect this to only happen when clumpSize == 1, because then the std dev comes back as 0
    stdx[stdx < 1e-3] = 100.0 # if the error estimate is too small, replace with 100 as "large" flag
    stdy = get_stddev_property(ids,posnm[:,1])
    stdy[stdy < 1e-3] = 100.0
    if is_3D:
        stdz = get_stddev_property(ids,posnm[:,2])
        stdz[stdz < 1e-3] = 100.0
        pymedct.update({'z':posnm[:,2], 'error_z' : stdz})

    if with_cfr_std: # on request we also compute a cfr std dev across a trace ID (= clump in PYME)
        pymedct.update({'cfr_std':get_stddev_property(ids,data['itr']['cfr'][:,iterno_cfr])})

    pymedct.update({'x' : posnm[:,0],
                    'y': posnm[:,1],
                    # for t we use time to ms precision (without rounding); this is a reasonably close
                    # correspondence to frame numbers as time coordinates in SMLM data
                    't': (1e3*data['tim']).astype('i'),
                    'cfr':data['itr']['cfr'][:,iterno_cfr],
                    'efo':data['itr']['efo'][:,iterno_other],
                    'dcr':data['itr']['dcr'][:,iterno_other],
                    'error_x' : stdx,
                    'error_y' : stdy,
                    'fbg': data['itr']['fbg'][:,iterno_other],
                    # we assume for now the offset counts can be used to sum up
                    # and get the total photons harvested
                    # check with abberior
                    # NOTE CS 3/2024: there seems to be an extra iteration in the newer files with MBM;
                    # in some properties these are NaN, for eco this seems to be 0, so it is ok to still sum along the whole axis
                    'nPhotons' : data['itr']['eco'].sum(axis=1),
                    'tim': data['tim'], # we also keep the original float time index, units are [s]
                    })

    if has_lnc:
        pymedct.update({'x_nc' : posnm_nc[:,0],
                        'y_nc' : posnm_nc[:,1]})
        if is_3D:
            pymedct.update({'z_nc' : posnm_nc[:,2]})

    # copy a few entries verbatim
    for key in ['tid','act','vld']:
        if key in data.dtype.fields:
            pymedct[key] = data[key].astype('i') # these are either integer types or should be converted to integer

    # TODO: think this through - we don't really need a dataframe here,
    # could return a record array, or at least make that optional
    pymepd = pd.DataFrame.from_dict(pymedct)
    return pymepd

#########################
### RevAutumn2024 IO ####
#########################

# below is code to generate sequence IDs for all sequences present in the mfx data;
# the goal is to use only fast vectorized expressions

# this one uses "final iteration" as the end-of-sequence marker;
# we noticed later that this can lead to issues with "incomplete sequences",
# i.e. sequences that are not terminated by a valid final localisation
def mk_seqids(data):
    indexlast = data['fnl'] == True
    seq_uid = np.arange(1,indexlast.sum()+1,dtype='i')
    seqidwnans = np.full(data.shape[0],np.nan)
    seqidwnans[indexlast] = seq_uid
    dfidnan = pd.DataFrame({'seqid':seqidwnans})
    seqid = dfidnan.bfill().to_numpy(dtype='i').squeeze() # we use pandas' fast backfill to mark the other events that are part of this sequence
    return seqid

# this one uses "change to a lower (or equal) sequence number" as the end-of-sequence marker;
# this seems safer than looking for an iteration with the 'fnl' marker, as there can be incomplete sequences, see below;
# note we now also look for "<=" in the idxmax computation, which should only trigger if a valid itr 0 is followed directly by another valid itr 0;
# we also return a list (actually a numpy array) of seqids of incomplete sequences
def mk_seqids_maxpos(data):
    idxmax = np.nonzero((data['itr'][1:]-data['itr'][0:-1]) <= 0)[0]
    seq_uid = np.arange(1,idxmax.size+1,dtype='i')
    seqidwnans = np.full(data.shape[0],np.nan)
    seqidwnans[idxmax] = seq_uid
    if np.isnan(seqidwnans[-1]):
        seqidwnans[-1] = seq_uid.max()+1 # we may need to mark the last event with a unique id
    dfidnan = pd.DataFrame({'seqid':seqidwnans})
    seqid = dfidnan.bfill().to_numpy(dtype='i').squeeze() # we use pandas' fast backfill to mark the other events that are part of this sequence
    # also mark incomplete sequences for weeding out
    idxincp = idxmax[data['fnl'][idxmax] != 1] # incomplete sequences end with an event that is not marked as 'fnl'
    incomplete_seqs = seqid[idxincp]
    if data['fnl'][-1] != 1:
        incomplete_seqs = np.append(incomplete_seqs,seqid[-1])
    return seqid, incomplete_seqs

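# a small worked example (toy 'itr' sequence, for illustration only): for
#
#   itr = [0, 1, 2, 3, 0, 1, 2]   # the second sequence lacks its final iteration
#
# the drop from 3 back to 0 ends sequence 1, so we obtain seqid = [1, 1, 1, 1, 2, 2, 2];
# if the event at itr==3 carries fnl==True but the trailing event does not, sequence 2
# is reported in the incomplete list for weeding out
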
# number the position within clumps from 0 to clumpSize-1;
# here we assume that the data is already strictly ordered by time of occurrence,
# which should generally be the case!
# the implementation is currently not as fast as it would ideally be (we iterate over all ids);
# ideally a full vector expression would be used - but we need to figure out how;
# however, we have not yet timed whether this computation is rate-limiting for the import; it may not be,
# in which case no further optimization would currently be needed
def mk_posinid(ids):
    posinid = np.zeros_like(ids)
    for curid in np.unique(ids):
        isid = ids == curid
        posinid[isid] = np.arange(int(np.sum(isid)))
    return posinid

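# a possible vectorized replacement (untested sketch, the helper name is ours); it assumes
# that the events of each id form one contiguous run, in line with the time-ordering
# assumption stated above:
#
#   def mk_posinid_vec(ids):
#       idx = np.arange(ids.size)
#       starts = np.flatnonzero(np.diff(ids, prepend=ids[0]-1) != 0) # where a new id run starts
#       # subtract each run's start index from the running index
#       return idx - np.repeat(idx[starts], np.diff(np.append(starts, ids.size)))
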
# this one should be able to deal with both 2D and 3D
def minflux_npy2pyme_new(data,make_clump_index=True,with_cfr_std=False):
    lastits = data['fnl'] == True
    wherelast = np.nonzero(lastits)[0]
    dfin = data[lastits]

    props = minflux_check_poperties(data)

    if props['Is3D']:
        wherecfr = wherelast - 3
        if not np.all(data[wherecfr]['itr'] == 6):
            raise RuntimeError('CFR check_3D: 3D detected but some "cfr iterations" have an index different from 6, giving up')
    else:
        if props['Tracking']:
            wherecfr = wherelast # this is bogus for now; we really need to get CFR from the previous itr==2 that belongs to the same trace
        else:
            wherecfr = wherelast - 1 # in 2D we use the last-but-one iteration (iteration 3)
        if not np.all(data[wherecfr]['itr'] == 3):
            raise RuntimeError('CFR check_2D: 2D detected but some "cfr iterations" have an index different from 3, giving up')

    posnm = 1e9*dfin['loc'] # we keep all distances in units of nm
    posnm[:,2] *= foreshortening
    if 'lnc' in data.dtype.fields:
        posnm_nc = 1e9*dfin['lnc']
        posnm_nc[:,2] *= foreshortening
        has_lnc = True
    else:
        has_lnc = False

    pymedct = {}

    # this way we ensure that the valid vs invalid portions of the same trace get separate ids;
    # this becomes important for calculating std devs for traces which are otherwise contaminated by NaNs
    # from the invalid part of a trace
    rawids = 2*dfin['tid'] + dfin['vld']

    if make_clump_index:
        # we replace the non-sequential trace ids from MINFLUX data with a set of sequential ids;
        # this works better for clumpIndex assumptions in the end
        uids,revids = np.unique(rawids,return_inverse=True)
        ids = np.arange(1,uids.size+1,dtype='int32')[revids]
        posinid = mk_posinid(ids)
        counts = get_stddev_property(ids,posnm[:,0],statistic='count')
        pymedct.update({'clumpIndex': ids,
                        'clumpSize' : counts,
                        'posInClump': posinid,
                        })
    else:
        ids = rawids

    # we are currently not using the info on incomplete sequences
    seqid,incomplete_seqid = mk_seqids_maxpos(data) # we give every sequence a unique id to allow summing up the photons
    # we assume for now the counts at offset can be used to sum up
    # and get the total photons harvested in a sequence
    nphotons_all = get_stddev_property(seqid,data['eco'],statistic='sum')
    niterations_all = get_stddev_property(seqid,data['eco'],statistic='count') # we also count how many iterations were done, to distinguish complete vs partial sequences

    if with_cfr_std: # on request we also compute a cfr std dev across a trace ID (= clump in PYME)
        pymedct.update({'cfr_std':get_stddev_property(ids,data[wherecfr]['cfr'])})

    pymedct.update({'x' : posnm[:,0],
                    'y': posnm[:,1],
                    # for t we use time to ms precision (without rounding); this is a reasonably close
                    # correspondence to frame numbers as time coordinates in SMLM data
                    't': (1e3*dfin['tim']).astype('i'),
                    'cfr':data[wherecfr]['cfr'],
                    'efo':dfin['efo'],
                    'fbg': dfin['fbg'],
                    # check with abberior
                    # NOTE CS 3/2024: there seems to be an extra iteration in the newer files with MBM;
                    # in some properties these are NaN, for eco this seems to be 0, so it is ok to still sum along the whole axis
                    'tim': dfin['tim'], # we also keep the original float time index, units are [s]
                    'nPhotons': nphotons_all[wherelast],
                    'nIters': niterations_all[wherelast],
                    'itr': dfin['itr']
                    })
    # copy a few entries verbatim
    for key in ['tid','act','vld','sta','sqi','thi','gri']:
        if key in data.dtype.fields:
            pymedct[key] = dfin[key].astype('i') # these are either integer types or should be converted to integer

    # spectral colour info
    pymedct.update({'dcr':dfin['dcr'][:,0]})
    if dfin['dcr'].shape[1] > 1: # first element is ch1/(ch1 + ch2), second is ch2/(ch1 + ch2) if present
        pymedct.update({'dcr2':dfin['dcr'][:,1]})

    stdx = get_stddev_property(ids,posnm[:,0])
    # we expect this to only happen when clumpSize == 1, because then the std dev comes back as 0
    stdx[stdx < 1e-3] = 100.0 # if the error estimate is too small, replace with 100 as "large" flag
    stdy = get_stddev_property(ids,posnm[:,1])
    stdy[stdy < 1e-3] = 100.0
    if props['Is3D']:
        stdz = get_stddev_property(ids,posnm[:,2])
        stdz[stdz < 1e-3] = 100.0

    if props['Tracking']: # NOTE: for now 2D only, must fix in future for 3D!

        # estimate the experimental localization precision σ for each track by calculating the
        # standard deviation (SD) of the coordinate difference between consecutive localizations,
        # from the supplement in Deguchi, T. et al. Direct observation of motor protein stepping in
        # living cells using MINFLUX. Science 379, 1010–1015 (2023).
        def diffstd(data):
            # take the differential and then look at the std dev of that;
            # 1/sqrt(2) to account for the variance increase on differences
            return np.diff(data).std()/1.41

        track_stdx = stdx
        track_stdy = stdy
        #LOCERR_MAX = 15.0
        #stdx = np.clip(stdx,None,LOCERR_MAX) # current workaround, need better loc err estimation
        #stdy = np.clip(stdy,None,LOCERR_MAX) # current workaround, need better loc err estimation
        stdx = get_stddev_property(ids,posnm[:,0],statistic=diffstd)
        stdy = get_stddev_property(ids,posnm[:,1],statistic=diffstd)
        track_tmin = get_stddev_property(ids,dfin['tim'],'min')
        track_tms = 1e3*(dfin['tim']-track_tmin)
        track_lims = np.zeros_like(ids)
        track_lims[np.diff(ids,prepend=0) > 0] = 1 # mark the beginning of tracks with 1
        track_lims[np.diff(ids,append=ids.max()+1) > 0] = 2 # mark the end of tracks with 2
        pymedct.update({'track_stdx':track_stdx, 'track_stdy':track_stdy, 'track_tms':track_tms,
                        # we return track_err[xy] in addition to error_x, error_y since it avoids
                        # special treatment on coalescing and therefore allows comparison between
                        # track_stdx and track_errx etc on a per-track basis
                        'track_errx':stdx.copy(), 'track_erry':stdy.copy(),
                        'track_lims':track_lims,
                        })

    pymedct.update({'error_x' : stdx,'error_y' : stdy})
    if props['Is3D']:
        pymedct.update({'z':posnm[:,2], 'error_z' : stdz})

    if has_lnc:
        pymedct.update({'x_nc' : posnm_nc[:,0],
                        'y_nc' : posnm_nc[:,1]})
        if props['Is3D']:
            pymedct.update({'z_nc' : posnm_nc[:,2]})

    # TODO: think this through - we don't really need a dataframe here,
    # could return a record array, or at least make that optional
    pymepd = pd.DataFrame.from_dict(pymedct)
    return pymepd

#################################
### MBM utility functionality ###
#################################

# we try to find an MBM collection attached
# to a datasource generated by an MBMcorrection module;
# returns None if unsuccessful
def findmbm(pipeline,warnings=True,return_mod=False):
    from PYMEcs.recipes.localisations import MBMcorrection
    dsname = None
    # search/check for an instance
    for mod in pipeline.recipe.modules:
        if isinstance(mod,MBMcorrection):
            dsname = mod.output
            break
    if dsname is None:
        if warnings:
            warn("we rely on MBM info present in a datasource generated by the MBMcorrection module.\n\n" +
                 "Can't find such a datasource, please add an MBMcorrection module to your recipe.\n\nAborting...")
        return None
    mbm = pipeline.dataSources[dsname].mdh.get('Processing.MBMcorrection.mbm')
    if mbm is None:
        if warnings:
            warn(("found no MBM collection in metadata of datasource '%s' generated by MBMcorrection module.\n\n" % dsname )+
                 "Have you loaded valid MBM data into the module yet?\n\nAborting..." )
        return None
    if return_mod:
        return mod
    else:
        return mbm


#####################################
### metadata utility functions ######
#####################################

def _get_basic_MINFLUX_metadata(filename):
    from pathlib import Path
    from PYME.IO import MetaDataHandler # needed here; the module-level import only happens further down
    mdh = MetaDataHandler.NestedClassMDHandler()

    mdh['MINFLUX.Filename'] = Path(filename).name # the MINFLUX filename often holds some metadata
    mdh['MINFLUX.Foreshortening'] = foreshortening
    from PYMEcs.misc.utils import get_timestamp_from_filename, parse_timestamp_from_filename
    ts = get_timestamp_from_filename(filename)
    if ts is not None:
        mdh['MINFLUX.TimeStamp'] = ts
        # we add the zero to defeat the regexp that checks for names ending with 'time$';
        # this falls foul of the comparison with an int (epoch time) in the metadata repr function,
        # because our time stamp is a pandas time stamp and comparison with an int fails
        mdh['MINFLUX.StartTime0'] = parse_timestamp_from_filename(filename).strftime("%Y-%m-%d %H:%M:%S")

    return mdh

def _get_mdh(data,filename):
    mdh = _get_basic_MINFLUX_metadata(filename)
    if minflux_npy_new_format(data):
        props = minflux_check_poperties(data)
        mdh['MINFLUX.Format'] = props['Format']
        mdh['MINFLUX.Is3D'] = props['Is3D']
        mdh['MINFLUX.Tracking'] = props['Tracking']
    else:
        mdh['MINFLUX.Format'] = 'Legacy'
        mdh['MINFLUX.Is3D'] = minflux_npy_detect_3D_legacy(data)
        mdh['MINFLUX.ExtraIteration'] = minflux_npy_has_extra_iter_legacy(data)
        mdh['MINFLUX.Tracking'] = False # for now we do not support tracking with legacy data

    return mdh

def _get_mdh_zarr(filename,arch):
    mdh = _get_basic_MINFLUX_metadata(filename)
    mfx_attrs = arch['mfx'].attrs.asdict()
    if '_legacy' not in mfx_attrs:
        mdh['MINFLUX.Format'] = 'RevAutumn2024'
        mdh['MINFLUX.AcquisitionDate'] = mfx_attrs['acquisition_date']
        mdh['MINFLUX.DataID'] = mfx_attrs['did']
        mdh['MINFLUX.Is3D'] = mfx_attrs['measurement']['dimensionality'] > 2
        # now do some checks of the acquisition date vs any filename-derived info
        from PYMEcs.misc.utils import get_timestamp_from_mdh_acqdate, compare_timestamps_s
        ts = get_timestamp_from_mdh_acqdate(mdh)
        if ts is not None:
            mts = mdh.get('MINFLUX.TimeStamp')
            if mts is not None:
                if mts != ts:
                    delta_s = compare_timestamps_s(mts,ts)
                    if delta_s > 5: # there can be rounding errors from the different TS sources, we tolerate up to 5 s difference
                        warn("acq time stamp (%s) not equal to filename time stamp (%s), delta in s is %d" % (ts,mts,delta_s))
            else:
                mdh['MINFLUX.TimeStamp'] = ts

        md_by_itrs,mfx_global_par = get_metadata_from_mfx_attrs(mfx_attrs)
        for par in mfx_global_par:
            mdh['MINFLUX.Globals.%s' % par] = mfx_global_par[par]
        for pars in md_by_itrs:
            # make sure we convert to a list; otherwise we cannot easily convert to JSON, as JSON does not like ndarrays
            mdh['MINFLUX.ByItrs.%s' % pars] = md_by_itrs[pars].to_numpy().tolist()
        import re
        mdh['MINFLUX.Tracking'] = re.search('tracking', mfx_global_par['ID'], re.IGNORECASE) is not None
    else:
        mdh['MINFLUX.Format'] = 'LegacyZarrConversion'
        mdh['MINFLUX.Is3D'] = mfx_attrs['_legacy']['_seqs'][0]['Itr'][0]['Mode']['dim'] > 2

    return mdh

def get_metadata_from_mfx_attrs(mfx_attrs):
    mfx_itrs = mfx_attrs['measurement']['threads'][0]['sequences'][0]['Itr']
    mfx_globals = mfx_attrs['measurement']['threads'][0]['sequences'][0]

    md_by_itrs = pd.DataFrame(columns=['IterationNumber','PinholeAU','ActivationLaser', 'ExcitationLaserAbbrev',
                                       'ExcitationWavelength_nm', 'ExcitationPower_percent', 'ExcitationDAC',
                                       'DetectionChannel01','DetectionChannel02','BackgroundThreshold',
                                       'PhotonLimit', 'CCRLimit', 'DwellTime_ms',
                                       'PatternGeoFactor','PatternRepeat', 'PatternGeometryAbbrev','Strategy'],
                              index=range(len(mfx_itrs)))
    for i, itr in enumerate(mfx_itrs):
        md_by_itrs.loc[i].IterationNumber = i
        md_by_itrs.loc[i].PinholeAU = itr['Mode']['phDiaAU']
        md_by_itrs.loc[i].ActivationLaser = itr['_activation']['laser'] if itr['_activation']['laser'] != '' else 'NA'
        md_by_itrs.loc[i].ExcitationLaserAbbrev = itr['_excitation']['laser'].replace('MINFLUX','M')
        md_by_itrs.loc[i].ExcitationWavelength_nm = np.rint(1e9*itr['_excitation']['wavelength'])
        md_by_itrs.loc[i].ExcitationPower_percent = itr['_excitation']['power']
        md_by_itrs.loc[i].ExcitationDAC = itr['_excitation']['dac']
        md_by_itrs.loc[i].DetectionChannel01 = itr['_detection']['channels'][0]
        md_by_itrs.loc[i].DetectionChannel02 = itr['_detection']['channels'][1] if len(itr['_detection']['channels']) > 1 else 'NA'
        md_by_itrs.loc[i].BackgroundThreshold = itr['bgcThreshold']
        md_by_itrs.loc[i].PhotonLimit = itr['phtLimit']
        md_by_itrs.loc[i].CCRLimit = itr['ccrLimit']
        md_by_itrs.loc[i].DwellTime_ms = 1e3*itr['patDwellTime']
        md_by_itrs.loc[i].PatternGeoFactor = itr['patGeoFactor']
        md_by_itrs.loc[i].PatternRepeat = itr['patRepeat']
        md_by_itrs.loc[i].PatternGeometryAbbrev = itr['Mode']['pattern'].replace('hexagon','hex').replace('zline','zl').replace('square','sq')
        md_by_itrs.loc[i].Strategy = itr['Mode']['strategy']

    mfx_global_pars = {}

    mfx_global_pars['BgcSense'] = mfx_globals['bgcSense']
    mfx_global_pars['CtrDwellFactor'] = mfx_globals['ctrDwellFactor']
    mfx_global_pars['Damping'] = mfx_globals['damping']
    mfx_global_pars['Headstart'] = mfx_globals['headstart']
    mfx_global_pars['ID'] = mfx_globals['id']
    mfx_global_pars['Liveview'] = mfx_globals['liveview']['show']
    mfx_global_pars['LocLimit'] = mfx_globals['locLimit']
    mfx_global_pars['Stickiness'] = mfx_globals['stickiness']
    mfx_global_pars['FieldAlgorithm'] = mfx_globals['field']['algo']
    mfx_global_pars['FieldGeoFactor'] = mfx_globals['field']['fldGeoFactor']
    mfx_global_pars['FieldStride'] = mfx_globals['field']['stride']

    return (md_by_itrs,mfx_global_pars)


##############################
### tabular classes #########
##############################

from PYME.IO.tabular import TabularBase

# closely modeled on RecArraySource
class MinfluxNpySource(TabularBase):
    _name = "MINFLUX NPY File Source"
    def __init__(self, filename):
        """Input filter for use with NPY data exported from MINFLUX acquisitions (typically residing in MSR files)."""

        self.res = minflux_npy2pyme(filename)

        # check for invalid localisations:
        # possible TODO - is this needed/helpful, or should we propagate missing values further?
        # FIXED - minflux_npy2pyme should now also work properly when invalid data is present,
        # so returning just the valid events to PYME should be ok
        if np.any(self.res['vld'] < 1):
            self.res = self.res[self.res['vld'] >= 1]

        self._keys = list(self.res.dtype.names)

    def keys(self):
        return self._keys

    def __getitem__(self, keys):
        key, sl = self._getKeySlice(keys)

        if key not in self._keys:
            raise KeyError('Key (%s) not found' % key)

        return self.res[key][sl]

    def getInfo(self):
        return 'MINFLUX NPY Data Source\n\n %d points' % len(self.res['x'])
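
# a minimal usage sketch (hypothetical file name):
#
#   ds = MinfluxNpySource('sample.npy')
#   ds.keys()        # available columns, e.g. 'x', 'y', 'error_x', 'clumpIndex', ...
#   ds['x'][:10]     # first 10 x positions in nm
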
class MinfluxZarrSource(MinfluxNpySource):
    _name = "MINFLUX zarr File Source"
    def __init__(self, filename):
        """Input filter for use with ZARR data exported from MINFLUX acquisitions (originally residing in MSR files)."""
        import zarr
        archz = zarr.open(filename)
        self.zarr = archz
        self._own_file = True # is this necessary? normally only used by HDF sources to close the HDF file on destroy; zarr does not need "closing"

        # NOTE: no further 'locations valid' check should be necessary - we already filter in the conversion function
        self.res = minflux_zarr2pyme(archz)

        self._keys = list(self.res.dtype.names)

        # note: apparently, closing an open zarr archive is not required; accordingly, no delete and close methods are necessary
        self._paraflux_analysis = None

##############################
### Register IO with PYME ####
##############################

# we are monkeypatching pipeline and VisGUIFrame methods to sneak MINFLUX npy IO in;
# this gets called from the MINFLUX plugin in the Plug routine;
# this way it can patch the relevant VisGUIFrame and Pipeline methods in the instances
# of these classes in the visGUI app
#
# in future we will ask for a way to get this considered by David B for a proper hook
# in the file loading code and possibly allow registering file load hooks for new formats
def monkeypatch_npyorzarr_io(visFr):
    import types
    import logging
    import os
    import wx
    from PYME.IO import MetaDataHandler
    from PYME.IO.FileUtils import nameUtils

    logger = logging.getLogger(__name__)
    logger.info("MINFLUX monkeypatching IO")
    def _populate_open_args_npyorzarr(self, filename):
        # this is currently just the minimal functionality for .npy;
        # we should really check a few things before going any further -
        # the .mat and CSV files give examples...
        if os.path.splitext(filename)[1] == '.npy':
            valid, warnmsg = npy_is_minflux_data(filename,warning=False,return_msg=True)
            if not valid:
                warn('file "%s" does not look like a valid MINFLUX NPY file:\n"%s"\n\nOPENING ABORTED'
                     % (os.path.basename(filename),warnmsg))
                return # this is not MINFLUX NPY data - we give up
            return {} # all good, just return empty args
        elif os.path.splitext(filename)[1] == '.zip':
            valid, warnmsg = zip_is_minflux_zarr_data(filename,warning=False,return_msg=True)
            if not valid:
                warn('file "%s" does not look like a valid MINFLUX zarr file:\n"%s"\n\nOPENING ABORTED'
                     % (os.path.basename(filename),warnmsg))
                return # this is not MINFLUX zarr data - we give up
            return {} # all good, just return empty args
        else:
            return self._populate_open_args_original(filename)

    visFr._populate_open_args_original = visFr._populate_open_args
    visFr._populate_open_args = types.MethodType(_populate_open_args_npyorzarr,visFr)

    def _load_ds_npy(filename):
        ds = MinfluxNpySource(filename)
        ds.filename = filename

        data = np.load(filename)
        ds.mdh = _get_mdh(data,filename)

        return ds

    def _load_ds_zarrzip(filename):
        ds = MinfluxZarrSource(filename)
        ds.filename = filename

        ds.mdh = _get_mdh_zarr(filename,ds.zarr)

        return ds

    def _ds_from_file_npyorzarr(self, filename, **kwargs):
        if os.path.splitext(filename)[1] == '.npy': # MINFLUX NPY file
            logger.info('.npy file, trying to load as MINFLUX npy ...')
            return _load_ds_npy(filename)
        elif os.path.splitext(filename)[1] == '.zip': # MINFLUX ZARR file in zip format
            logger.info('.zip file, trying to load as MINFLUX zarr ...')
            return _load_ds_zarrzip(filename)
        else:
            return self._ds_from_file_original(filename, **kwargs)

    visFr.pipeline._ds_from_file_original = visFr.pipeline._ds_from_file
    visFr.pipeline._ds_from_file = types.MethodType(_ds_from_file_npyorzarr,visFr.pipeline)

    from PYMEcs.IO.NPC import findNPCset
    visFr.pipeline.get_npcs = types.MethodType(findNPCset,visFr.pipeline) # we make this a method of pipeline to make access easier

    ### we now also need to monkeypatch the _load_input method of the pipeline recipe;
    ### this should allow session loading to succeed
    def _load_input_npyorzarr(self, filename, key='input', metadata_defaults={}, cache={}, default_to_image=True, args={}):
        """
        Load input data from a file and inject into namespace
        """
        from PYME.IO import unifiedIO
        import os

        if '?' in filename:
            self._load_input_original(filename,key=key,metadata_defaults=metadata_defaults,
                                      cache=cache,default_to_image=default_to_image,args=args)
        elif os.path.splitext(filename)[1] == '.npy': # MINFLUX NPY file
            logger.info('.npy file, trying to load as MINFLUX npy ...')
            self.namespace[key] = _load_ds_npy(filename)
        elif os.path.splitext(filename)[1] == '.zip': # MINFLUX zarr file in zip format
            logger.info('.zip file, trying to load as MINFLUX zarr ...')
            self.namespace[key] = _load_ds_zarrzip(filename)
        else:
            self._load_input_original(filename,key=key,metadata_defaults=metadata_defaults,
                                      cache=cache,default_to_image=default_to_image,args=args)

    if '_load_input' in dir(visFr.pipeline.recipe):
        visFr.pipeline.recipe._load_input_original = visFr.pipeline.recipe._load_input
        visFr.pipeline.recipe._load_input = types.MethodType(_load_input_npyorzarr,visFr.pipeline.recipe)

    # we install this as a new menu item as File>Open is already assigned;
    # however, the new File>Open MINFLUX entry can also open all other allowed file types
    def OnOpenFileNPYorZARR(self, event):
        filename = wx.FileSelector("Choose a file to open",
                                   nameUtils.genResultDirectoryPath(),
                                   wildcard='|'.join(['All supported formats|*.h5r;*.txt;*.mat;*.csv;*.hdf;*.3d;*.3dlp;*.npy;*.zip;*.pvs',
                                                      'PYME Results Files (*.h5r)|*.h5r',
                                                      'Tab Formatted Text (*.txt)|*.txt',
                                                      'Matlab data (*.mat)|*.mat',
                                                      'Comma separated values (*.csv)|*.csv',
                                                      'HDF Tabular (*.hdf)|*.hdf',
                                                      'MINFLUX NPY (*.npy)|*.npy',
                                                      'MINFLUX ZARR (*.zip)|*.zip',
                                                      'Session files (*.pvs)|*.pvs',]))

        if not filename == '':
            self.OpenFile(filename)

    visFr.OnOpenFileNPYorZARR = types.MethodType(OnOpenFileNPYorZARR,visFr)
    visFr.AddMenuItem('File', "Open MINFLUX NPY, zarr or session", visFr.OnOpenFileNPYorZARR)

    logger.info("MINFLUX monkeypatching IO completed")

    # set option to make filetype choices available in FileDialogs on macOS;
    # seems to be ok to set on non-macOS systems, too
    wx.SystemOptions.SetOption(u"osx.openfiledialog.always-show-types", 1)
805
+
806
+ def _get_session_datasources_whook(self): # with hook for saving lowess cache
807
+ # try to save an mbm lowess cache if present
808
+ mod = findmbm(visFr.pipeline,warnings=False,return_mod=True)
809
+ mbm = findmbm(visFr.pipeline,warnings=False,return_mod=False)
810
+ if mod is not None and mbm is not None:
811
+ if mod.MBM_lowess_fraction > 1e-5:
812
+ if not mod.lowess_cachefilepath().exists():
813
+ mod.lowess_cachesave()
814
+
815
+ return self._get_session_datasources_original()
816
+
817
+ visFr.pipeline._get_session_datasources_original = visFr.pipeline._get_session_datasources
818
+ visFr.pipeline._get_session_datasources = types.MethodType(_get_session_datasources_whook,visFr.pipeline)
819
+
# below we make a class Pipeline that inherits from PYME.LMVis.pipeline.Pipeline
# and overrides the relevant method in the subclass
#
# in your own code (e.g. a Python notebook) use as
#
#   from PYMEcs.IO.MINFLUX import Pipeline # use this instead of PYME.LMVis.pipeline
#   data = Pipeline('my_minflux_file.npy')
#
from PYME.LMVis import pipeline
from PYME.IO import MetaDataHandler
import os
import logging
class Pipeline(pipeline.Pipeline):

    def _ds_from_file(self, filename, **kwargs):
        if os.path.splitext(filename)[1] == '.npy': # MINFLUX NPY file
            logging.getLogger(__name__).info('.npy file, trying to load as MINFLUX npy ...')
            if not npy_is_minflux_data(filename,warning=True):
                raise RuntimeError("can't read pipeline data from NPY file - not a MINFLUX data set?")
            ds = MinfluxNpySource(filename)
            data = np.load(filename)
            ds.mdh = _get_mdh(data,filename)
            return ds
        elif os.path.splitext(filename)[1] == '.zip': # MINFLUX zarr file
            logging.getLogger(__name__).info('.zip file, trying to load as MINFLUX zarr ...')
            if not zip_is_minflux_zarr_data(filename,warning=True):
                raise RuntimeError("can't read pipeline data from MINFLUX zarr file - not a MINFLUX data set?")
            ds = MinfluxZarrSource(filename)
            ds.mdh = _get_mdh_zarr(filename,ds.zarr)
            return ds
        else:
            return super()._ds_from_file(filename, **kwargs)