PYME-extra 1.0.4.post0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101) hide show
  1. PYMEcs/Acquire/Actions/__init__.py +0 -0
  2. PYMEcs/Acquire/Actions/custom.py +167 -0
  3. PYMEcs/Acquire/Hardware/LPthreadedSimple.py +248 -0
  4. PYMEcs/Acquire/Hardware/LPthreadedSimpleSim.py +246 -0
  5. PYMEcs/Acquire/Hardware/NikonTiFlaskServer.py +45 -0
  6. PYMEcs/Acquire/Hardware/NikonTiFlaskServerT.py +59 -0
  7. PYMEcs/Acquire/Hardware/NikonTiRESTClient.py +73 -0
  8. PYMEcs/Acquire/Hardware/NikonTiSim.py +35 -0
  9. PYMEcs/Acquire/Hardware/__init__.py +0 -0
  10. PYMEcs/Acquire/Hardware/driftTrackGUI.py +329 -0
  11. PYMEcs/Acquire/Hardware/driftTrackGUI_n.py +472 -0
  12. PYMEcs/Acquire/Hardware/driftTracking.py +424 -0
  13. PYMEcs/Acquire/Hardware/driftTracking_n.py +433 -0
  14. PYMEcs/Acquire/Hardware/fakeCamX.py +15 -0
  15. PYMEcs/Acquire/Hardware/offsetPiezoRESTCorrelLog.py +38 -0
  16. PYMEcs/Acquire/__init__.py +0 -0
  17. PYMEcs/Analysis/MBMcollection.py +552 -0
  18. PYMEcs/Analysis/MINFLUX.py +280 -0
  19. PYMEcs/Analysis/MapUtils.py +77 -0
  20. PYMEcs/Analysis/NPC.py +1176 -0
  21. PYMEcs/Analysis/Paraflux.py +218 -0
  22. PYMEcs/Analysis/Simpler.py +81 -0
  23. PYMEcs/Analysis/Sofi.py +140 -0
  24. PYMEcs/Analysis/__init__.py +0 -0
  25. PYMEcs/Analysis/decSofi.py +211 -0
  26. PYMEcs/Analysis/eventProperties.py +50 -0
  27. PYMEcs/Analysis/fitDarkTimes.py +569 -0
  28. PYMEcs/Analysis/objectVolumes.py +20 -0
  29. PYMEcs/Analysis/offlineTracker.py +130 -0
  30. PYMEcs/Analysis/stackTracker.py +180 -0
  31. PYMEcs/Analysis/timeSeries.py +63 -0
  32. PYMEcs/Analysis/trackFiducials.py +186 -0
  33. PYMEcs/Analysis/zerocross.py +91 -0
  34. PYMEcs/IO/MINFLUX.py +851 -0
  35. PYMEcs/IO/NPC.py +117 -0
  36. PYMEcs/IO/__init__.py +0 -0
  37. PYMEcs/IO/darkTimes.py +19 -0
  38. PYMEcs/IO/picasso.py +219 -0
  39. PYMEcs/IO/tabular.py +11 -0
  40. PYMEcs/__init__.py +0 -0
  41. PYMEcs/experimental/CalcZfactor.py +51 -0
  42. PYMEcs/experimental/FRC.py +338 -0
  43. PYMEcs/experimental/ImageJROItools.py +49 -0
  44. PYMEcs/experimental/MINFLUX.py +1537 -0
  45. PYMEcs/experimental/NPCcalcLM.py +560 -0
  46. PYMEcs/experimental/Simpler.py +369 -0
  47. PYMEcs/experimental/Sofi.py +78 -0
  48. PYMEcs/experimental/__init__.py +0 -0
  49. PYMEcs/experimental/binEventProperty.py +187 -0
  50. PYMEcs/experimental/chaining.py +23 -0
  51. PYMEcs/experimental/clusterTrack.py +179 -0
  52. PYMEcs/experimental/combine_maps.py +104 -0
  53. PYMEcs/experimental/eventProcessing.py +93 -0
  54. PYMEcs/experimental/fiducials.py +323 -0
  55. PYMEcs/experimental/fiducialsNew.py +402 -0
  56. PYMEcs/experimental/mapTools.py +271 -0
  57. PYMEcs/experimental/meas2DplotDh5view.py +107 -0
  58. PYMEcs/experimental/mortensen.py +131 -0
  59. PYMEcs/experimental/ncsDenoise.py +158 -0
  60. PYMEcs/experimental/onTimes.py +295 -0
  61. PYMEcs/experimental/procPoints.py +77 -0
  62. PYMEcs/experimental/pyme2caml.py +73 -0
  63. PYMEcs/experimental/qPAINT.py +965 -0
  64. PYMEcs/experimental/randMap.py +188 -0
  65. PYMEcs/experimental/regExtraCmaps.py +11 -0
  66. PYMEcs/experimental/selectROIfilterTable.py +72 -0
  67. PYMEcs/experimental/showErrs.py +51 -0
  68. PYMEcs/experimental/showErrsDh5view.py +58 -0
  69. PYMEcs/experimental/showShiftMap.py +56 -0
  70. PYMEcs/experimental/snrEvents.py +188 -0
  71. PYMEcs/experimental/specLabeling.py +51 -0
  72. PYMEcs/experimental/splitRender.py +246 -0
  73. PYMEcs/experimental/testChannelByName.py +36 -0
  74. PYMEcs/experimental/timedSpecies.py +28 -0
  75. PYMEcs/experimental/utils.py +31 -0
  76. PYMEcs/misc/ExtraCmaps.py +177 -0
  77. PYMEcs/misc/__init__.py +0 -0
  78. PYMEcs/misc/configUtils.py +169 -0
  79. PYMEcs/misc/guiMsgBoxes.py +27 -0
  80. PYMEcs/misc/mapUtils.py +230 -0
  81. PYMEcs/misc/matplotlib.py +136 -0
  82. PYMEcs/misc/rectsFromSVG.py +182 -0
  83. PYMEcs/misc/shellutils.py +1110 -0
  84. PYMEcs/misc/utils.py +205 -0
  85. PYMEcs/misc/versionCheck.py +20 -0
  86. PYMEcs/misc/zcInfo.py +90 -0
  87. PYMEcs/pyme_warnings.py +4 -0
  88. PYMEcs/recipes/__init__.py +0 -0
  89. PYMEcs/recipes/base.py +75 -0
  90. PYMEcs/recipes/localisations.py +2380 -0
  91. PYMEcs/recipes/manipulate_yaml.py +83 -0
  92. PYMEcs/recipes/output.py +177 -0
  93. PYMEcs/recipes/processing.py +247 -0
  94. PYMEcs/recipes/simpler.py +290 -0
  95. PYMEcs/version.py +2 -0
  96. pyme_extra-1.0.4.post0.dist-info/METADATA +114 -0
  97. pyme_extra-1.0.4.post0.dist-info/RECORD +101 -0
  98. pyme_extra-1.0.4.post0.dist-info/WHEEL +5 -0
  99. pyme_extra-1.0.4.post0.dist-info/entry_points.txt +3 -0
  100. pyme_extra-1.0.4.post0.dist-info/licenses/LICENSE +674 -0
  101. pyme_extra-1.0.4.post0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,552 @@
1
+ ###############################################
2
+ ### A class definition for basic MBM processing
3
+ ### and analysis
4
+ ###############################################
5
+ import numpy as np
6
+ import matplotlib.pyplot as plt
7
+ import pandas as pd
8
+ from warnings import warn
9
+
10
def interp_bead(tnew, bead, customdict=None, extrapisnan=False):
    """Resample a single bead track onto the time base *tnew*.

    Parameters
    ----------
    tnew : array of times to interpolate onto
    bead : record with a 'tim' time vector and either a 'pos' (N, 3) position
        array (default mode) or the fields named in *customdict*
    customdict : optional mapping of output key -> record field name; when
        given, those fields are interpolated instead of the x/y/z positions
    extrapisnan : when True, times beyond the track's last sample give NaN
        (left-side extrapolation keeps numpy's default behaviour)

    Returns
    -------
    dict with one interpolated array per axis/key plus 't' set to *tnew*.
    """
    # np.interp treats right=None as "clamp to last sample" (its default)
    right = np.nan if extrapisnan else None
    ibead = {}
    if customdict is None:
        # default mode: interpolate the raw position columns, converted to nm
        for idx, ax in enumerate(('x', 'y', 'z')):
            ibead[ax] = np.interp(tnew, bead['tim'], 1e9 * bead['pos'][:, idx], right=right)
    else:
        # custom mode: interpolate the requested record fields as-is
        for outkey, field in customdict.items():
            ibead[outkey] = np.interp(tnew, bead['tim'], bead[field], right=right)
    ibead['t'] = tnew
    return ibead
29
+
30
def stdev_bead(bead, samplewindow=9):
    """Rolling per-axis standard deviation (in nm) of one raw bead track.

    Returns a dict with 'std_x'/'std_y'/'std_z' rolling-window sample std
    series (first samplewindow-1 entries are NaN), the quadrature sum 'std'
    and the original time vector under 'tim'.
    """
    nm_series = {ax: pd.Series(1e9 * bead['pos'][:, idx])
                 for idx, ax in enumerate(('x', 'y', 'z'))}
    sbead = {'std_%s' % ax: series.rolling(window=samplewindow).std()
             for ax, series in nm_series.items()}
    # combined 3D precision as the quadrature sum of the per-axis values
    sbead['std'] = np.sqrt(sbead['std_x'] ** 2 + sbead['std_y'] ** 2 + sbead['std_z'] ** 2)
    sbead['tim'] = bead['tim']
    return sbead
37
+
38
def stdev_beads(beads, samplewindow=9):
    """Compute rolling std-dev tracks for every bead in *beads*.

    Returns a dict keyed by bead name, each value as produced by stdev_bead.
    """
    return {name: stdev_bead(beads[name], samplewindow=samplewindow)
            for name in beads}
43
+
44
def interp_sbeads(sbeads, extrapisnan=False):
    """Interpolate per-bead std-dev tracks onto a common time base.

    Thin wrapper around interp_beads with an identity field mapping for the
    'std'/'std_x'/'std_y'/'std_z' properties.
    """
    std_fields = {key: key for key in ('std', 'std_x', 'std_y', 'std_z')}
    return interp_beads(sbeads, customdict=std_fields, extrapisnan=extrapisnan)
48
+
49
def interp_beads(beads, customdict=None, extrapisnan=False):
    """Interpolate all bead tracks onto a shared, unit-spaced time base.

    The common time vector runs from the earliest to the latest timestamp
    found across all tracks (rounded to integers, step 1).

    Parameters
    ----------
    beads : mapping of bead name -> raw bead record (must carry 'tim')
    customdict : optional mapping of output key -> record field name,
        forwarded to interp_bead
    extrapisnan : forwarded to interp_bead; NaN beyond each track's end

    Returns
    -------
    dict of bead name -> interpolated track dict (see interp_bead)
    """
    # empty input yields an empty collection (min()/max() would raise)
    if not len(beads):
        return {}

    # BUGFIX: the previous implementation seeded the minimum with a hard-coded
    # 1e6 sentinel, which silently produced a wrong range whenever every track
    # started after t = 1e6 (e.g. epoch-style timestamps)
    mint = min(beads[bead]['tim'].min() for bead in beads)
    maxt = max(beads[bead]['tim'].max() for bead in beads)

    # NOTE(review): if some bead tracks are much shorter than others the
    # interpolation extrapolates flat (or NaN with extrapisnan) beyond their
    # range - watch out for such cases and consider code tweaks if needed
    tnew = np.arange(np.round(mint), np.round(maxt) + 1)
    return {bead: interp_bead(tnew, beads[bead], customdict=customdict,
                              extrapisnan=extrapisnan)
            for bead in beads}
72
+
73
import pandas as pd  # note: redundant (already imported at module top) but harmless
def df_from_interp_beads(beads,customdict=None):
    """Build per-axis pandas DataFrames from a collection of raw bead tracks.

    Interpolates all tracks onto a common time base (NaN beyond each track's
    end), then returns a dict mapping each axis/property name ('x', 'y', 'z',
    'std_x', 'std_y', 'std_z', 'std') to a DataFrame with one column per bead,
    indexed by the shared time vector.
    """
    ibeads = interp_beads(beads,customdict=customdict,extrapisnan=True)
    dictbeads = {}
    for axis in ['x','y','z','std_x','std_y','std_z','std']:
        dictbeads[axis] = {}

    # collect the interpolated position tracks, one column per bead
    for bead in ibeads:
        for axis in ['x','y','z']:
            dictbeads[axis][bead] = ibeads[bead][axis]
        t = ibeads[bead]['t'] # this is actually always the same t

    dfbeads = {}
    for axis in ['x','y','z']:
        dfbeads[axis] = pd.DataFrame(dictbeads[axis],index=t)

    # compute rolling std-dev tracks from the raw beads and interpolate them
    # onto the same time base, then add them as further DataFrames
    sbeads = stdev_beads(beads)
    sibeads = interp_sbeads(sbeads,extrapisnan=True)
    for bead in sibeads:
        for axis in ['std_x','std_y','std_z','std']:
            dictbeads[axis][bead] = sibeads[bead][axis]
    for axis in ['std_x','std_y','std_z','std']:
        dfbeads[axis] = pd.DataFrame(dictbeads[axis],index=t)

    return dfbeads
98
+
99
def get_mbm(ds):
    """Extract the MBM correction track from a localisation datasource.

    't' is rescaled by 1e-3 (presumably ms -> s; confirm against the data
    source). Each axis is the difference between the corrected coordinate and
    its non-corrected ('*_nc') counterpart, i.e. the applied correction.
    'z' is included only when a 'z_nc' column exists.
    """
    mbm = {
        't': 1e-3 * ds['t'],
        'x': ds['x'] - ds['x_nc'],
        'y': ds['y'] - ds['y_nc'],
    }
    if 'z_nc' in ds.keys():
        mbm['z'] = ds['z'] - ds['z_nc']
    return mbm
107
+
108
# minimal recipe to coalesce events
# PYME recipe in YAML form: first derives pipeline variables from the raw fit
# results, then merges clumped localisations into single coalesced events
# (output table 'coalesced_nz')
COALESCE_RECIPE = """
- localisations.AddPipelineDerivedVars:
    inputEvents: ''
    inputFitResults: FitResults
    outputLocalizations: Localizations
- localisations.MergeClumps:
    discardTrivial: true
    inputName: Localizations
    outputName: coalesced_nz
"""
119
+
120
import hashlib
import json
# we use this function to generate a unique hash from a dictionary
# see also https://stackoverflow.com/questions/16092594/how-to-create-a-unique-key-for-a-dictionary-in-python
# this will be used further below to check if our cached value of the mean is still usable
def hashdict(d):
    """Return a stable SHA1 hex digest for a JSON-serialisable dict.

    Keys are sorted before serialisation so two dicts with equal content hash
    identically regardless of insertion order.
    """
    # parameter renamed from `dict` to avoid shadowing the builtin; the module
    # only calls this positionally, so callers are unaffected
    return hashlib.sha1(json.dumps(d, sort_keys=True).encode()).hexdigest()
128
+
129
class MBMCollection(object):
    """A collection of MBM bead tracks sharing a common time base.

    Each track is a dict with keys 't', 'x', 'y' (optionally 'z') plus the
    rolling std-dev properties added on file load. Beads can be flagged
    good/bad; the mean drift track is computed over good beads only and
    cached until the good/bad flags change (detected via hashdict). Tracks
    can be aligned by subtracting each bead's mean position over a reference
    time window (align_beads).
    """

    def __init__(self, name=None, filename=None, variance_window=9):
        self.mbms = {}            # bead name -> track dict
        self.beadisgood = {}      # bead name -> bool; bad beads excluded from the mean
        self.offsets = {}         # bead name -> {axis: offset} from align_beads
        self.is3D = False         # set when the first added bead carries 'z'
        self._mean = None         # cached mean track; None = needs recompute
        self._hashkey = ''        # hash of beadisgood at the time the mean was cached
        self._offsets_valid = False
        self.t = None             # common time vector, fixed by the first added bead
        self.tperiod = None       # boolean mask over t for the current alignment window
        self._trange = (None, None)  # (tmin, tmax) of the last alignment
        # by default use last 9 localisations for variance/std calculation
        self.variance_window = variance_window

        if filename is not None:
            # this is a MBM bead file with raw bead tracks
            self.name = filename
            self._raw_beads = np.load(filename)
            ibeads = interp_beads(self._raw_beads)
            self.add_beads(ibeads)
            # now add info on std deviation
            sbeads = stdev_beads(self._raw_beads)
            sibeads = interp_sbeads(sbeads)
            for bead in sibeads:
                if not np.allclose(self.t, sibeads[bead]['t'], 1e-3):
                    raise RuntimeError("time vector for new bead variance differs by more than 0.1 %")
                for property in sibeads[bead].keys():
                    if property != 't':
                        self.mbms[bead][property] = sibeads[bead][property]
        else:
            self.name = name

    @property
    def beads(self):
        """View of the registered bead names."""
        return self.mbms.keys()

    def _validaxis(self, axis):
        # 'z'/'std_z' are only valid for 3D collections
        if self.is3D:
            axes = ['x', 'y', 'z', 'std_x', 'std_y', 'std_z', 'std']
        else:
            axes = ['x', 'y', 'std_x', 'std_y', 'std']
        return axis in axes

    def beadtrack(self, bead, axis, unaligned=False):
        """Return the track of *bead* along *axis*.

        The alignment offset is subtracted when offsets are valid and
        unaligned is False. Raises RuntimeError for unknown beads or axes.
        """
        if not bead in self.beads:
            raise RuntimeError("asking for non existing bead track for bead %s" % bead)
        if not self._validaxis(axis):
            raise RuntimeError("asking for invalid axis %s" % axis)
        if self._offsets_valid and not unaligned:
            return self.mbms[bead][axis] - self.offsets[bead][axis]
        else:
            return self.mbms[bead][axis]

    def mean(self):
        """Mean track over all good beads; cached until the flags change."""
        if self._mean is not None and self._hashkey == hashdict(self.beadisgood):
            # cache hit - the set of good beads is unchanged since last compute
            return self._mean
        else:
            self._mean = {}
            for axis in ['x', 'y']:
                self._mean[axis] = np.mean([self.beadtrack(bead, axis) for bead in self.beads if self.beadisgood[bead]], axis=0)
            if self.is3D:
                self._mean['z'] = np.mean([self.beadtrack(bead, 'z') for bead in self.beads if self.beadisgood[bead]], axis=0)
            self._hashkey = hashdict(self.beadisgood)
            return self._mean

    def add_bead(self, bead, mbm):
        """Register a new bead track.

        The first bead fixes the common time base and whether the collection
        is 3D; subsequent beads must match both (RuntimeError otherwise).
        """
        if self.t is None:
            self.t = mbm['t']
            self.is3D = 'z' in mbm.keys()
        else:
            if self.t.size != mbm['t'].size:
                raise RuntimeError("register bead: size of time vectors do not match\nold size %s, new size %s, bead %s"
                                   % (self.t.size, mbm['t'].size, bead))
            if not np.allclose(self.t, mbm['t'], 1e-3):
                raise RuntimeError("time vector for new bead differs by more than 0.1 %")
            if not 'z' in mbm.keys() and self.is3D:
                raise RuntimeError('adding bead lacking z info to existing 3D MBM collection')
        self.mbms[bead] = mbm  # note we may need copies of vectors, possibly at least of 't'
        self.markasgood(bead)
        self._mean = None  # invalidate cache
        self._offsets_valid = False

    def add_beads(self, beads):
        """Register all beads from a mapping of name -> track dict."""
        for bead in beads:
            self.add_bead(bead, beads[bead])

    def align_beads(self, tmin=None, tmax=None):
        """Compute per-bead offsets as the mean position over (tmin, tmax).

        Omitted bounds fall back to the previously used range, or the full
        time range if none was set. Raises RuntimeError on an empty window.
        """
        if tmin is None:
            if self._trange[0] is None:
                tmin = self.t.min()
            else:
                tmin = self._trange[0]
        if tmax is None:
            if self._trange[1] is None:
                tmax = self.t.max()
            else:
                # BUGFIX: this previously read self._trange[0], which collapsed
                # the window to an empty range when re-aligning with no args
                tmax = self._trange[1]
        self.tperiod = (self.t > tmin) * (self.t < tmax)
        if not np.any(self.tperiod):
            raise RuntimeError("empty range, tmin: %d, tmax: %d" % (tmin, tmax))
        self._trange = (tmin, tmax)
        for bead in self.beads:
            self.offsets[bead] = {}
            self.offsets[bead]['x'] = np.mean(self.mbms[bead]['x'][self.tperiod])
            self.offsets[bead]['y'] = np.mean(self.mbms[bead]['y'][self.tperiod])
            if self.is3D:
                self.offsets[bead]['z'] = np.mean(self.mbms[bead]['z'][self.tperiod])
        self._offsets_valid = True
        self._mean = None  # invalidate cache

    def markasbad(self, *beads):
        """Mark the named beads as bad (excluded from the mean); unknown names are ignored."""
        for bead in beads:
            if bead in self.beads:
                self.beadisgood[bead] = False

    def markasgood(self, *beads):
        """Mark the named beads as good; unknown names are ignored."""
        for bead in beads:
            if bead in self.beads:
                self.beadisgood[bead] = True

    def plot_tracks(self, axis, unaligned=False, use_tperiod=False, legend=True, tmin=None, tmax=None, plot_mean=True):
        """Plot all good bead tracks along *axis* with matplotlib.

        Aligns first (unless unaligned / a std axis) and optionally overlays
        the mean track as a dashed line.
        """
        if tmin is None:
            tmin = self._trange[0]
        if tmax is None:
            tmax = self._trange[1]
        if tmin != self._trange[0] or tmax != self._trange[1]:
            self._offsets_valid = False

        if axis.startswith('std'):
            unaligned = True   # not sensible to align the std devs
            plot_mean = False  # the mean also does not make much sense

        # we may need to check the alignment logic below
        if not unaligned:
            if not self._offsets_valid:
                self.align_beads(tmin=tmin, tmax=tmax)

        for bead in self.beads:
            if self.beadisgood[bead]:
                plt.plot(self.t, self.beadtrack(bead, axis, unaligned=unaligned), label=bead)
        if plot_mean:
            plt.plot(self.t, self.mean()[axis], '--', label='mean')
        if legend:
            plt.legend()
        if use_tperiod:
            plt.xlim(self._trange[0], self._trange[1])

    def plot_deviation_from_mean(self, axis, align=True, legend=True):
        """Plot each good bead's deviation from the mean track along *axis*."""
        if axis.startswith('std'):
            raise RuntimeError("this method is not suitable for standard deviation trajectories")
        if align and not self._offsets_valid:
            self.align_beads()
        for bead in self.beads:
            if self.beadisgood[bead]:
                plt.plot(self.t, self.beadtrack(bead, axis) - self.mean()[axis], label=bead)
        if legend:
            plt.legend()
287
+
288
+ try:
289
+ import plotly.express as px
290
+ import plotly.graph_objects as go
291
+ from plotly.subplots import make_subplots
292
+ except ImportError:
293
+ warn("can't import plotly modules, new style bead plotting using MBMCollectionDF will not work")
294
+
295
# we use this function to generate a unique hash from a dataframe
# need to check whether this is necessary or whether it is OK to turn the
# filter settings into a unique hash for caching
def hashdf(df):
    """Return a SHA1 hex digest over a DataFrame's per-row hash values."""
    row_hashes = pd.util.hash_pandas_object(df).values
    return hashlib.sha1(row_hashes).hexdigest()
299
+
300
class MBMCollectionDF(object): # collection based on dataframe objects
    """MBM bead-track collection backed by pandas DataFrames.

    After loading from file, self.beads maps each axis/property name
    ('x', 'y', 'z', 'std_x', 'std_y', 'std_z', 'std') to a DataFrame with
    one column per bead, indexed by the common time base. Beads can be
    flagged good/bad; plotting uses plotly (plot_tracks) or matplotlib
    (plot_tracks_matplotlib).
    """
    def __init__(self,name=None,filename=None,variance_window = 9,foreshortening=1.0):
        # name: display name; defaults to the file stem when loading from file
        # filename: .npz or zarr .zip MBM bead file to load
        # variance_window: window size for the rolling std calculation
        # foreshortening: scale factor applied to the z positions on load
        self.mbms = {}
        self.beadisgood = {}  # bead name -> bool; 'bad' beads excluded from the mean
        self.t = None         # common time index of the interpolated tracks
        self.tperiod = None
        self._trange= (None,None)  # (tmin, tmax) alignment window
        self.variance_window = variance_window # by default use last 9 localisations for variance/std calculation
        self.median_window = 0 # 0 means not active
        self.foreshortening = foreshortening
        self.plotbad = False  # when True, also show 'bad' beads (faded) in plot_tracks

        if filename is not None:
            self.filename = filename
            self.populate_df_from_file(filename)
            if name is None:
                from pathlib import Path
                name = Path(filename).stem # should really be just the basename; also may want to protect against filename being a file IO object

        self.name = name

    def to_JSON(self): # this is a dummy mostly to get the object to convert without error in metadata output
        return "Dummy for MBMCollectionDF object"

    def populate_df_from_file(self,filename):
        """Load raw bead tracks from *filename* and build the per-axis DataFrames.

        Supports .npz archives (numpy load) and .zip zarr archives; for the
        latter the beads are split by their 'gri' id and named from the
        archive attributes. Raises RuntimeError for any other extension.
        """
        import os
        # this is a MBM bead file with raw bead tracks
        self.name=filename
        if os.path.splitext(filename)[1] == '.npz':
            self._raw_beads = np.load(filename)
        elif os.path.splitext(filename)[1] == '.zip':
            import zarr
            arch = zarr.open(filename)
            mbm_data = arch['grd']['mbm']['points'][:] # the indexing imports this as an np.array
            mbm_attrs = arch['grd']['mbm'].points.attrs['points_by_gri']
            rawbeads = {}
            # split the flat point table into one record array per bead id
            for gri_id in np.unique(mbm_data['gri']):
                gri_str = str(gri_id)
                bead = mbm_attrs[gri_str]['name']
                print("%d - name %s" % (gri_id,bead))
                dbead = mbm_data[mbm_data['gri'] == gri_id]
                # rename the record fields to the names the rest of the module expects
                dbead.dtype.names = ('gri', 'pos', 'tim', 'str')
                rawbeads[bead] = dbead
            self._raw_beads = rawbeads
        else:
            raise RuntimeError('unknown MBM file format, file name is "%s"' % filename)

        for bead in self._raw_beads:
            # apply the z foreshortening correction to the raw positions in place
            self._raw_beads[bead]['pos'][:,2] *= self.foreshortening
        self.beads = df_from_interp_beads(self._raw_beads)
        self.t = self.beads['x'].index

        # all beads start out flagged as good
        for bead in self.beads['x']:
            self.beadisgood[bead] = True

    def markasbad(self,*beads): # mark a bead as bad
        for bead in beads:
            if bead in self.beads['x']:
                self.beadisgood[bead] = False

    def markasgood(self,*beads): # if currently bad, mark as good
        for bead in beads:
            if bead in self.beads['x']:
                self.beadisgood[bead] = True

    def mean(self,axis,tmin=None,tmax=None):
        """Return the mean aligned track of the good beads along *axis*.

        Each good bead is offset by its mean over [tmin, tmax] (defaulting to
        the stored range, then to the full time range); beads that are all-NaN
        after alignment are dropped with a warning. Raises RuntimeError for
        std axes.
        """
        if tmin is None:
            tmin=self._trange[0]
        if tmax is None:
            tmax=self._trange[1]

        # second fallback: full time range
        if tmin is None:
            tmin = self.t.min()
        if tmax is None:
            tmax = self.t.max()

        if axis.startswith('std'):
            raise RuntimeError("mean not defined for axis %s" % axis) # not sensible to align the std devs

        # optional rolling-median smoothing of the tracks
        if self.median_window > 0:
            startdf = self.beads[axis].rolling(self.median_window).median()
        else:
            startdf = self.beads[axis]
        startdfg = startdf[[bead for bead in self.beadisgood if self.beadisgood[bead]]]
        dfplotg = startdfg-startdfg.loc[tmin:tmax].mean(axis=0)
        has_bads = not np.all(list(self.beadisgood.values())) # we have at least a single bad bead
        if has_bads:
            # NOTE(review): dfplotb is computed here but never used in this
            # method - looks like a leftover from plot_tracks; confirm
            dfplotb = startdf[[bead for bead in self.beadisgood if not self.beadisgood[bead]]]
            dfplotb = dfplotb - dfplotb.loc[tmin:tmax].mean(axis=0)
        emptybeads = dfplotg.columns[dfplotg.isnull().all(axis=0)]
        if len(emptybeads)>0:
            warn('removing beads with no valid info after alignment %s...' % emptybeads)
            dfplotg = dfplotg[dfplotg.columns[~dfplotg.isnull().all(axis=0)]]

        return dfplotg.mean(axis=1)

    def plot_tracks(self,axis,unaligned=False,tmin=None,tmax=None):
        """Plot bead tracks along *axis* with plotly.

        Aligned mode shows two stacked subplots: the aligned tracks plus their
        mean, and each track's deviation from the mean. Unaligned mode (forced
        for std axes) shows a single plot of the raw tracks.
        """
        if tmin is None:
            tmin=self._trange[0]
        if tmax is None:
            tmax=self._trange[1]

        # fall back to the full time range when no alignment window is stored
        if tmin is None:
            tmin = self.t.min()
        if tmax is None:
            tmax = self.t.max()

        if axis.startswith('std'):
            unaligned = True # not sensible to align the std devs

        # optional rolling-median smoothing of the tracks
        if self.median_window > 0:
            startdf = self.beads[axis].rolling(self.median_window).median()
        else:
            startdf = self.beads[axis]
        if not unaligned:
            startdfg = startdf[[bead for bead in self.beadisgood if self.beadisgood[bead]]]
            dfplotg = startdfg-startdfg.loc[tmin:tmax].mean(axis=0)
            has_bads = not np.all(list(self.beadisgood.values())) # we have at least a single bad bead
            if has_bads:
                dfplotb = startdf[[bead for bead in self.beadisgood if not self.beadisgood[bead]]]
                dfplotb = dfplotb - dfplotb.loc[tmin:tmax].mean(axis=0)
            emptybeads = dfplotg.columns[dfplotg.isnull().all(axis=0)]
            if len(emptybeads)>0:
                warn('removing beads with no valid info after alignment %s...' % emptybeads)
                dfplotg = dfplotg[dfplotg.columns[~dfplotg.isnull().all(axis=0)]]

            # fig1: aligned tracks + dashed mean; fig2: deviation from the mean
            fig1 = px.line(dfplotg)
            fig1.add_trace(go.Scatter(x=self.t, y=dfplotg.mean(axis=1), name='Mean',
                                      line=dict(color='firebrick', dash='dash')))
            fig2 = px.line(dfplotg.sub(dfplotg.mean(axis=1),axis=0))

            fig = make_subplots(rows=2, cols=1)

            # we use explicit trace coloring and legend ranking to "survive" the trace reordering below when 'bad' beads are plotted as well
            col_dict = px.colors.qualitative.Plotly
            dict_len = len(col_dict)
            tracenum = 0

            for d in fig1.data:
                fig.add_trace((go.Scatter(x=d['x'], y=d['y'], name = d['name'], line=dict(color=col_dict[tracenum % dict_len]),
                                          legendrank=tracenum+1)), row=1, col=1)
                tracenum += 1

            if self.plotbad and has_bads:
                fig.data = fig.data[::-1] # here we initially reverse the plotting sequence of the fig1 traces, but see below
                for column in dfplotb:
                    # print("adding bad trace %s" % column)
                    fig.add_trace((go.Scatter(x=self.t, y=dfplotb[column], name="%s - bad" % column, opacity=0.2,
                                              line=dict(color=col_dict[tracenum % dict_len]),
                                              legendrank=tracenum+1)), row=1, col=1)
                    tracenum += 1
                fig.data = fig.data[::-1] # now we reverse again so that the 'bad traces' are plotted first (and thus at bottom)
                # the original reversal at the top of this block (fig 1 traces) is now reversed so that the mean is plotted last

            colnum = 0 # we start colors again at position 0 for the second subplot
            for d in fig2.data:
                fig.add_trace((go.Scatter(x=d['x'], y=d['y'], name = d['name'], line=dict(color=col_dict[colnum % dict_len]),
                                          legendrank=tracenum+1)), row=2, col=1)
                tracenum += 1
                colnum += 1

            fig.update_layout(autosize=False, width=1000, height=700,title_text="aligned MBM tracks along %s" % axis)
            # Update axes properties
            fig.update_xaxes(title_text="time (s)", row=1, col=1)
            fig.update_xaxes(title_text="time (s)", row=2, col=1)
            fig.update_yaxes(title_text="drift (nm)", range=[np.min([-15.0,dfplotg.min().min()]),np.max([15.0,dfplotg.max().max()])], row=1, col=1)
            fig.update_yaxes(title_text="deviation (nm)", range=[-10,10], row=2, col=1)

            fig.show()

        else:
            # unaligned (or std-axis) mode: single plot of the raw tracks
            if axis.startswith('std'):
                yaxis_title = "std dev (nm)"
                title = 'MBM localisation precisions (%s)' % axis
            else:
                title = 'tracks along %s, not aligned' % axis
                yaxis_title = "distance (nm)"
            dfplot = startdf
            dfplotg = dfplot[[bead for bead in self.beadisgood if self.beadisgood[bead]]]
            fig = px.line(dfplotg)
            fig.update_layout(xaxis_title="time (s)", yaxis_title=yaxis_title, title_text=title)
            if axis.startswith('std'):
                fig.update_yaxes(range = (0,np.max([10.0,dfplotg.max().max()])))
            fig.show()

    def plot_tracks_matplotlib(self,axis,unaligned=False,tmin=None,tmax=None,ax=None,goodalpha=1.0):
        """Matplotlib version of plot_tracks.

        Plots aligned tracks (bad beads faded when present) plus the mean, or
        raw tracks in unaligned / std-axis mode. Plots into *ax* when given.
        """
        if tmin is None:
            tmin=self._trange[0]
        if tmax is None:
            tmax=self._trange[1]

        # fall back to the full time range when no alignment window is stored
        if tmin is None:
            tmin = self.t.min()
        if tmax is None:
            tmax = self.t.max()

        if axis.startswith('std'):
            unaligned = True # not sensible to align the std devs

        # optional rolling-median smoothing of the tracks
        if self.median_window > 0:
            startdf = self.beads[axis].rolling(self.median_window).median()
        else:
            startdf = self.beads[axis]
        if not unaligned:
            startdfg = startdf[[bead for bead in self.beadisgood if self.beadisgood[bead]]]
            dfplotg = startdfg-startdfg.loc[tmin:tmax].mean(axis=0)
            has_bads = not np.all(list(self.beadisgood.values())) # we have at least a single bad bead
            if has_bads:
                dfplotb = startdf[[bead for bead in self.beadisgood if not self.beadisgood[bead]]]
                dfplotb = dfplotb - dfplotb.loc[tmin:tmax].mean(axis=0)
            emptybeads = dfplotg.columns[dfplotg.isnull().all(axis=0)]
            if len(emptybeads)>0:
                warn('removing beads with no valid info after alignment %s...' % emptybeads)
                dfplotg = dfplotg[dfplotg.columns[~dfplotg.isnull().all(axis=0)]]

            if has_bads:
                # bad beads first (faded), then good beads on top
                ax = dfplotb.plot(legend = True,alpha=0.2,ax=ax)
                dfplotg.plot(legend = True,ax = ax,alpha=goodalpha)
            else:
                ax = dfplotg.plot(legend = True,ax=ax,alpha=goodalpha)
            ax.plot(self.t, dfplotg.mean(axis=1),label='mean',alpha=goodalpha) # add the mean
            ax.legend()
            ax.set_title("MBM for %s axis" % axis)
            ax.set_xlabel("time (s)")
            ax.set_ylabel("drift %s (nm)" % axis)
            ax.set_ylim(np.min([-15.0,dfplotg.min().min()]),np.max([15.0,dfplotg.max().max()]))

        else:
            # unaligned (or std-axis) mode: single plot of the raw tracks
            if axis.startswith('std'):
                yaxis_title = "std dev (nm)"
                title = 'MBM localisation precisions (%s)' % axis
            else:
                title = 'tracks along %s, not aligned' % axis
                yaxis_title = "distance (nm)"
            dfplot = startdf
            dfplotg = dfplot[[bead for bead in self.beadisgood if self.beadisgood[bead]]]

            ax = dfplotg.plot(legend = True,alpha=goodalpha)
            ax.set_title(title)
            ax.set_xlabel("time (s)")
            ax.set_ylabel(yaxis_title)
            ax.set_ylim(0,np.max([10.0,dfplotg.max().max()]))

    # we add custom pickling/unpickling methods so that an mbm instance in the
    # PYME metadata won't trip up image saving with metadata
    # really some kind of hack, perhaps it is better to save the mbm instance in some other form
    def __getstate__(self):
        """Dummy pickling support: the pickled form is just a placeholder string."""
        warn("mbm is being pickled - just a dummy mostly for PYME metadata - won't be usable after unpickling")
        return 'not a valid mbm collection after pickling/unpickling'

    def __setstate__(self, d):
        """Dummy unpickling support: stores the placeholder, object is not usable."""
        warn("mbm is being unpickled - this is just a dummy unpickle, won't be usable after unpickling")
        self._unpickled = d