PYME-extra 1.0.4.post0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. PYMEcs/Acquire/Actions/__init__.py +0 -0
  2. PYMEcs/Acquire/Actions/custom.py +167 -0
  3. PYMEcs/Acquire/Hardware/LPthreadedSimple.py +248 -0
  4. PYMEcs/Acquire/Hardware/LPthreadedSimpleSim.py +246 -0
  5. PYMEcs/Acquire/Hardware/NikonTiFlaskServer.py +45 -0
  6. PYMEcs/Acquire/Hardware/NikonTiFlaskServerT.py +59 -0
  7. PYMEcs/Acquire/Hardware/NikonTiRESTClient.py +73 -0
  8. PYMEcs/Acquire/Hardware/NikonTiSim.py +35 -0
  9. PYMEcs/Acquire/Hardware/__init__.py +0 -0
  10. PYMEcs/Acquire/Hardware/driftTrackGUI.py +329 -0
  11. PYMEcs/Acquire/Hardware/driftTrackGUI_n.py +472 -0
  12. PYMEcs/Acquire/Hardware/driftTracking.py +424 -0
  13. PYMEcs/Acquire/Hardware/driftTracking_n.py +433 -0
  14. PYMEcs/Acquire/Hardware/fakeCamX.py +15 -0
  15. PYMEcs/Acquire/Hardware/offsetPiezoRESTCorrelLog.py +38 -0
  16. PYMEcs/Acquire/__init__.py +0 -0
  17. PYMEcs/Analysis/MBMcollection.py +552 -0
  18. PYMEcs/Analysis/MINFLUX.py +280 -0
  19. PYMEcs/Analysis/MapUtils.py +77 -0
  20. PYMEcs/Analysis/NPC.py +1176 -0
  21. PYMEcs/Analysis/Paraflux.py +218 -0
  22. PYMEcs/Analysis/Simpler.py +81 -0
  23. PYMEcs/Analysis/Sofi.py +140 -0
  24. PYMEcs/Analysis/__init__.py +0 -0
  25. PYMEcs/Analysis/decSofi.py +211 -0
  26. PYMEcs/Analysis/eventProperties.py +50 -0
  27. PYMEcs/Analysis/fitDarkTimes.py +569 -0
  28. PYMEcs/Analysis/objectVolumes.py +20 -0
  29. PYMEcs/Analysis/offlineTracker.py +130 -0
  30. PYMEcs/Analysis/stackTracker.py +180 -0
  31. PYMEcs/Analysis/timeSeries.py +63 -0
  32. PYMEcs/Analysis/trackFiducials.py +186 -0
  33. PYMEcs/Analysis/zerocross.py +91 -0
  34. PYMEcs/IO/MINFLUX.py +851 -0
  35. PYMEcs/IO/NPC.py +117 -0
  36. PYMEcs/IO/__init__.py +0 -0
  37. PYMEcs/IO/darkTimes.py +19 -0
  38. PYMEcs/IO/picasso.py +219 -0
  39. PYMEcs/IO/tabular.py +11 -0
  40. PYMEcs/__init__.py +0 -0
  41. PYMEcs/experimental/CalcZfactor.py +51 -0
  42. PYMEcs/experimental/FRC.py +338 -0
  43. PYMEcs/experimental/ImageJROItools.py +49 -0
  44. PYMEcs/experimental/MINFLUX.py +1537 -0
  45. PYMEcs/experimental/NPCcalcLM.py +560 -0
  46. PYMEcs/experimental/Simpler.py +369 -0
  47. PYMEcs/experimental/Sofi.py +78 -0
  48. PYMEcs/experimental/__init__.py +0 -0
  49. PYMEcs/experimental/binEventProperty.py +187 -0
  50. PYMEcs/experimental/chaining.py +23 -0
  51. PYMEcs/experimental/clusterTrack.py +179 -0
  52. PYMEcs/experimental/combine_maps.py +104 -0
  53. PYMEcs/experimental/eventProcessing.py +93 -0
  54. PYMEcs/experimental/fiducials.py +323 -0
  55. PYMEcs/experimental/fiducialsNew.py +402 -0
  56. PYMEcs/experimental/mapTools.py +271 -0
  57. PYMEcs/experimental/meas2DplotDh5view.py +107 -0
  58. PYMEcs/experimental/mortensen.py +131 -0
  59. PYMEcs/experimental/ncsDenoise.py +158 -0
  60. PYMEcs/experimental/onTimes.py +295 -0
  61. PYMEcs/experimental/procPoints.py +77 -0
  62. PYMEcs/experimental/pyme2caml.py +73 -0
  63. PYMEcs/experimental/qPAINT.py +965 -0
  64. PYMEcs/experimental/randMap.py +188 -0
  65. PYMEcs/experimental/regExtraCmaps.py +11 -0
  66. PYMEcs/experimental/selectROIfilterTable.py +72 -0
  67. PYMEcs/experimental/showErrs.py +51 -0
  68. PYMEcs/experimental/showErrsDh5view.py +58 -0
  69. PYMEcs/experimental/showShiftMap.py +56 -0
  70. PYMEcs/experimental/snrEvents.py +188 -0
  71. PYMEcs/experimental/specLabeling.py +51 -0
  72. PYMEcs/experimental/splitRender.py +246 -0
  73. PYMEcs/experimental/testChannelByName.py +36 -0
  74. PYMEcs/experimental/timedSpecies.py +28 -0
  75. PYMEcs/experimental/utils.py +31 -0
  76. PYMEcs/misc/ExtraCmaps.py +177 -0
  77. PYMEcs/misc/__init__.py +0 -0
  78. PYMEcs/misc/configUtils.py +169 -0
  79. PYMEcs/misc/guiMsgBoxes.py +27 -0
  80. PYMEcs/misc/mapUtils.py +230 -0
  81. PYMEcs/misc/matplotlib.py +136 -0
  82. PYMEcs/misc/rectsFromSVG.py +182 -0
  83. PYMEcs/misc/shellutils.py +1110 -0
  84. PYMEcs/misc/utils.py +205 -0
  85. PYMEcs/misc/versionCheck.py +20 -0
  86. PYMEcs/misc/zcInfo.py +90 -0
  87. PYMEcs/pyme_warnings.py +4 -0
  88. PYMEcs/recipes/__init__.py +0 -0
  89. PYMEcs/recipes/base.py +75 -0
  90. PYMEcs/recipes/localisations.py +2380 -0
  91. PYMEcs/recipes/manipulate_yaml.py +83 -0
  92. PYMEcs/recipes/output.py +177 -0
  93. PYMEcs/recipes/processing.py +247 -0
  94. PYMEcs/recipes/simpler.py +290 -0
  95. PYMEcs/version.py +2 -0
  96. pyme_extra-1.0.4.post0.dist-info/METADATA +114 -0
  97. pyme_extra-1.0.4.post0.dist-info/RECORD +101 -0
  98. pyme_extra-1.0.4.post0.dist-info/WHEEL +5 -0
  99. pyme_extra-1.0.4.post0.dist-info/entry_points.txt +3 -0
  100. pyme_extra-1.0.4.post0.dist-info/licenses/LICENSE +674 -0
  101. pyme_extra-1.0.4.post0.dist-info/top_level.txt +1 -0
PYMEcs/Analysis/eventProperties.py
@@ -0,0 +1,50 @@
+ import numpy as np
+
+
+ # in the below, p is assumed to be a pipeline object
+ # some heuristics to estimate the area covered by the pipeline data
+ def getarea(p):
+     if 'x' in p.filterKeys:
+         width = p.filterKeys['x'][1] - p.filterKeys['x'][0]
+     else:
+         width = p.mdh.Camera.ROIWidth * p.mdh.voxelsize_nm.x
+     if 'y' in p.filterKeys:
+         height = p.filterKeys['y'][1] - p.filterKeys['y'][0]
+     else:
+         height = p.mdh.Camera.ROIHeight * p.mdh.voxelsize_nm.y
+     # all distances are in nm and we want um^2
+     area1 = 1e-6 * width * height
+
+     nEvents = p['x'].size
+     if nEvents > 1:
+         xrange = [p['x'].min(), p['x'].max()]
+         yrange = [p['y'].min(), p['y'].max()]
+
+         width = xrange[1] - xrange[0]
+         height = yrange[1] - yrange[0]
+         # all distances are in nm and we want um^2
+         area2 = 1e-6 * width * height
+     else:
+         # too few events for a data-driven estimate; fall back to area1
+         area2 = area1
+
+     if ('x' in p.filterKeys) and ('y' in p.filterKeys):
+         area = area1
+     else:
+         if abs(area1 - area2) / area1 > 0.2:  # we have a > 20 % difference
+             area = area2
+         else:
+             area = area1
+
+     return area  # area in um^2
+
+
+ def evtDensity(p):
+     area = getarea(p)
+     trange = p['t'].max() - p['t'].min() + 1
+     nEvents = p['x'].size
+
+     if area > 1e-6:
+         dens = nEvents / area  # events per um^2
+
+         intens1 = dens / trange * 5e3  # events per um^2 per 5k frames
+         intens2 = 400 * dens / trange  # events per (20 um)^2 per frame
+         return (dens, intens1, intens2, trange)
+     else:
+         return None
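As a usage sketch (not part of the package): getarea and evtDensity only need dict-style access to 'x', 'y', 't' plus a filterKeys mapping, so a minimal mock pipeline suffices. MockPipeline and all of the values below are hypothetical stand-ins for a real PYME pipeline.

import numpy as np

class MockPipeline:
    # hypothetical stand-in: just filterKeys plus column access
    def __init__(self, columns, filterKeys):
        self._columns = columns
        self.filterKeys = filterKeys

    def __getitem__(self, key):
        return self._columns[key]

rng = np.random.default_rng(0)
p = MockPipeline(columns={'x': rng.uniform(0, 20e3, 10000),    # nm
                          'y': rng.uniform(0, 20e3, 10000),    # nm
                          't': rng.integers(0, 5000, 10000)},  # frames
                 filterKeys={'x': (0, 20e3), 'y': (0, 20e3)})

dens, per_um2_5k, per_400um2_frame, trange = evtDensity(p)
print('%.1f events/um^2 over %d frames' % (dens, trange))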
PYMEcs/Analysis/fitDarkTimes.py
@@ -0,0 +1,569 @@
+ import numpy as np
+
+ # this is code to obtain dark times from tabular event columns
+ #
+ # it works but likely needs a code overhaul;
+ # this includes both the fitting and
+ # how the input pipeline is handled - conceivably it
+ # should just receive 1D vectors and be completely
+ # ignorant of any pipeline/datasource origin
+
+ import logging
+ logger = logging.getLogger(__name__)
+
+ import PYME.IO.tabular as tabular
+ # quick tabular class that wraps a recarray,
+ # allows adding new columns
+ # and inherits tabular I/O
+ class TabularRecArrayWrap(tabular.TabularBase):
+     def __init__(self, recarray, validCols=None):
+         self._recarray = recarray
+         self.new_columns = {}
+         if validCols is not None:
+             self._recarrayKeys = validCols
+         else:
+             self._recarrayKeys = list(self._recarray.dtype.fields.keys())
+
+     def keys(self):
+         return list(set(list(self._recarrayKeys) + list(self.new_columns.keys())))
+
+     def __getitem__(self, keys):
+         key, sl = self._getKeySlice(keys)
+         if key in self._recarrayKeys:
+             return self._recarray[key][sl]
+         else:
+             return self.new_columns[key][sl]
+
+     def addColumn(self, name, values):
+         """
+         Adds a column of values to the tabular measure.
+
+         Parameters
+         ----------
+         name : str
+             The new column name
+         values : array-like
+             The values. This should be the same length as the existing columns.
+
+         """
+
+         # force to be an array
+         values = np.array(values)
+
+         if not len(values) == self._recarray.shape[0]:
+             raise RuntimeError('New column does not match the length of existing columns')
+
+         self.new_columns[name] = values
+
+     def getZeroColumn(self, dtype='float64'):
+         return np.zeros(self._recarray.shape, dtype=dtype)
+
+     def addNewColumnByID(self, fromids, colname, valsByID):
+         if not np.all(np.in1d(fromids, self['objectID'])):
+             logger.warning('some ids not present in measurements')
+         # limit everything below to those IDs present in the events
+         fromids1 = fromids[np.in1d(fromids, self['objectID'])]
+         # this expression finds the lookup table to locate
+         # ids in fromids in self['objectID'],
+         # i.e. we should have self['objectID'][idx] == fromids
+         idx = np.nonzero((fromids1[None, :] == self['objectID'][:, None]).T)[1]
+         if not np.all(self['objectID'][idx] == fromids1):
+             raise RuntimeError('Lookup error - this should not happen')
+
+         newcol = self.getZeroColumn(dtype='float64')
+         # make sure we match the fromids1 shape in the assignment
+         newcol[idx] = valsByID[np.in1d(fromids, self['objectID'])]
+         self.addColumn(colname, newcol)
+
+     def lookupByID(self, ids, key):
+         idi = ids.astype('int')
+         uids = np.unique(idi[idi.nonzero()])
+         uids_avail = uids[np.in1d(uids, self['objectID'])]
+         idx = np.nonzero((uids_avail[None, :] == self['objectID'][:, None]).T)[1]
+         valsByID = self[key][idx]  # the values from column 'key' matching uids_avail
+
+         lut = np.zeros(uids_avail.max() + 1, dtype='float64')
+         lut[uids_avail] = valsByID
+
+         lu = np.zeros_like(idi, dtype='float64')
+         idiflat = idi.ravel()
+         luflat = lu.ravel()
+         luflat[np.in1d(idiflat, uids_avail)] = lut[idiflat[np.in1d(idiflat, uids_avail)]]
+         return lu
+
+
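A minimal usage sketch, assuming a PYME installation (the wrapper inherits its key/slice handling and I/O from PYME.IO.tabular.TabularBase); the column values are made up: wrap a small recarray, add a derived column, and read it back.

import numpy as np

base = np.zeros(4, dtype=[('objectID', 'i4'), ('tau1', 'f4')])
base['objectID'] = [1, 2, 3, 4]
base['tau1'] = [10., 20., 30., 40.]

wrapped = TabularRecArrayWrap(base)
wrapped.addColumn('qIndex', 100.0 / base['tau1'])  # derived column lives in new_columns
print(sorted(wrapped.keys()))   # ['objectID', 'qIndex', 'tau1']
print(wrapped['qIndex'])        # [10. 5. 3.33 2.5]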
+ def mergeChannelMeasurements(channels, measures):
+     master = measures[0]['objectID']
+     for chan, meas in zip(channels, measures):
+         if meas['objectID'].size != master.size:
+             raise RuntimeError('channel %s does not have same size as channel %s' % (chan, channels[0]))
+         if not np.all(meas['objectID'] == master):
+             raise RuntimeError('channel %s object IDs do not match channel %s object IDs' % (chan, channels[0]))
+     mergedIDs = np.zeros(master.size, dtype=[('objectID', 'i4')])
+     mergedIDs['objectID'] = master  # fill the field explicitly
+     mergedMeas = TabularRecArrayWrap(mergedIDs)
+
+     for chan, meas in zip(channels, measures):
+         for key in meas.keys():
+             if key != 'objectID':
+                 mergedMeas.addColumn('%s_%s' % (key, chan), meas[key])
+
+     return mergedMeas
+
+
+ def safeRatio(mmeas, div11, div22):
+     mzeros = mmeas.getZeroColumn(dtype='float')
+     div1 = mzeros + div11  # this converts scalars if needed
+     div2 = mzeros + div22
+     ratio = np.zeros_like(div1)
+     d1good = np.logical_not(np.isnan(div1))
+     d2good = div2 > 0
+     allgood = d1good * d2good
+     ratio[allgood] = div1[allgood] / div2[allgood]
+     return ratio
+
+
+ def makeRatio(meas, key, div1, div2):
+     meas.addColumn(key, safeRatio(meas, div1, div2))
+
+
+ def makeSum(meas, key, add11, add22):
+     mzeros = meas.getZeroColumn(dtype='float')
+     add1 = mzeros + add11
+     add2 = mzeros + add22
+     msum = np.zeros_like(add1)
+     a1good = np.logical_not(np.isnan(add1))
+     a2good = np.logical_not(np.isnan(add2))
+     allgood = a1good * a2good
+     msum[allgood] = add1[allgood] + add2[allgood]
+     meas.addColumn(key, msum)
+
+
+ def channelName(key, chan):
+     return '%s_%s' % (key, chan)
+
+
+ def channelColumn(meas, key, chan):
+     fullkey = channelName(key, chan)
+     return meas[fullkey]
+
+
+ def mergedMeasurementsRatios(mmeas, chan1, chan2, cal1, cal2):
+     for chan, cal in zip([chan1, chan2], [cal1, cal2]):
+         # if channelName('qIndex',chan) not in mmeas.keys():
+         #     makeRatio(mmeas, channelName('qIndex',chan), 100.0, channelColumn(mmeas,'tau1',chan))
+         if channelName('qIndexC', chan) not in mmeas.keys():
+             makeRatio(mmeas, channelName('qIndexC', chan), channelColumn(mmeas, 'qIndex', chan), cal)
+         if (channelName('qDensity', chan) not in mmeas.keys()) and (channelName('area', chan) in mmeas.keys()):
+             makeRatio(mmeas, channelName('qDensity', chan), channelColumn(mmeas, 'qIndex', chan),
+                       channelColumn(mmeas, 'area', chan))
+         if (channelName('qDensityC', chan) not in mmeas.keys()) and (channelName('area', chan) in mmeas.keys()):
+             makeRatio(mmeas, channelName('qDensityC', chan), channelColumn(mmeas, 'qIndexC', chan),
+                       channelColumn(mmeas, 'area', chan))
+     makeRatio(mmeas, channelName('qRatio', '%svs%s' % (chan1, chan2)),
+               channelColumn(mmeas, 'qIndex', chan1),
+               channelColumn(mmeas, 'qIndex', chan2))
+     makeRatio(mmeas, channelName('qRatioC', '%svs%s' % (chan1, chan2)),
+               channelColumn(mmeas, 'qIndexC', chan1),
+               channelColumn(mmeas, 'qIndexC', chan2))
+
+
+ # dark-time fitting section
+ from scipy.optimize import curve_fit
+
+ def cumuexpfit(t, tau):
+     return 1 - np.exp(-t / tau)
+
+ def cumumultiexpfit(t, tau1, tau2, a):
+     return a * (1 - np.exp(-t / tau1)) + (1 - a) * (1 - np.exp(-t / tau2))
+
+ def mkcmexpfit(tau2):
+     def fitfunc(t, tau1, a):
+         return a * (1 - np.exp(-t / tau1)) + (1 - a) * (1 - np.exp(-t / tau2))
+     return fitfunc
+
+ def notimes(ndarktimes):
+     # NaN rather than None, so the values can be assigned into float
+     # record fields and tested with np.isnan downstream
+     analysis = {
+         'NDarktimes': ndarktimes,
+         'tau1': [np.nan, np.nan, np.nan, np.nan],
+         'tau2': [np.nan, np.nan, np.nan, np.nan]
+     }
+     return analysis
+
+
+ def cumuhist(timeintervals):
+     ti = timeintervals
+     nIntervals = ti.shape[0]
+     cumux = np.sort(ti + 0.01 * np.random.random(nIntervals))  # hack: adding a little random noise ensures uniqueness of x values
+     cumuy = (1.0 + np.arange(nIntervals)) / float(nIntervals)
+     return (cumux, cumuy)
+
+
+ def cumuhistBinned(timeintervals):
+     binedges = 0.5 + np.arange(0, timeintervals.max())
+     binctrs = 0.5 * (binedges[0:-1] + binedges[1:])
+     h, be2 = np.histogram(timeintervals, bins=binedges)
+     hc = np.cumsum(h) / float(timeintervals.shape[0])  # normalise
+     hcg = hc[h > 0]  # only nonzero bins
+     binctrsg = binctrs[h > 0]
+
+     return (binctrs, hc, binctrsg, hcg)
+
+ import math
+ def cumuhistBinnedLog(timeintervals, dlog=0.1, return_hist=False, return_good=False):
+     binmax = int((math.log10(timeintervals.max()) - 1.0 - dlog) / dlog + 2.0)
+     binedges = np.append(0.5 + np.arange(10), 10.0 ** (1.0 + dlog * (np.arange(binmax) + 1.0)))
+     binctrs = 0.5 * (binedges[0:-1] + binedges[1:])
+     h, be2 = np.histogram(timeintervals, bins=binedges)
+     hc = np.cumsum(h) / float(timeintervals.shape[0])  # normalise
+     hcg = hc[h > 0]  # only nonzero bins
+     binctrsg = binctrs[h > 0]
+
+     retvals = [binctrs, hc]
+     if return_good:
+         retvals = retvals + [binctrsg, hcg]
+     if return_hist:
+         retvals = retvals + [h / float(timeintervals.shape[0])]
+
+     return retvals
+
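A quick sanity check of these helpers (not part of the module): draw exponentially distributed gaps with a known tau and confirm that fitting cumuexpfit to the empirical cumulative histogram recovers it.

import numpy as np
from scipy.optimize import curve_fit

rng = np.random.default_rng(42)
true_tau = 25.0
gaps = np.ceil(rng.exponential(true_tau, size=2000))  # integer frame gaps, as in real data

cumux, cumuy = cumuhist(gaps)
popt, pcov = curve_fit(cumuexpfit, cumux, cumuy, p0=(true_tau,))
print('fitted tau = %.1f frames (true %.1f)' % (popt[0], true_tau))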
+ def fitDarktimes(t):
+     # determine dark times from gaps and reject zeros (no real gaps)
+     nts = 0  # initialise to safe default
+     NTMIN = 5
+
+     if t.size > NTMIN:
+         dts = t[1:] - t[0:-1] - 1
+         dtg = dts[dts > 0]
+         nts = dtg.shape[0]
+
+     if nts > NTMIN:
+         # now make a cumulative histogram from these
+         cumux, cumuy = cumuhist(dtg)
+         try:
+             tauEst = cumux[(np.abs(cumuy - 0.63)).argmin()]
+         except ValueError:
+             tauEst = 100.0
+         # generate an alternative histogram with binning
+         binctrs, hc, binctrsg, hcg = cumuhistBinned(dtg)
+         try:
+             tauEstH = binctrsg[(np.abs(hcg - 0.63)).argmin()]
+         except ValueError:
+             tauEstH = 100.0
+
+         success = True
+         # fit theoretical distributions
+         try:
+             popth, pcovh, infodicth, errmsgh, ierrh = curve_fit(cumuexpfit, binctrs, hc, p0=(tauEstH,), full_output=True)
+         except Exception:
+             success = False
+         else:
+             if hc.shape[0] > 1:
+                 chisqredh = ((hc - infodicth['fvec'])**2).sum() / (hc.shape[0] - 1)
+             else:
+                 chisqredh = 0
+         try:
+             popt, pcov, infodict, errmsg, ierr = curve_fit(cumuexpfit, cumux, cumuy, p0=(tauEst,), full_output=True)
+         except Exception:
+             success = False
+         else:
+             chisqred = ((cumuy - infodict['fvec'])**2).sum() / (nts - 1)
+
+         if success:
+             analysis = {
+                 'NDarktimes': nts,
+                 'tau1': [popt[0], np.sqrt(pcov[0][0]), chisqred, tauEst],     # cumuhist based
+                 'tau2': [popth[0], np.sqrt(pcovh[0][0]), chisqredh, tauEstH]  # cumuhistBinned based
+             }
+         else:
+             analysis = notimes(nts)
+     else:
+         analysis = notimes(nts)
+
+     return analysis
+
+ measureDType = [('objectID', 'i4'), ('t', 'i4'), ('x', 'f4'), ('y', 'f4'),
+                 ('NEvents', 'i4'), ('NDarktimes', 'i4'), ('tau1', 'f4'),
+                 ('tau2', 'f4'), ('tau1err', 'f4'), ('tau2err', 'f4'),
+                 ('chisqr1', 'f4'), ('chisqr2', 'f4'), ('tau1est', 'f4'), ('tau2est', 'f4'),
+                 ('NDefocused', 'i4'), ('NDefocusedFrac', 'f4')]
+
+
+ def measure(obj, measurements=None):
+     # allocate a fresh record when none is passed in
+     # (avoids the shared-mutable-default-argument trap)
+     if measurements is None:
+         measurements = np.zeros(1, dtype=measureDType)
+
+     measurements['NEvents'] = obj['t'].shape[0]
+     measurements['t'] = np.median(obj['t'])
+     measurements['x'] = obj['x'].mean()
+     measurements['y'] = obj['y'].mean()
+
+     t = obj['t']
+
+     darkanalysis = fitDarktimes(t)
+     measurements['tau1'] = darkanalysis['tau1'][0]
+     measurements['tau2'] = darkanalysis['tau2'][0]
+     measurements['tau1err'] = darkanalysis['tau1'][1]
+     measurements['tau2err'] = darkanalysis['tau2'][1]
+     measurements['chisqr1'] = darkanalysis['tau1'][2]
+     measurements['chisqr2'] = darkanalysis['tau2'][2]
+     measurements['tau1est'] = darkanalysis['tau1'][3]
+     measurements['tau2est'] = darkanalysis['tau2'][3]
+
+     measurements['NDarktimes'] = darkanalysis['NDarktimes']
+
+     return measurements
+
+
+ def measureObjectsByID(filter, ids, sigDefocused=None):
+     # IMPORTANT: repeated filter access is extremely costly,
+     # so we cache all needed filter columns up front
+     x = filter['x']
+     y = filter['y']
+     id = filter['objectID'].astype('i')
+     t = filter['t']
+     sig = filter['sig']  # we must do our own caching!
+
+     measurements = np.zeros(len(ids), dtype=measureDType)
+
+     for j, i in enumerate(ids):
+         if not i == 0:
+             if np.all(np.in1d(i, id)):  # check that this ID is present in the data
+                 ind = id == i
+                 obj = {'x': x[ind], 'y': y[ind], 't': t[ind]}
+                 measure(obj, measurements[j])
+                 # measure the fraction of defocused localisations to gauge how "3D" an object is
+                 if sigDefocused is not None:
+                     measurements[j]['NDefocused'] = np.sum(sig[ind] > sigDefocused)
+                     measurements[j]['NDefocusedFrac'] = float(measurements[j]['NDefocused']) / measurements[j]['NEvents']
+             else:
+                 for key in measurements[j].dtype.fields.keys():
+                     measurements[j][key] = 0
+             measurements[j]['objectID'] = i
+
+     # wrap the recarray in a tabular that allows us to
+     # easily add columns and save using tabular methods
+     return TabularRecArrayWrap(measurements)
+
+
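A usage sketch: the function only indexes its filter argument, so a plain dict of synthetic columns is enough (all values below are hypothetical; PYME still needs to be importable for the returned wrapper).

import numpy as np

rng = np.random.default_rng(0)
n = 600
flt = {'x': rng.normal(0, 100, n).astype('f4'),
       'y': rng.normal(0, 100, n).astype('f4'),
       't': np.sort(rng.integers(0, 20000, n)).astype('i4'),
       'sig': np.full(n, 130.0, dtype='f4'),
       'objectID': rng.integers(1, 4, n)}

meas = measureObjectsByID(flt, ids=np.unique(flt['objectID']), sigDefocused=200.0)
print(meas['objectID'], meas['tau1'], meas['NDefocusedFrac'])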
+ def retrieveMeasuresForIDs(measurements, idcolumn, columns=['tau1', 'NDarktimes', 'qIndex']):
+     newcols = {key: np.zeros_like(idcolumn, dtype='float64') for key in columns}
+
+     for j, i in enumerate(measurements['objectID']):
+         if not i == 0:
+             ind = idcolumn == i
+             for col in newcols.keys():
+                 if not np.isnan(measurements[col][j]):
+                     newcols[col][ind] = measurements[col][j]
+
+     return newcols
+
+
+ from traits.api import HasTraits, CStr, Enum, Float, Bool
+ class FitSettings(HasTraits):
+     coalescedProcessing = Enum(['useClumpIndexOnly', 'useTminTmaxIfAvailable'])
+     cumulativeDistribution = Enum(['binned', 'empirical'])
+     fitMode = Enum(['SingleMode', 'TwoModes'])
+     Tau2Constant = Bool(False)
+     Tau2FixedValue = Float(2.0)
+     IDcolumn = CStr('objectID')
+
+
+ measureDType2 = [('objectID', 'i4'),
+                  ('NEvents', 'i4'), ('NEventsCorr', 'i4'), ('NDarktimes', 'i4'),
+                  ('tau1', 'f4'), ('tau2', 'f4'), ('tau1err', 'f4'), ('tau2err', 'f4'),
+                  ('amp1', 'f4'), ('amp1err', 'f4'),
+                  ('chisqr', 'f4'), ('tau1est', 'f4'), ('qindex', 'f4')]
+
+ def measureObjectsByID2(datasource, idname='objectID', settings=FitSettings()):
+     # probably an old caveat, check if still true: repeated filter/datasource
+     # access is extremely costly, so we cache column access below
+     ds = datasource
+     idDs = ds[idname].astype('i')
+
+     idall = np.unique(ds[idname].astype('int'))
+     ids = idall[idall > 0]  # only accept nonzero IDs
+
+     meas = np.zeros(ids.size, dtype=measureDType2)
+     darkTimes = []
+     times = []
+
+     # check which type of time processing we are going to use
+     if ('clumpIndex' in ds.keys()) and not ('fitError_x0' in ds.keys()):  # heuristic: only do this on coalesced data
+         usingClumpIndex = True
+         if (settings.coalescedProcessing == 'useTminTmaxIfAvailable') and ('tmin' in ds.keys()) and ('tmax' in ds.keys()):
+             usingTminTmax = True
+         else:
+             usingTminTmax = False
+     else:
+         usingClumpIndex = False
+         usingTminTmax = False
+
+     fields = ['NEvents', 'NDarktimes', 'qindex']
+     if usingTminTmax:
+         fields.append('NEventsCorr')
+
+     # hold tau2 fixed at Tau2FixedValue only when Tau2Constant is set;
+     # otherwise a tau2const of 0.0 requests a free single-exponential fit
+     if settings.Tau2Constant:
+         tau2const = settings.Tau2FixedValue
+     else:
+         tau2const = 0.0
+
+     if settings.fitMode == 'SingleMode':
+         # retv = [tau1, tau1err, chisqr, tau1est]
+         mfields = ['tau1', 'tau1err', 'chisqr', 'tau1est']
+     else:
+         # retv = [tau1, tau2, atau1, tau1err, tau2err, atau1err, chisqr, tauest]
+         mfields = ['tau1', 'tau2', 'amp1', 'tau1err', 'tau2err', 'amp1err', 'chisqr', 'tau1est']
+
+     validCols = fields + mfields
+
+     # loop over object IDs
+     ndtmin = 5
+     for j, this_id in enumerate(ids):
+         if not this_id == 0:
+             if np.all(np.in1d(this_id, idDs)):  # check that this ID is present in the data
+                 idx = idDs == this_id
+                 # the actual per-object work
+                 te, dte, nevents, nevtscorr = extractEventTimes(ds, idx, useTminTmax=usingTminTmax)
+                 meas[j]['NEvents'] = nevents
+                 meas[j]['NDarktimes'] = dte.size
+                 darkTimes.append(dte)
+                 times.append(te)
+                 if usingTminTmax:
+                     meas[j]['NEventsCorr'] = nevtscorr
+                 if dte.size >= ndtmin:
+                     if settings.cumulativeDistribution == 'binned':
+                         xc, yc = cumuhistBinnedLog(dte, dlog=0.05)
+                     else:
+                         xc, yc = cumuhist(dte)
+
+                     try:
+                         retv = fitTaus(xc, yc, fitTau2=(settings.fitMode == 'TwoModes'), tau2const=tau2const, return_tau1est=True)
+                     except RuntimeError:
+                         pass  # we got a convergence error
+                     else:
+                         for i, field in enumerate(mfields):
+                             meas[j][field] = retv[i]
+                         meas[j]['qindex'] = 100.0 / meas[j]['tau1']
+             else:
+                 for key in meas[j].dtype.fields.keys():
+                     meas[j][key] = 0
+             meas[j]['objectID'] = this_id
+
+     # wrap the recarray in a tabular that allows us to
+     # easily add columns and save using tabular methods
+     rmeas = TabularRecArrayWrap(meas, validCols=validCols + ['objectID'])
+
+     return {'measures': rmeas,
+             'darkTimes': darkTimes,
+             'times': times,
+             'validColumns': validCols,
+             'state': {
+                 'usingClumpIndex': usingClumpIndex,
+                 'usingTminTmax': usingTminTmax,
+                 'IDcolumn': settings.IDcolumn,
+                 'coalescedProcessing': settings.coalescedProcessing,
+                 'Tau2Constant': settings.Tau2Constant,
+                 'Tau2FixedValue': settings.Tau2FixedValue,
+                 'FitMode': settings.fitMode
+             }}
+
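A usage sketch with synthetic, hypothetical data; a plain dict again suffices as the datasource, because the function only needs key access and .keys() (no clumpIndex column here, so the raw event times are used directly).

import numpy as np

rng = np.random.default_rng(1)
n = 800
ds = {'t': np.sort(rng.integers(0, 30000, n)).astype('i4'),
      'objectID': rng.integers(1, 4, n)}

res = measureObjectsByID2(ds, settings=FitSettings(cumulativeDistribution='empirical'))
print(res['measures']['tau1'])
print(res['state'])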
+ def retrieveMeasuresForIDs2(measurements, idcol):
+     validCols = measurements['validColumns']
+     measures = measurements['measures']
+
+     newcols = {key: np.zeros_like(idcol, dtype=measures._recarray.dtype.fields[key][0]) for key in validCols}
+
+     for j, id in enumerate(measures['objectID']):
+         if not id == 0:
+             ind = idcol == id
+             for col in newcols.keys():
+                 if not np.isnan(measures[col][j]):
+                     newcols[col][ind] = measures[col][j]
+
+     return newcols
+
+ def extractEventTimes(datasource, idx=None, useTminTmax=True, return_modes=False):
+     d = datasource
+     t = d['t']
+     if idx is None:
+         idx = np.ones_like(t, dtype='bool')
+     ti = t[idx]
+
+     nevents_corrected = None
+     # if we have coalesced events, use that info
+     if ('clumpIndex' in d.keys()) and not ('fitError_x0' in d.keys()):  # heuristic: only do this on coalesced data
+         usingClumpIndex = True
+         csz = d['clumpSize'][idx]
+         nevents = csz.sum()
+         if useTminTmax and ('tmin' in d.keys()) and ('tmax' in d.keys()):
+             tmin = d['tmin'][idx]
+             tmax = d['tmax'][idx]
+             tc = np.arange(tmin[0], tmax[0] + 1)
+             for i in range(1, tmin.size):
+                 tc = np.append(tc, np.arange(tmin[i], tmax[i] + 1))
+             tc.sort()
+             usingTminTmax = True
+             nevents_corrected = tc.size
+         else:
+             tc = np.arange(int(ti[0] - csz[0] / 2), int(ti[0] + csz[0] / 2))
+             for i in range(1, ti.size):
+                 # note ti[i], i.e. the ID-selected times, must be used here
+                 tc = np.append(tc, np.arange(int(ti[i] - csz[i] / 2), int(ti[i] + csz[i] / 2)))
+             tc.sort()
+             usingTminTmax = False
+     else:
+         tc = ti
+         usingTminTmax = False
+         usingClumpIndex = False
+         nevents = tc.size
+
+     dts = tc[1:] - tc[0:-1] - 1
+     dtg = dts[dts > 0]
+
+     if return_modes:
+         return (tc, dtg, nevents, nevents_corrected, usingClumpIndex, usingTminTmax)
+     else:
+         return (tc, dtg, nevents, nevents_corrected)
+
+
+ def fitTaus(x_t, y_h, fitTau2=False, tau2const=0.0, return_tau1est=False, tau2max=8.0):
+
+     # could be refined by subtracting off the histogram for values around 9 frames or so
+     # and then asking to reach 63% of the remaining difference to 1
+     idx = (np.abs(y_h - 0.63)).argmin()
+     tau1est = x_t[idx]
+
+     # further possibilities:
+     #   use tau2 but keep it fixed
+     #   add bounds on atau1 (between 0..1) and tau2 (0..8)
+
+     if fitTau2:
+         popt, pcov = curve_fit(cumumultiexpfit, x_t, y_h, p0=(tau1est, 2.0, 0.8), bounds=(0, (np.inf, tau2max, 1.0)))
+         (tau1, tau2, atau1) = popt
+         (tau1err, tau2err, atau1err) = np.sqrt(np.diag(pcov))
+         chisqr = ((y_h - cumumultiexpfit(x_t, *popt))**2).sum() / (x_t.size - 1)
+         results = [tau1, tau2, atau1, tau1err, tau2err, atau1err, chisqr]
+     else:
+         if tau2const < 1e-4:
+             popt, pcov = curve_fit(cumuexpfit, x_t, y_h, p0=(tau1est,))
+             (tau1, tau1err) = (popt[0], np.sqrt(pcov[0][0]))
+             chisqr = ((y_h - cumuexpfit(x_t, *popt))**2).sum() / (x_t.size - 1)
+             results = [tau1, tau1err, chisqr]
+         else:
+             popt, pcov = curve_fit(mkcmexpfit(tau2const), x_t, y_h, p0=(tau1est, 0.8), bounds=(0, (np.inf, 1.0)))
+             (tau1, atau1) = popt
+             (tau1err, atau1err) = np.sqrt(np.diag(pcov))
+             (tau2, tau2err) = (tau2const, 0)
+             chisqr = ((y_h - cumumultiexpfit(x_t, tau1, tau2, atau1))**2).sum() / (x_t.size - 1)
+             results = [tau1, tau2, atau1, tau1err, tau2err, atau1err, chisqr]
+
+     if return_tau1est:
+         results.append(tau1est)
+
+     return results
+
PYMEcs/Analysis/objectVolumes.py
@@ -0,0 +1,20 @@
+ # calculate object volumes for a given set of points;
+ # objects are characterised by ID, and the ID vector
+ # must match the first dimension of points
+ import numpy as np
+ from scipy.spatial import ConvexHull
+
+ def objectVolumes(points, ids):
+     idi = ids.astype('int')
+     volumes = np.zeros_like(idi, dtype='float')
+     idu = np.unique(idi)
+     for i in range(idu.shape[0]):
+         objectid = idu[i]
+         thisID = (idi == objectid)
+         if thisID.sum() > 2:
+             hull = ConvexHull(points[thisID, :])
+             volumes[thisID] = hull.volume
+         else:
+             volumes[thisID] = 0
+
+     return volumes
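A usage sketch on synthetic 3D point clouds (values hypothetical). Note that ConvexHull needs at least four points in general position in 3D, so the "more than two points" guard above does not rule out a QhullError for degenerate objects.

import numpy as np

rng = np.random.default_rng(2)
pts = np.vstack([rng.normal(0, 50, (100, 3)),     # object 1: tight cluster
                 rng.normal(0, 100, (150, 3))])   # object 2: looser cluster
ids = np.repeat([1, 2], [100, 150])

vols = objectVolumes(pts, ids)
print(vols[ids == 1][0], vols[ids == 2][0])  # one hull volume per object, broadcast to its points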