PYME-extra 1.0.4.post0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- PYMEcs/Acquire/Actions/__init__.py +0 -0
- PYMEcs/Acquire/Actions/custom.py +167 -0
- PYMEcs/Acquire/Hardware/LPthreadedSimple.py +248 -0
- PYMEcs/Acquire/Hardware/LPthreadedSimpleSim.py +246 -0
- PYMEcs/Acquire/Hardware/NikonTiFlaskServer.py +45 -0
- PYMEcs/Acquire/Hardware/NikonTiFlaskServerT.py +59 -0
- PYMEcs/Acquire/Hardware/NikonTiRESTClient.py +73 -0
- PYMEcs/Acquire/Hardware/NikonTiSim.py +35 -0
- PYMEcs/Acquire/Hardware/__init__.py +0 -0
- PYMEcs/Acquire/Hardware/driftTrackGUI.py +329 -0
- PYMEcs/Acquire/Hardware/driftTrackGUI_n.py +472 -0
- PYMEcs/Acquire/Hardware/driftTracking.py +424 -0
- PYMEcs/Acquire/Hardware/driftTracking_n.py +433 -0
- PYMEcs/Acquire/Hardware/fakeCamX.py +15 -0
- PYMEcs/Acquire/Hardware/offsetPiezoRESTCorrelLog.py +38 -0
- PYMEcs/Acquire/__init__.py +0 -0
- PYMEcs/Analysis/MBMcollection.py +552 -0
- PYMEcs/Analysis/MINFLUX.py +280 -0
- PYMEcs/Analysis/MapUtils.py +77 -0
- PYMEcs/Analysis/NPC.py +1176 -0
- PYMEcs/Analysis/Paraflux.py +218 -0
- PYMEcs/Analysis/Simpler.py +81 -0
- PYMEcs/Analysis/Sofi.py +140 -0
- PYMEcs/Analysis/__init__.py +0 -0
- PYMEcs/Analysis/decSofi.py +211 -0
- PYMEcs/Analysis/eventProperties.py +50 -0
- PYMEcs/Analysis/fitDarkTimes.py +569 -0
- PYMEcs/Analysis/objectVolumes.py +20 -0
- PYMEcs/Analysis/offlineTracker.py +130 -0
- PYMEcs/Analysis/stackTracker.py +180 -0
- PYMEcs/Analysis/timeSeries.py +63 -0
- PYMEcs/Analysis/trackFiducials.py +186 -0
- PYMEcs/Analysis/zerocross.py +91 -0
- PYMEcs/IO/MINFLUX.py +851 -0
- PYMEcs/IO/NPC.py +117 -0
- PYMEcs/IO/__init__.py +0 -0
- PYMEcs/IO/darkTimes.py +19 -0
- PYMEcs/IO/picasso.py +219 -0
- PYMEcs/IO/tabular.py +11 -0
- PYMEcs/__init__.py +0 -0
- PYMEcs/experimental/CalcZfactor.py +51 -0
- PYMEcs/experimental/FRC.py +338 -0
- PYMEcs/experimental/ImageJROItools.py +49 -0
- PYMEcs/experimental/MINFLUX.py +1537 -0
- PYMEcs/experimental/NPCcalcLM.py +560 -0
- PYMEcs/experimental/Simpler.py +369 -0
- PYMEcs/experimental/Sofi.py +78 -0
- PYMEcs/experimental/__init__.py +0 -0
- PYMEcs/experimental/binEventProperty.py +187 -0
- PYMEcs/experimental/chaining.py +23 -0
- PYMEcs/experimental/clusterTrack.py +179 -0
- PYMEcs/experimental/combine_maps.py +104 -0
- PYMEcs/experimental/eventProcessing.py +93 -0
- PYMEcs/experimental/fiducials.py +323 -0
- PYMEcs/experimental/fiducialsNew.py +402 -0
- PYMEcs/experimental/mapTools.py +271 -0
- PYMEcs/experimental/meas2DplotDh5view.py +107 -0
- PYMEcs/experimental/mortensen.py +131 -0
- PYMEcs/experimental/ncsDenoise.py +158 -0
- PYMEcs/experimental/onTimes.py +295 -0
- PYMEcs/experimental/procPoints.py +77 -0
- PYMEcs/experimental/pyme2caml.py +73 -0
- PYMEcs/experimental/qPAINT.py +965 -0
- PYMEcs/experimental/randMap.py +188 -0
- PYMEcs/experimental/regExtraCmaps.py +11 -0
- PYMEcs/experimental/selectROIfilterTable.py +72 -0
- PYMEcs/experimental/showErrs.py +51 -0
- PYMEcs/experimental/showErrsDh5view.py +58 -0
- PYMEcs/experimental/showShiftMap.py +56 -0
- PYMEcs/experimental/snrEvents.py +188 -0
- PYMEcs/experimental/specLabeling.py +51 -0
- PYMEcs/experimental/splitRender.py +246 -0
- PYMEcs/experimental/testChannelByName.py +36 -0
- PYMEcs/experimental/timedSpecies.py +28 -0
- PYMEcs/experimental/utils.py +31 -0
- PYMEcs/misc/ExtraCmaps.py +177 -0
- PYMEcs/misc/__init__.py +0 -0
- PYMEcs/misc/configUtils.py +169 -0
- PYMEcs/misc/guiMsgBoxes.py +27 -0
- PYMEcs/misc/mapUtils.py +230 -0
- PYMEcs/misc/matplotlib.py +136 -0
- PYMEcs/misc/rectsFromSVG.py +182 -0
- PYMEcs/misc/shellutils.py +1110 -0
- PYMEcs/misc/utils.py +205 -0
- PYMEcs/misc/versionCheck.py +20 -0
- PYMEcs/misc/zcInfo.py +90 -0
- PYMEcs/pyme_warnings.py +4 -0
- PYMEcs/recipes/__init__.py +0 -0
- PYMEcs/recipes/base.py +75 -0
- PYMEcs/recipes/localisations.py +2380 -0
- PYMEcs/recipes/manipulate_yaml.py +83 -0
- PYMEcs/recipes/output.py +177 -0
- PYMEcs/recipes/processing.py +247 -0
- PYMEcs/recipes/simpler.py +290 -0
- PYMEcs/version.py +2 -0
- pyme_extra-1.0.4.post0.dist-info/METADATA +114 -0
- pyme_extra-1.0.4.post0.dist-info/RECORD +101 -0
- pyme_extra-1.0.4.post0.dist-info/WHEEL +5 -0
- pyme_extra-1.0.4.post0.dist-info/entry_points.txt +3 -0
- pyme_extra-1.0.4.post0.dist-info/licenses/LICENSE +674 -0
- pyme_extra-1.0.4.post0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1110 @@
|
|
|
1
|
+
from __future__ import print_function # (at top of module)
|
|
2
|
+
from PYME.Analysis.piecewise import piecewiseLinear
|
|
3
|
+
import matplotlib.pylab as plt
|
|
4
|
+
import numpy as np
|
|
5
|
+
import math
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def genFitImage(fitResults, mdh):
    """Render a model image for `fitResults` using the fit module named in the
    metadata entry 'Analysis.FitModule'.

    Returns the squeezed fit image, or None when the fit module does not
    provide a genFitImage routine.
    """
    import importlib
    # importlib.import_module is the idiomatic replacement for __import__
    # with a fromlist and returns the submodule directly
    fitMod = importlib.import_module('PYME.localization.FitFactories.' +
                                     mdh.getEntry('Analysis.FitModule'))
    if hasattr(fitMod, 'genFitImage'):
        return fitMod.genFitImage(fitResults, mdh).squeeze()
    return None
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def bigClumpStats(pipeline, minsize=15):
    """Per-clump summary statistics for clumps with more than `minsize` events.

    pipeline: mapping-style data source with 'clumpIndex', 'clumpSize', 'x',
    'error_x', 'fitResults_background' and 'nPhotons' columns plus an `mdh`
    metadata attribute.
    Returns a pandas DataFrame (background converted to electrons via
    Camera.ElectronsPerCount), or None when no clump is large enough.
    """
    import numpy as np
    import pandas as pd

    p = pipeline  # convenient shortcut
    bgn = 'fitResults_background'

    ci, idx = np.unique(p['clumpIndex'], return_index=True)
    cisz = p['clumpSize'][idx]
    ci2 = ci[cisz > minsize]

    if ci2.size < 1:
        return None

    x = np.zeros_like(ci2, dtype='f')
    sx = np.zeros_like(ci2, dtype='f')
    ex = np.zeros_like(ci2, dtype='f')
    bg = np.zeros_like(ci2, dtype='f')
    # BUGFIX: this array used to be bound to the name 'np', shadowing numpy
    nphot = np.zeros_like(ci2, dtype='f')

    for j, this_ci in enumerate(ci2):
        this_idx = p['clumpIndex'] == this_ci
        x[j] = p['x'][this_idx].mean()
        sx[j] = p['x'][this_idx].std()
        ex[j] = p['error_x'][this_idx].mean()
        bg[j] = p[bgn][this_idx].mean()
        nphot[j] = p['nPhotons'][this_idx].mean()

    epc = p.mdh['Camera.ElectronsPerCount']

    return pd.DataFrame.from_dict({'x': x, 'x_std': sx, 'error_x': ex,
                                   'background': bg * epc, 'nPhotons': nphot,
                                   'clumpIndex': ci2})
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def test_fiducial(pipeline, mode='Gaussian', scale=11.0):
    """Extract fiducial trajectories from `pipeline` and return the averaged,
    filtered track (filter type `mode`, width `scale`)."""
    import PYMEcs.Analysis.trackFiducials as tf
    t_f, x_f, y_f, z_f, isFiducial = tf.extractTrajectoriesClump(pipeline)
    tracks = [t_f, x_f, y_f, z_f]
    return tf.AverageTrack(pipeline, tracks, filter=mode, filterScale=scale)
|
|
56
|
+
|
|
57
|
+
def tclump(pipeline):
    """Expanded per-frame times for all events: one entry for every frame in
    [tmin, tmax] of each event, returned sorted ascending.
    """
    p = pipeline
    # build all per-event ranges first and concatenate once -
    # the previous np.append in a loop reallocated the array each
    # iteration (accidental O(n^2))
    ranges = [np.arange(p['tmin'][i], p['tmax'][i] + 1)
              for i in range(p['t'].shape[0])]
    tc = np.concatenate(ranges)
    tc.sort()
    return tc
|
|
64
|
+
|
|
65
|
+
def plotserpipeline(t, val, base=0):
    """Prepare a time series for plotting, inserting `base`-valued sentinel
    points just inside every gap so that line plots drop to the baseline
    between disjoint runs.

    t: integer frame times (as from the pipeline); val: matching values.
    Returns (times, values) ready for plotting.
    """
    t0 = t.min() - 1          # one frame of padding before the first event
    span = int(t.max() + 1 - t0 + 1)

    offsets = t - t0
    order = np.argsort(offsets)
    pos = offsets[order].astype('I')
    ordered_vals = val[order]

    present = np.zeros((span))
    present[pos] = 1
    series = np.zeros((span))
    series[pos] = ordered_vals

    # mark a baseline point immediately before each run that follows a gap
    gap = pos[1:] - pos[:-1] > 1
    after_gap = pos[1:][gap]
    present[after_gap - 1] = 1
    series[after_gap - 1] = base

    # ... and immediately after each run that precedes a gap
    before_gap = pos[:-1][gap]
    present[before_gap + 1] = 1
    series[before_gap + 1] = base

    keep = present.nonzero()[0]
    return (keep + t0, series[keep])
|
|
97
|
+
|
|
98
|
+
# python3 compatible version to grep locals in PYME GUI shells
|
|
99
|
+
# can be used to search through the variable names available in the shell
|
|
100
|
+
def greplocals(expr):
    """Search the caller's local variable names for the substring `expr`.

    Python-3 compatible helper to grep the variables available in PYME GUI
    shells; returns a list of matching (name, value) pairs.
    """
    import inspect
    caller = inspect.currentframe().f_back
    matches = []
    for name, value in caller.f_locals.items():
        if expr in name:
            matches.append((name, value))
    return matches
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def showPckgVersions():
    # Print the package versions gathered by PYME's own diagnostics helper
    # (useful for bug reports from a GUI shell).
    from PYME.ui.progress import get_package_versions
    print(get_package_versions())
|
|
110
|
+
|
|
111
|
+
# convert paste board to UTF-16 little endian
|
|
112
|
+
# this is what pasting into the shell tab appears to require
|
|
113
|
+
def convertpb2utf16le():
    """Convert the macOS paste board to UTF-16 little endian.

    Pasting into the shell tab appears to require this encoding.
    Raises RuntimeError on any platform other than macOS
    (relies on the pbpaste/pbcopy command line tools).
    """
    import os
    from sys import platform
    if platform != "darwin":
        raise RuntimeError('function only available on mac')
    os.system("pbpaste | iconv -f ascii -t utf-16le | pbcopy")
|
|
120
|
+
|
|
121
|
+
def getvar(varname, inmodule=False):
    """Fetch a variable from the calling shell's local scope.

    varname: name to look up in the caller's frame locals.
    inmodule: set True when calling from within a module so the lookup goes
    one frame further up (to the module's caller).
    Returns the value, or None when the name is not present.
    """
    import inspect
    frame = inspect.currentframe()
    try:
        gframe = frame.f_back
        # go up one level further if called within module
        if inmodule:
            gframe = gframe.f_back
        # dict.get replaces the previous bare try/except which could mask
        # unrelated errors
        return gframe.f_locals.get(varname)
    finally:
        # break the frame reference cycle (per the inspect module docs)
        del frame
|
|
135
|
+
|
|
136
|
+
def getmdh(inmodule=False):
    """Locate the metadata handler in the calling shell's local scope.

    Looks for 'mdv' (dh5view) first, then 'mdp' (VisGUI); returns the found
    object's .mdh attribute, or None when neither name is present.
    inmodule: set True when calling from within a module so the lookup goes
    one frame further up.
    """
    import inspect
    frame = inspect.currentframe()
    try:
        gframe = frame.f_back
        # go up one level further if called within module
        if inmodule:
            gframe = gframe.f_back
        # explicit .get lookups replace the previous nested bare excepts,
        # which could mask unrelated errors
        mp = gframe.f_locals.get('mdv')      # first for dh5view
        if mp is None:
            mp = gframe.f_locals.get('mdp')  # alternatively VisGui
    finally:
        # break the frame reference cycle (per the inspect module docs)
        del frame

    if mp is not None:
        return mp.mdh
    return None
|
|
159
|
+
|
|
160
|
+
from PYME.IO import MetaDataHandler
|
|
161
|
+
def mdhnogui(filename):
    """Open an HDF5 results file without a GUI and return {'h5f', 'mdh'}.

    The caller is responsible for closing the returned h5f handle.
    """
    import tables
    # PyTables 3 renamed openFile -> open_file; support both APIs
    opener = getattr(tables, 'open_file', None)
    if opener is None:
        opener = tables.openFile
    h5f = opener(filename)
    mdh = MetaDataHandler.HDFMDHandler(h5f)
    return {'h5f': h5f, 'mdh': mdh}
|
|
166
|
+
|
|
167
|
+
def _mcheck(mdh, key):
    """Missing-tolerant metadata lookup: mdh[key] if present, else None."""
    if key not in mdh.keys():
        return None
    return mdh[key]
|
|
172
|
+
|
|
173
|
+
def _tformat(timeval):
    """Format a metadata time value for display.

    Values at or above the year-2000 epoch timestamp are treated as wall-clock
    times and get a human-readable suffix; smaller values are returned as-is.
    """
    import time
    epoch2000 = 946684800  # timestamp for year 2000 as heuristic
    if timeval < epoch2000:
        return timeval
    stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timeval))
    return "%s (%s)" % (timeval, stamp)
|
|
179
|
+
|
|
180
|
+
def seriestiming(mdh=None):
    """Print start/end time and duration of a series from its metadata.

    Accepts either top-level StartTime/EndTime keys or the Source.* variants;
    falls back to the shell's metadata handler when mdh is None.
    """
    if mdh is None:
        mdh = getmdh(inmodule=True)

    tinfo = {}
    if _mcheck(mdh, 'StartTime'):
        tinfo['start'] = mdh['StartTime']
        tinfo['end'] = _mcheck(mdh, 'EndTime')
    elif _mcheck(mdh, 'Source.StartTime'):
        tinfo['start'] = mdh['Source.StartTime']
        tinfo['end'] = _mcheck(mdh, 'Source.EndTime')
    else:
        print("no timing info found")
        return

    print("Start\t\t%s" % _tformat(tinfo['start']))
    if tinfo['end']:
        duration = tinfo['end'] - tinfo['start']
        print("End\t\t\t%s" % _tformat(tinfo['end']))
        print("Duration\t%.2f s (%.1f min)" % (duration, duration / 60.0))
|
|
197
|
+
|
|
198
|
+
def getDriftPars(mdh=None):
    """Read drift-correction info from metadata, plot the X/Y drift curves
    and return the fit parameter dict (or None when absent).

    mdh: metadata handler; defaults to the one found in the calling shell.
    """
    if mdh is None:
        mdh = getmdh(inmodule=True)
    try:
        dc = mdh['DriftCorrection']
    except Exception:
        print('could not find DriftCorrection info')
        return None
    else:
        print('found drift correction info')

    # estimate the number of frames or fall back to default
    try:
        frames = (mdh['Source.EndTime'] - mdh['Source.StartTime']) / mdh['Source.Camera.CycleTime']
    except Exception:
        frames = 2e4

    # BUGFIX: the original used exec('pars = ...') / exec('x = ...'), which
    # cannot rebind function locals on Python 3 (NameError on 'pars');
    # evaluate into an explicit namespace instead.
    # NOTE: metadata strings are evaluated - only use on trusted data.
    pars = eval(dc['Parameters'])  # dict literal, e.g. {'a0': ..., 'b0': ...}

    t = np.arange(frames)
    ns = {'t': t, 'np': np, 'math': math}
    ns.update(pars)  # expose a0..a4, b0..b4 (and any others) to the expressions
    x = eval(dc['ExprX'], ns)
    y = eval(dc['ExprY'], ns)

    plt.plot(t, x)
    plt.plot(t, y)
    plt.show()

    return pars
|
|
230
|
+
|
|
231
|
+
def visguiDriftPlot(driftpane):
    # Plot measured drift for the VisGUI drift pane and return the fitted and
    # raw coordinate arrays.
    #
    # driftpane: VisGUI drift-correction pane; driftpane.dp.driftCorrFcn
    # supplies generated fit code plus parameter names, and driftpane.visFr
    # provides access to the filtered data columns.
    parameterNames, indepVarsUsed, xCode, yCode, zCode , parameterNamesZ, varExpandCode, varExpandCodeZ = driftpane.dp.driftCorrFcn

    indepVars = driftpane.visFr.filter
    #t = np.linspace(indepVars['t'].min(), indepVars['t'].max())

    # placeholders, intended to be overwritten by the generated code below
    x = 0
    y = 0

    # raw and corrected drift columns from the pipeline
    driftx=driftpane.visFr['driftx']
    drifty=driftpane.visFr['drifty']
    x_raw=driftpane.visFr['x_raw']
    y_raw=driftpane.visFr['y_raw']
    t = driftpane.visFr['t']

    # current fit parameter values, in the order the generated code expects (list 'p')
    p = [driftpane.dp.driftCorrParams[pn] for pn in parameterNames]

    # NOTE(review): exec of generated code that assigns names does not rebind
    # function locals on Python 3 - confirm this still behaves as intended
    exec(varExpandCode)

    x1 = eval(xCode)
    y1 = eval(yCode)

    x = driftpane.visFr['x']
    y = driftpane.visFr['y']

    # reference offsets from the first 50 events so the curves start near zero
    xs = np.mean(x[0:50])
    ys = np.mean(y[0:50])

    plt.figure(1)
    plt.clf()
    #plt.plot(t,-x1)
    plt.plot(t,x-xs)
    plt.figure(2)
    plt.clf()

    #plt.plot(t,-y1)
    plt.plot(t,y-ys)
    plt.show()

    return (x1,y1,x,y)
|
|
271
|
+
|
|
272
|
+
def findSlide(mdh=None):
    """Look up the SampleDB Slide record referenced by the series metadata.

    Returns the first matching Slide, or None when the metadata has no
    Source.Sample.SlideRef entry or no matching record exists.
    """
    if mdh is None:
        mdh = getmdh(inmodule=True)

    try:
        slideref = mdh['Source.Sample.SlideRef']
    except Exception:
        return None

    from PYME.Acquire import sampleInformationDjangoDirect as sampleInformation
    from PYME.SampleDB2.samples import models

    matches = models.Slide.objects.filter(reference__contains=slideref)
    # guard against an empty query set (the original raised IndexError here)
    if len(matches) == 0:
        return None
    return matches[0]
|
|
287
|
+
|
|
288
|
+
# from PYME.SampleDB.samples import models
|
|
289
|
+
# qs3 = models.Slide.objects.filter(reference__contains='22_7_10_C')
|
|
290
|
+
# qs2 = models.Slide.objects.filter(slideID__exact=-1394421344L)
|
|
291
|
+
# sample=qs2[0].sample
|
|
292
|
+
# sample.sampleType
|
|
293
|
+
# sample.species
|
|
294
|
+
|
|
295
|
+
def imagestats():
    """Print basic intensity statistics of the currently displayed z-slice.

    Pulls 'image' and the display object 'do' from the calling shell's scope.
    """
    import math
    import scipy.ndimage as nd
    image = getvar('image', inmodule=True)
    if image is None:
        print('could not find image')
        return
    do = getvar('do', inmodule=True)
    if do is None:
        print('could not find display object')
        return

    data = image.data[:, :, do.zp].squeeze()
    dmed = nd.median(data)

    for fmt, value in (("mean:\t\t%f", data.mean()),
                       ("variance:\t%f", data.var()),
                       ("std dev:\t%f", data.std()),
                       ("median:\t\t%f", dmed),
                       ("med-sqrt:\t%f", math.sqrt(dmed))):
        print(fmt % value)
|
|
315
|
+
|
|
316
|
+
# generate a default basename
|
|
317
|
+
def defaultbase():
    """Generate a default basename from the current image's filename
    (basename with the extension stripped); None-safe when no image is found."""
    import os.path
    image = getvar('image', inmodule=True)
    if image is None:
        print('could not find image')
        return
    stem, _ext = os.path.splitext(os.path.basename(image.filename))
    return stem
|
|
324
|
+
|
|
325
|
+
def saveSelection(fname):
    """Persist the current rectangular slice selection to a text file.

    Writes the source image filename on the first line, then the selection
    bounds (lx, ly, hx, hy) tab separated on the second line.
    """
    do = getvar('do', inmodule=True)
    if do is None:
        print('could not find display object')
        return
    lx, ly, hx, hy = do.GetSliceSelection()
    image = getvar('image', inmodule=True)
    if image is None:
        print('could not find image')
        return
    filen = image.filename

    print('source file %s' % (filen))
    print('selection ', (lx, ly, hx, hy))
    # BUGFIX: the original called 'f.close' without parentheses so the file
    # was never closed; a context manager guarantees it
    with open(fname, 'w') as f:
        f.write("%s\n" % filen)
        for item in (lx, ly, hx, hy):
            f.write("%d\t" % item)
        f.write("\n")
|
|
346
|
+
|
|
347
|
+
def writecoords(filename, pipeline):
    """Write localisation coordinates to a whitespace separated text file.

    Each line is 'x y t' with x/y shifted so their minima sit at the origin
    (x/y to 3 decimals, t as an integer).
    """
    px = pipeline['x']
    py = pipeline['y']
    pt = pipeline['t']
    minx = px.min()
    miny = py.min()
    # context manager ensures the file is closed even on error
    with open(filename, 'w') as f:
        for xi, yi, ti in zip(px - minx, py - miny, pt):
            f.write("%.3f %.3f %d\n" % (xi, yi, ti))
|
|
358
|
+
|
|
359
|
+
|
|
360
|
+
def csvcoords(filename, pipeline, keys, fieldnames=None):
    """Export the pipeline columns named in `keys` to a CSV file.

    fieldnames: optional header names; defaults to `keys`.
    """
    import csv
    if fieldnames is None:
        fieldnames = keys
    # BUGFIX: binary mode ('wb') breaks the csv module on Python 3;
    # text mode with newline='' is the documented requirement
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',',
                            quotechar='#', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(fieldnames)
        pkeys = [pipeline[key] for key in keys] # cache the pipelines as these calls may be costly
        n = pipeline['x'].shape[0]
        for i in range(n):
            writer.writerow([pkey[i] for pkey in pkeys])
|
|
372
|
+
|
|
373
|
+
def randmapping(pipeline):
    # Add a 'rand1' column to the selected data source: one uniform random
    # number in [0, 1) per event ('0*x' just broadcasts to the shape of x).
    pipeline.selectedDataSource.setMapping('rand1','0*x+np.random.rand(x.size)')
|
|
376
|
+
def binSum(binVar, indepVar, bins):
    """Per-bin count, sum and standard deviation of `indepVar`, binned by
    `binVar` over the edge sequence `bins`.

    Returns (bn, bm, bs); empty bins yield zero sum and zero std. The sum
    array keeps indepVar's dtype (so complex input stays complex).
    """
    nbins = len(bins) - 1
    bm = np.zeros(nbins, dtype=indepVar.dtype)
    bs = np.zeros(nbins)
    bn = np.zeros(nbins, dtype='i')

    for i in range(nbins):
        lo, hi = bins[i], bins[i + 1]
        selected = indepVar[(binVar >= lo) * (binVar < hi)]
        bn[i] = len(selected)
        if bn[i]:
            bm[i] = selected.sum()
            bs[i] = selected.std()
        else:
            bm[i] = 0
            bs[i] = 0

    return bn, bm, bs
|
|
393
|
+
|
|
394
|
+
def frc(image):
    """Fourier ring correlation between the first two colour channels of
    `image`; plots the FRC curve plus 2-sigma and 1/7 threshold lines.

    Returns (H1, H2, R, frc_curve, bn, bm, bm1, bm2, rB).
    """
    from PYME.Analysis import binAvg
    import numpy as np
    import matplotlib.pyplot as plt

    voxelsize = image.voxelsize

    shape = image.data.shape[0:2]
    # 2D Hanning window to suppress edge artefacts
    hwin = np.sqrt(np.outer(np.hanning(shape[0]), np.hanning(shape[1])))

    #assume we have exactly 2 channels #FIXME - add a selector
    #grab image data
    imA = hwin * image.data[:, :, :, 0].squeeze()
    imB = hwin * image.data[:, :, :, 1].squeeze()

    # normalised radial frequency coordinate, centred on 0
    X, Y = np.mgrid[0:float(imA.shape[0]), 0:float(imA.shape[1])]
    X = X / X.shape[0]
    Y = Y / X.shape[1]
    X = X - .5
    Y = Y - .5
    R = np.sqrt(X**2 + Y**2)

    # BUGFIX: pylab no longer re-exports fft2/fftshift; use numpy.fft
    H1 = np.fft.fft2(imA)
    H2 = np.fft.fft2(imB)

    ringwidth = 1  # in pixels
    # BUGFIX: np.linspace needs an integer sample count on modern numpy
    rB = np.linspace(0, 0.5, int(0.5 * imA.shape[0] / ringwidth))

    bn, bm, bs = binSum(R, np.fft.fftshift(H1 * H2.conjugate()), rB)
    bn1, bm1, bs1 = binSum(R, np.fft.fftshift(abs(H1 * H1.conjugate())), rB)
    bn2, bm2, bs2 = binSum(R, np.fft.fftshift(abs(H2 * H2.conjugate())), rB)

    bmr = np.real(bm)

    plt.figure()
    ax = plt.gca()

    freqpnm = rB / voxelsize[0]
    ax.plot(freqpnm[:-1], bmr / np.sqrt(bm1 * bm2))   # FRC curve
    ax.plot(freqpnm[:-1], 2. / np.sqrt(bn / 2))       # 2-sigma criterion
    ax.plot(freqpnm[:-1], 0 * bmr + 1.0 / 7)          # fixed 1/7 threshold
    ax.plot(freqpnm[:-1], 0 * bmr, '--')

    # label the frequency axis in real-space distances (nm)
    xt = np.array([10., 15, 20, 30, 50, 80, 100, 150])
    rt = 1.0 / xt
    plt.xticks(rt[::-1], ['%d' % xi for xi in xt[::-1]])

    plt.show()

    return H1, H2, R, bmr / np.sqrt(bm1 * bm2), bn, bm, bm1, bm2, rB
|
|
448
|
+
|
|
449
|
+
def abscorrel(a, b):
    """Magnitude of the FFT-based correlation surface of `a` with `b`,
    centred via fftshift.
    """
    import numpy as np
    # use numpy.fft directly; importing fftshift from pylab is deprecated
    from numpy.fft import fftn, ifftn, fftshift

    F0 = fftn(a)
    Fi = ifftn(b)
    # NOTE(review): this multiplies fftn(a) by ifftn(b) (not by the
    # conjugate spectrum); kept as-is since trackser depends on this form
    return np.abs(fftshift(ifftn(F0 * Fi)))
|
|
459
|
+
|
|
460
|
+
def cent2d(im, usefrac=0.25):
    """Centroid of the brightest `usefrac` fraction of `im`, measured
    relative to the image centre.

    NOTE: shifts `im` in place by subtracting its minimum (matching the
    original behaviour); returns [drow, dcol].
    """
    im -= im.min()

    threshold = im.max() * (1.0 - usefrac)
    im = np.maximum(im - threshold, 0)

    rows, cols = np.where(im)

    weights = im[im > 0]
    weights = weights / weights.sum()

    drow = (rows * weights).sum() - im.shape[0] / 2.
    dcol = (cols * weights).sum() - im.shape[1] / 2.

    return [drow, dcol]
|
|
474
|
+
|
|
475
|
+
def trackser(ref, series, frange=None, usefrac=0.25):
    """Track per-frame displacements of `series` against `ref` using the
    centroid of the correlation surface; returns [dx, dy] arrays."""
    if frange is None:
        frange = range(series.shape[2])
    n = len(frange)

    dx = np.zeros(n)
    dy = np.zeros(n)
    for k, fidx in enumerate(frange):
        corr = abscorrel(ref, series[:, :, fidx].squeeze())
        dx[k], dy[k] = cent2d(corr, usefrac=usefrac)

    return [dx, dy]
|
|
489
|
+
|
|
490
|
+
import matplotlib.pyplot as plt
|
|
491
|
+
# turns out the below is mostly taken from https://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html
|
|
492
|
+
def savitzky_golay(y, window_size, order, deriv=0):
    r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    The Savitzky-Golay filter removes high frequency noise from data while
    preserving the original shape and features of the signal better than
    other filtering approaches such as moving averages. Based on
    https://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html

    Parameters
    ----------
    y : array_like, shape (N,)
        the values of the time history of the signal.
    window_size : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
    deriv: int
        the order of the derivative to compute (default = 0 means only smoothing)

    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or it's n-th derivative).

    References
    ----------
    .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
       Data by Simplified Least Squares Procedures. Analytical
       Chemistry, 1964, 36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
       W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
       Cambridge University Press ISBN-13: 9780521880688
    """
    try:
        # np.int was removed in numpy 1.24; the builtin int is equivalent here
        window_size = abs(int(window_size))
        order = abs(int(order))
    except (ValueError, TypeError):
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # precompute coefficients (np.mat is deprecated; plain ndarray works)
    b = np.array([[k**i for i in order_range]
                  for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv]
    # pad the signal at the extremes with
    # values taken from the signal itself
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m, y, mode='valid')
|
|
563
|
+
|
|
564
|
+
from PYME.Analysis.BleachProfile import kinModels
|
|
565
|
+
def gphotons(pipeline):
    """Mean photon number and mean x-error per colour channel.

    Without colour channels returns a single [name, mean_nphotons,
    mean_error_x] entry (empty name); otherwise a list of such entries,
    restoring the originally selected colour afterwards.
    """
    colourFilter = pipeline.colourFilter
    metadata = pipeline.mdh
    chans = colourFilter.getColourChans()

    if len(chans) == 0:
        nph = kinModels.getPhotonNums(colourFilter, metadata)
        return ['', nph.mean(), colourFilter['error_x'].mean()]

    results = []
    startingColour = colourFilter.currentColour
    for chan in chans:
        channame = pipeline.fluorSpeciesDyes[chan]
        colourFilter.setColour(chan)
        nph = kinModels.getPhotonNums(colourFilter, metadata)
        results.append([channame, nph.mean(), colourFilter['error_x'].mean()])

    colourFilter.setColour(startingColour)
    return results
|
|
585
|
+
|
|
586
|
+
import PYME.Analysis.BleachProfile.kinModels as km
|
|
587
|
+
def plotphotons(pipeline, color='red'):
    """Plot a histogram of photon numbers (range 0 to 6x the mean, 100 bins)
    and return the photon-number array."""
    nph = km.getPhotonNums(pipeline.colourFilter, pipeline.mdh)
    edges = np.linspace(0, 6 * nph.mean(), 100)
    counts, edges = np.histogram(nph, edges)
    plt.bar(edges[:-1], counts, width=edges[1] - edges[0], alpha=0.4, color=color)
    return nph
|
|
593
|
+
|
|
594
|
+
def photonconvert(data, mdh=None):
    """Convert raw camera counts to photons using the camera metadata
    (AD offset, electrons per count, true EM gain)."""
    if mdh is None:
        mdh = getmdh(inmodule=True)
    offset_removed = data - mdh['Camera.ADOffset']
    electrons = offset_removed * mdh['Camera.ElectronsPerCount']
    return electrons / mdh['Camera.TrueEMGain']
|
|
598
|
+
|
|
599
|
+
def gmesig(sig, N, Nb, voxelsize):
    """Localisation variance estimate from PSF width `sig`, photon count `N`,
    background `Nb` and pixel size `voxelsize` (Mortensen-style formula with
    pixelation correction voxelsize^2/12)."""
    siga_sq = sig * sig + voxelsize * voxelsize / 12.0
    excess = 8 * math.pi * siga_sq * Nb / (N * voxelsize * voxelsize)
    return siga_sq / N * (16.0 / 9 + excess)
|
|
602
|
+
|
|
603
|
+
def gmestd(sig,N,Nb,voxelsize,mdh=None):
    # Standard deviation corresponding to gmesig's variance estimate.
    # NOTE(review): the mdh parameter is accepted but unused - kept for
    # signature compatibility; confirm before removing.
    return np.sqrt(gmesig(sig,N,Nb,voxelsize))
|
|
605
|
+
|
|
606
|
+
# histogram with binwidth guaranteed to be one
|
|
607
|
+
def histone(data, binwidth=1):
    """Plot a histogram whose integer bin edges are spaced exactly
    `binwidth` apart (guaranteed bin width)."""
    flat = data.squeeze()
    edges = range(int(min(flat)), int(max(flat)) + binwidth, binwidth)
    plt.hist(flat, bins=edges)
|
|
610
|
+
|
|
611
|
+
# this routine is designed for correlative tracking using the first (potentially averaged) image
|
|
612
|
+
# as reference and then determines displacement of later images aginst that one
|
|
613
|
+
import scipy.ndimage
|
|
614
|
+
from numpy.fft import *
|
|
615
|
+
def correltrack(data, start=0, avgover=10, pixelsize=70.0, centersize=7, centroidfrac=1.5):
    """Correlative tracking using the first (averaged) image as reference:
    determines the displacement of later (averaged) images against it.

    data: 3D stack; start/avgover select and average frame groups;
    pixelsize: nm per pixel used to scale the returned shifts;
    centersize: half-width of the correlation window around the centre;
    centroidfrac: only values above max/centroidfrac enter the centroid.
    Returns (t, sh, xctw): frame times, shifts in nm relative to the first
    step, and the windowed correlation stack.
    """
    cs = centersize
    shp = [d for d in data.shape if d > 1]
    # BUGFIX: long() no longer exists on Python 3
    nsteps = int((shp[2] - start) / avgover)
    # BUGFIX: integer division - float indices are not valid for slicing
    shh = (shp[0] // 2, shp[1] // 2)
    xctw = np.zeros((2 * centersize + 1, 2 * centersize + 1, nsteps))
    shifts = []
    i1 = data[:, :, start:start + avgover].squeeze().mean(axis=2)
    I1 = fftn(i1)
    for i in range(nsteps):
        xc = abs(ifftshift(ifftn(I1 * ifftn(data[:, :, start + i * avgover:start + (i + 1) * avgover].squeeze().mean(axis=2)))))
        xct = xc - xc.min()
        # keep only the top part of the peak for the centroid
        xct = (xct - xct.max() / centroidfrac) * (xct > xct.max() / centroidfrac)
        xctw[:, :, i] = xct[shh[0] - cs:shh[0] + cs + 1, shh[1] - cs:shh[1] + cs + 1]
        # scipy.ndimage.measurements is deprecated; use ndimage directly
        shifts.append(scipy.ndimage.center_of_mass(xctw[:, :, i]))

    sh = np.array(shifts)
    t = start + np.arange(nsteps) * avgover
    sh = pixelsize * (sh - sh[0])
    return t, sh, xctw
|
|
635
|
+
|
|
636
|
+
# we ignore centroidfrac by default
|
|
637
|
+
def correltrack2(data, start=0, avgover=10, pixelsize=70.0, centersize=15, centroidfac=0.6, roi=None):
    """Correlative tracking against the first (optionally averaged) image,
    using mean-normalised images; centroidfrac of correltrack is replaced by
    a peak fraction `centroidfac`.

    roi: [rowstart, rowstop, colstart, colstop] sub-region (None bounds ok).
    Returns (t, sh, xctw): frame times, shifts in nm relative to the first
    step, and the windowed correlation stack.
    """
    if roi is None:
        roi = [0, None, 0, None]  # avoid a mutable default argument
    cs = centersize
    shp = [d for d in data.shape if d > 1]
    # BUGFIX: long() no longer exists on Python 3
    nsteps = int((shp[2] - start) / avgover)
    xctw = np.zeros((2 * centersize + 1, 2 * centersize + 1, nsteps))
    shifts = []
    if avgover > 1:
        ref = data[:, :, start:start + avgover].squeeze().mean(axis=2)
    else:
        ref = data[:, :, start].squeeze()
    # BUGFIX: was ref[roi[0]:roi[3], roi[1]:roi[3]], which only worked for
    # the default roi; use the intended [rowstart:rowstop, colstart:colstop]
    ref = ref[roi[0]:roi[1], roi[2]:roi[3]]
    refn = ref / ref.mean() - 1
    Frefn = fftn(refn)
    # BUGFIX: integer division - float indices are not valid for slicing
    shh = (ref.shape[0] // 2, ref.shape[1] // 2)

    for i in range(nsteps):
        comp = data[:, :, start + i * avgover:start + (i + 1) * avgover].squeeze()
        if len(comp.shape) > 2:
            comp = comp.mean(axis=2)
        comp = comp[roi[0]:roi[1], roi[2]:roi[3]]
        compn = comp / comp.mean() - 1
        xc = ifftshift(np.abs(ifftn(Frefn * ifftn(compn))))
        xcm = xc.max()
        # keep only the top (1 - centroidfac) of the peak for the centroid
        xcp = np.maximum(xc - centroidfac * xcm, 0)
        xctw[:, :, i] = xcp[shh[0] - cs:shh[0] + cs + 1, shh[1] - cs:shh[1] + cs + 1]
        # scipy.ndimage.measurements is deprecated; use ndimage directly
        shifts.append(scipy.ndimage.center_of_mass(xctw[:, :, i]))

    sh = np.array(shifts)
    t = start + np.arange(nsteps) * avgover
    sh = pixelsize * (sh - sh[0])
    return t, sh, xctw
|
|
668
|
+
|
|
669
|
+
def meanvards(dataSource, start=0, end=-1):
    """Two-pass per-pixel mean and sample variance over frames [start, end).

    Negative `end` counts from the number of slices (python-slice style).
    Returns (mean_image, variance_image) as float64 arrays.
    """
    nslices = dataSource.getNumSlices()
    if end < 0:
        end = nslices + end

    nframes = end - start
    xSize, ySize = dataSource.getSliceShape()

    # first pass: accumulate the mean
    mean_img = np.zeros((xSize, ySize), dtype='float64')
    for frameN in range(start, end):
        mean_img += dataSource.getSlice(frameN)
    mean_img /= nframes

    # second pass: accumulate squared deviations (sample variance, n-1)
    var_img = np.zeros((xSize, ySize), dtype='float64')
    for frameN in range(start, end):
        var_img += (dataSource.getSlice(frameN) - mean_img) ** 2
    var_img /= (nframes - 1)

    return (mean_img, var_img)
|
|
689
|
+
|
|
690
|
+
def darkCal(dataSource, integrationTimes, transitionTimes):
    """Mean and variance images for each integration-time segment of a dark
    calibration series; segment boundaries come from `transitionTimes`
    (the last segment runs to the end of the series).

    Returns (means, variances) as parallel lists.
    NOTE(review): integrationTimes is accepted but unused here - kept for
    signature compatibility (see darkCalfromMetadata).
    """
    means = []
    variances = []
    segmentEnds = transitionTimes[1:] + [-1]
    for istart, istop in zip(transitionTimes, segmentEnds):
        print("starting at %d, using %d frames..." % (istart, istop - istart))
        m, v = meanvards(dataSource, istart, istop)
        means.append(m)
        variances.append(v)
    return (means, variances)
|
|
700
|
+
|
|
701
|
+
def darkCalfromMetadata(dataSource, mdh=None):
    """Run darkCal using integration/transition times stored in the protocol
    metadata; returns (means, variances, integrationTimes)."""
    if mdh is None:
        mdh = getmdh(inmodule=True)
    itimes = mdh['Protocol.IntegrationTimes']
    ttimes = mdh['Protocol.Transitions']
    means, variances = darkCal(dataSource, itimes, ttimes)
    return (means, variances, itimes)
|
|
707
|
+
|
|
708
|
+
from scipy import stats
|
|
709
|
+
def isnparray(a):
    # True when type(a) was defined in the top-level numpy module.
    # NOTE(review): this module-name check is NOT equivalent to
    # isinstance(a, np.ndarray): it misses ndarray subclasses defined in
    # numpy submodules (e.g. numpy.ma masked arrays) and matches non-array
    # numpy scalar types - confirm intent before changing.
    return type(a).__module__ == np.__name__
|
|
711
|
+
|
|
712
|
+
def dcfit(ms, integrationTimes):
    """Per-pixel linear regression of dark-calibration offsets against
    integration time.

    ms: list of 2D mean images (one per integration time) or an equivalent
    pre-stacked 3D array. Returns float32 images
    (dc, offs, r_value, p_value, std_err) from scipy.stats.linregress.
    """
    import sys
    # stack the list of per-time images along axis 2 (the numpy-module check
    # is inlined from the isnparray helper)
    if type(ms).__module__ != np.__name__:
        ofs = np.dstack(ms)
    else:
        ofs = ms
    itimes = np.asarray(integrationTimes)

    sz = ofs.shape

    def _zeros():
        return np.zeros(sz[0:2], dtype='float32')

    dc = _zeros()
    offs = _zeros()
    r_value = _zeros()
    p_value = _zeros()
    std_err = _zeros()

    for x in range(sz[0]):
        # progress indicator, one line per image row
        print("line %d" % (x) + '\r')
        sys.stdout.flush()
        for y in range(sz[1]):
            dc[x, y], offs[x, y], r_value[x, y], p_value[x, y], std_err[x, y] = \
                stats.linregress(itimes, ofs[x, y, :])

    return (dc, offs, r_value, p_value, std_err)
|
|
737
|
+
|
|
738
|
+
def subsampidx(arraylen, percentage=10):
    """Random index subsample: *percentage* percent of range(arraylen).

    Indices are drawn with replacement via np.random.choice.  The sample
    size is rounded to an int — modern numpy raises TypeError for a float
    size, which the previous float expression produced.
    """
    newlen = int(round(percentage * 1e-2 * arraylen))
    idx = np.random.choice(arraylen, newlen)
    return idx
|
|
742
|
+
|
|
743
|
+
from scipy.stats import gaussian_kde
|
|
744
|
+
def scatterdens(x, y, subsample=1.0, s=10, xlabel=None, ylabel=None, **kwargs):
    """Scatter plot of (x, y) coloured by a Gaussian KDE density estimate.

    The KDE is fitted on an optional random subsample (fraction given by
    *subsample*) but evaluated — and plotted — for all points.  Extra
    keyword arguments are forwarded to plt.scatter.  Returns the fitted
    gaussian_kde estimator so callers can re-evaluate it elsewhere.
    """
    xf = x.flatten()
    yf = y.flatten()

    if subsample < 1.0:
        # fit the KDE on a random subset to keep the estimate tractable
        sel = subsampidx(xf.size, percentage=100 * subsample)
        xs, ys = xf[sel], yf[sel]
    else:
        xs, ys = xf, yf

    estimator = gaussian_kde([xs, ys])
    density = estimator.evaluate([xf, yf])
    print("density min, max: %f, %f" % (density.min(), density.max()))

    plt.scatter(xf, yf, c=density, marker='o', s=s, **kwargs)
    if xlabel is not None:
        plt.xlabel(xlabel)
    if ylabel is not None:
        plt.ylabel(ylabel)

    return estimator
|
|
765
|
+
|
|
766
|
+
def multicolcheck(pipeline, subsample=0.03, dA=20, xrange=None, yrange=None):
    """Side-by-side diagnostic of the Ag vs Ar amplitude distribution.

    Left panel: density-coloured scatter of fitResults_Ag vs fitResults_Ar.
    Right panel: the same KDE evaluated on a regular grid with spacing *dA*
    and shown as an image, normalised to its maximum.

    Returns the normalised 2D density image.
    """
    # mutable-default fix: the old list defaults [-1000, 3000] and
    # [-1000, 6000] are now created per call
    if xrange is None:
        xrange = [-1000, 3000]
    if yrange is None:
        yrange = [-1000, 6000]

    p = pipeline
    plt.figure()
    plt.subplot(1, 2, 1)
    estimator = scatterdens(p['fitResults_Ag'], p['fitResults_Ar'], subsample=subsample, s=10)
    plt.xlim(xrange)
    plt.ylim(yrange)

    # regular evaluation grid spanning the plotted range
    x1d = np.arange(xrange[0], xrange[1], dA)
    y1d = np.arange(yrange[0], yrange[1], dA)
    x2d = x1d[:, None] * np.ones_like(y1d)[None, :]
    y2d = np.ones_like(x1d)[:, None] * y1d[None, :]

    imd = estimator.evaluate([x2d.flatten(), y2d.flatten()])
    imd2d = imd.reshape(x2d.shape)
    imd2d /= imd2d.max()

    plt.subplot(1, 2, 2)
    plt.imshow(imd2d[:, ::-1].transpose(), cmap=plt.get_cmap('jet'),
               extent=[xrange[0], xrange[1], yrange[0], yrange[1]])
    plt.grid(True)

    return imd2d
|
|
789
|
+
|
|
790
|
+
def intdens(image, framenum=0):
    """Integrated density (pixel area * sum) of one frame of *image*.

    Pixel area is 1e6 * voxelsize.x * voxelsize.y from the image metadata
    (the 1e6 factor looks like a unit conversion — confirm against
    callers).  If the metadata records how many events were rendered, the
    events/intdens ratio is printed as a diagnostic.

    Returns the integrated density as a float.
    """
    mdh = image.mdh
    pixarea = 1e6 * mdh['voxelsize.x'] * mdh['voxelsize.y']
    data = image.data[:, :, framenum].squeeze()

    # renamed from 'intdens' — the old local shadowed the function name
    integrated = float(pixarea * data.sum())

    # optional diagnostic: only some images carry this metadata entry;
    # narrowed from the old bare except to missing/malformed metadata
    nevts = None
    try:
        nevts = int(mdh['Rendering.NEventsRendered'])
    except (KeyError, AttributeError, TypeError, ValueError):
        pass
    if nevts is not None:
        print("Ratio Events/Intdens = %f" % (nevts / integrated))
    return integrated
|
|
804
|
+
|
|
805
|
+
def px(p):
    """Plot the x coordinate (relative to the mean of the first 10 events)
    against acquisition time in seconds."""
    t_sec = p['t'] * p.mdh['Camera.CycleTime']
    x_rel = p['x'] - p['x'][0:10].mean()
    plt.plot(t_sec, x_rel)
|
|
809
|
+
|
|
810
|
+
def py(p):
    """Plot the y coordinate (relative to the mean of the first 10 events)
    against acquisition time in seconds."""
    t_sec = p['t'] * p.mdh['Camera.CycleTime']
    y_rel = p['y'] - p['y'][0:10].mean()
    plt.plot(t_sec, y_rel)
|
|
814
|
+
|
|
815
|
+
|
|
816
|
+
def cumuexpfit(t, tau):
    """Cumulative exponential distribution 1 - exp(-t/tau).

    Implemented via expm1 for numerical accuracy: for small t/tau the
    direct form 1 - exp(-x) loses precision to cancellation.
    """
    return -np.expm1(-t / tau)
|
|
818
|
+
|
|
819
|
+
from scipy.optimize import curve_fit
|
|
820
|
+
def darktimes(pipeline, mdh=None, plot=True, report=True):
    """Estimate the fluorophore dark time from gaps in the event stream.

    Builds the distribution of frame gaps between consecutive events and
    fits the cumulative exponential 1 - exp(-t/tau) twice: to the raw
    empirical cumulative distribution and to a 5-frame-binned cumulative
    histogram (as a cross-check).

    Parameters
    ----------
    pipeline : mapping with 't', 'x', 'y' event arrays
    mdh : metadata handler supplying 'voxelsize.x'/'voxelsize.y'; looked up
        via getmdh() when omitted
    plot, report : emit diagnostic plots / a console summary

    Returns
    -------
    (cumux, cumuy, tau, pcov) : the empirical cumulative distribution, the
    fitted dark time in frames, and its covariance from curve_fit.
    """
    if mdh is None:
        mdh = getmdh(inmodule=True)
    t = pipeline['t']
    x = pipeline['x']
    y = pipeline['y']
    # determine darktime from gaps and reject zeros (no real gaps)
    dts = t[1:] - t[0:-1] - 1
    dtg = dts[dts > 0]
    nts = dtg.shape[0]
    # now make a cumulative histogram from these
    cumux = np.sort(dtg + 0.01 * np.random.random(nts))  # hack: adding random noise helps us ensure uniqueness of x values
    # np.float was removed in numpy 1.24 -- use the builtin float
    cumuy = (1.0 + np.arange(nts)) / float(nts)
    bbx = (x.min(), x.max())
    bby = (y.min(), y.max())
    voxx = 1e3 * mdh['voxelsize.x']
    voxy = 1e3 * mdh['voxelsize.y']
    bbszx = bbx[1] - bbx[0]
    bbszy = bby[1] - bby[0]
    maxtd = dtg.max()
    # binned cumulative histogram as a cross-check on the raw fit
    binedges = np.arange(0, maxtd, 5)
    binctrs = 0.5 * (binedges[0:-1] + binedges[1:])
    h, be2 = np.histogram(dtg, bins=binedges)
    hc = np.cumsum(h)
    hcg = hc[h > 0] / float(nts)  # only nonzero bins and normalise
    binctrsg = binctrs[h > 0]
    popth, pcovh = curve_fit(cumuexpfit, binctrsg, hcg, p0=(300.0))
    popt, pcov = curve_fit(cumuexpfit, cumux, cumuy, p0=(300.0))
    if plot:
        plt.subplot(211)
        plt.plot(cumux, cumuy, 'o')
        plt.plot(cumux, cumuexpfit(cumux, popt[0]))
        plt.plot(binctrs, hc / float(nts), 'o')
        plt.plot(binctrs, cumuexpfit(binctrs, popth[0]))
        plt.ylim(-0.2, 1.2)
        plt.subplot(212)
        plt.semilogx(cumux, cumuy, 'o')
        plt.semilogx(cumux, cumuexpfit(cumux, popt[0]))
        plt.semilogx(binctrs, hc / float(nts), 'o')
        plt.semilogx(binctrs, cumuexpfit(binctrs, popth[0]))
        plt.ylim(-0.2, 1.2)
        plt.show()
    if report:
        print("events: %d" % t.shape[0])
        print("dark times: %d" % nts)
        print("region: %d x %d nm (%d x %d pixel)" % (bbszx, bbszy, bbszx / voxx, bbszy / voxy))
        print("centered at %d,%d (%d,%d pixels)" % (x.mean(), y.mean(), x.mean() / voxx, y.mean() / voxy))
        print("darktime: %.1f (%.1f) frames" % (popt[0], popth[0]))
        print("qunits: %.2f" % (200 / (popt[0] + popth[0])))

    return (cumux, cumuy, popt[0], pcov)
|
|
871
|
+
|
|
872
|
+
def darktimehist(ton):
    """Cumulative dark-time distribution and exponential fit for on-times.

    *ton* is a sorted array of frame numbers at which the molecule was on;
    gaps of more than one frame count as dark times.

    Returns (cumux, cumuy, fitted curve at cumux, tau) with tau in frames.
    """
    # determine darktime from gaps and reject zeros (no real gaps)
    dts = ton[1:] - ton[0:-1] - 1
    dtg = dts[dts > 0]
    nts = dtg.shape[0]
    # now make a cumulative histogram from these
    cumux = np.sort(dtg + 0.01 * np.random.random(nts))  # hack: adding random noise helps us ensure uniqueness of x values
    # np.float was removed in numpy 1.24 -- use the builtin float
    cumuy = (1.0 + np.arange(nts)) / float(nts)
    popt, pcov = curve_fit(cumuexpfit, cumux, cumuy, p0=(300.0))

    return (cumux, cumuy, cumuexpfit(cumux, popt[0]), popt[0])
|
|
883
|
+
|
|
884
|
+
|
|
885
|
+
def analyze1dSeries(series, chunklength=500):
    """Estimate the baseline offset and average peak level of a 1D series.

    The series is cut into chunks of *chunklength*; the mean of the
    per-chunk maxima gives the peak level.  The offset is then re-estimated
    as the mean of all samples below the midpoint between the global mean
    and the peak level.

    Returns (offset, peakaverage).  Series shorter than one chunk are now
    treated as a single chunk instead of producing a NaN peak estimate.
    """
    offset = series.mean()
    # at least one chunk, so short series no longer yield an empty max list
    chunks = max(1, int(len(series) / chunklength))
    chunkmaxs = np.array([max(series[chunk * chunklength:(chunk + 1) * chunklength])
                          for chunk in range(chunks)])
    peakaverage = chunkmaxs.mean()
    # samples below half-way to the peak are considered baseline
    offset = series[series < (offset + 0.5 * (peakaverage - offset))].mean()
    return (offset, peakaverage)
|
|
892
|
+
|
|
893
|
+
def datafrompipeline(datasource, pipeline, ctr, boxsize=7):
    """Extract a small ROI time series from raw data around moving centres.

    *ctr* is a (2, T) array of x/y centre coordinates (pixels) per frame;
    a (boxsize x boxsize) window around the integer-truncated centre is cut
    out of every frame up to min(data length, max event time).

    Returns (tser, rawser) with rawser shaped (boxsize, boxsize, len(tser)).
    """
    nframes = min(datasource.shape[2], pipeline['t'].max())
    frames = np.arange(nframes)
    half = int(boxsize / 2)
    roi = np.zeros((2 * half + 1, 2 * half + 1, frames.shape[0]))
    for fr in range(len(frames)):
        cx = int(ctr[0, fr])
        cy = int(ctr[1, fr])
        roi[:, :, fr] = datasource[cx - half:cx + half + 1,
                                   cy - half:cy + half + 1, fr].squeeze()
    return (frames, roi)
|
|
902
|
+
|
|
903
|
+
try:
|
|
904
|
+
from StringIO import StringIO ## for Python 2
|
|
905
|
+
except ImportError:
|
|
906
|
+
from io import StringIO ## for Python 3
|
|
907
|
+
|
|
908
|
+
import sys
|
|
909
|
+
def darkAnalysisRawPlusPipeline(datasource, pipeline, driftPane=None, boxsize=7, doplot=True,
                                threshfactor=0.45, mdh=None, debug=1):
    """Dark-time analysis combining detected events with the raw data trace.

    Locates the analysis ROI — from the pipeline's x/y filter bounds when
    available, otherwise the event bounding box — optionally tracks it
    along the fitted drift curves, extracts the raw intensity trace with
    datafrompipeline() and runs analyzeDataPlusEvents() on it.

    Returns (tser, rawser, rawm, peakav, tevts, fitev, fitr, rawthresh).
    """
    xp = pipeline['x']  # in new code use 'x_raw' and 'y_raw'!
    yp = pipeline['y']
    if mdh is None:  # there may be other ways to get at the mdh, e.g. via pipeline?
        mdh = pipeline.mdh
    xpix = 1e3 * mdh['voxelsize.x']
    ypix = 1e3 * mdh['voxelsize.y']

    # ROI strategy:
    # 1. with x/y filter bounds on the pipeline, use the centre of that ROI
    # 2. with a drift time course, convert it into a centre-per-frame
    #    ctr(t) = [xctr(t), yctr(t)] which datafrompipeline() accepts
    try:
        # fixed attribute name: the pipeline exposes filterKeys; the old
        # lowercase 'filterkeys' always raised and forced the fallback
        bbox = [pipeline.filterKeys['x'][0], pipeline.filterKeys['x'][1],
                pipeline.filterKeys['y'][0], pipeline.filterKeys['y'][1]]
    except (AttributeError, KeyError):
        bbox = [xp.min(), xp.max(), yp.min(), yp.max()]

    bboxpix = [bbox[0] / xpix, bbox[1] / xpix, bbox[2] / ypix, bbox[3] / ypix]  # only for diagnosis
    bbctr = 0.5 * np.array([bbox[0] + bbox[1], bbox[2] + bbox[3]])
    t = np.arange(0, pipeline['t'].max())

    if driftPane is None:
        bbctrt = bbctr[:, None] * (np.ones((t.shape))[None, :])
    else:
        dx, dy, tt = getdriftcurves(driftPane, pipeline, t)  # this should now return the desired times in all cases
        bbctrt = np.zeros((2, t.shape[0]))
        bbctrt[0, :] = bbctr[0] - dx
        bbctrt[1, :] = bbctr[1] - dy

    # nm -> pixel conversion; the old np.array(xpix, ypix) passed ypix as a
    # dtype argument instead of building a 2-vector to broadcast over time
    ctrpix = np.rint(bbctrt / np.array([xpix, ypix])[:, None])

    if debug:
        print('BBox (nm): ', bbox)
        print('BBox (pix): ', bboxpix)
        print('Ctr (pix): ', ctrpix[:, 0])
        sys.stdout.flush()

    print('extracting region from data...')
    sys.stdout.flush()
    tser, rawser = datafrompipeline(datasource, pipeline, ctrpix, boxsize=boxsize)

    print('analyzing data...')
    sys.stdout.flush()
    tevts = pipeline['t'].copy()
    rawm, peakav, fitev, fitr, rawthresh = analyzeDataPlusEvents(tser, rawser, tevts, doplot=doplot,
                                                                 threshfactor=threshfactor, debug=debug)

    return (tser, rawser, rawm, peakav, tevts, fitev, fitr, rawthresh)
|
|
973
|
+
|
|
974
|
+
def analyzeDataPlusEvents(tser, rawser, tevts, doplot=True,
                          threshfactor=0.45, debug=1, rawthresh=None, size=6):
    """Compare dark-time statistics from detected events vs the raw trace.

    The raw ROI stack is averaged to a 1D trace, baseline-corrected via
    analyze1dSeries(), thresholded (threshfactor * peak level unless an
    explicit *rawthresh* is given) to obtain 'raw' on-frames; both the
    event times and the raw on-frames then go through darktimehist().

    Returns (rawm, peakav, (ctp, chip, chipfit, taup),
             (ctr, chir, chirfit, taur), rawthresh).
    """
    rawm = rawser.mean(axis=0).mean(axis=0)
    offset, peakav = analyze1dSeries(rawm, chunklength=500)
    rawm = rawm - offset
    peakav = peakav - offset

    tp = tevts
    if rawthresh is None:
        rawthresh = threshfactor * peakav
    th = tser[rawm > (rawthresh)]

    ctp, chip, chipfit, taup = darktimehist(tp)
    ctr, chir, chirfit, taur = darktimehist(th)

    # fix: the module imports the StringIO *class*, so call it directly
    # (the old StringIO.StringIO() raised AttributeError)
    outstr = StringIO()

    print("events: %d (%d raw)" % (tp.shape[0], th.shape[0]), file=outstr)
    print("dark times: %d (%d raw)" % (ctp.shape[0], ctr.shape[0]), file=outstr)
    print("darktime: ev %.1f (raw %.1f) frames" % (taup, taur), file=outstr)
    print("qunits: ev %.2f (raw %.2f), eunits: %.2f" % (200.0 / taup, 200.0 / taur, tp.shape[0] / 500.0), file=outstr)

    labelstr = outstr.getvalue()

    if debug:
        # the old dangling bare 'print' emitted nothing useful; echo the summary
        print(labelstr)
    if doplot:
        plt.figure()
        plt.plot(tser, rawm)
        peaklevel = plt.plot(tser, peakav * np.ones(tser.shape), '--', label='median peak')
        events_h5r = plt.plot(tp, 1.2 * rawthresh * np.ones(tp.shape), 'o', c='red', label='events')
        events_raw = plt.plot(th, rawthresh * np.ones(th.shape), 'o', c='blue', label='raw detected')
        plt.legend(handles=[events_raw[0], events_h5r[0], peaklevel[0]])

        plt.figure()
        events = plt.semilogx(ctp, chip, 'o', c='red', alpha=.5, markersize=size, label='events')
        eventfit = plt.semilogx(ctp, chipfit, label='event fit')
        raw = plt.semilogx(ctr, chir, 'o', c='blue', alpha=.5, markersize=size, label='raw')
        rawfit = plt.semilogx(ctr, chirfit, label='raw data fit')
        plt.ylim(-0.2, 1.2)
        plt.annotate(labelstr, xy=(0.5, 0.1), xycoords='axes fraction',
                     fontsize=10)
        plt.legend(handles=[events[0], raw[0], eventfit[0], rawfit[0]], loc=4)

    return (rawm, peakav, (ctp, chip, chipfit, taup), (ctr, chir, chirfit, taur), rawthresh)
|
|
1021
|
+
|
|
1022
|
+
|
|
1023
|
+
import pickle
|
|
1024
|
+
def savepickled(object, fname):
    """Pickle *object* to the file *fname* (binary mode).

    Uses a context manager so the file is closed even if pickle.dump raises.
    """
    with open(fname, 'wb') as fi:
        pickle.dump(object, fi)
|
|
1028
|
+
|
|
1029
|
+
def loadpickled(fname):
    """Load and return a pickled object from *fname*.

    Opens in binary mode ('rb') — the old text-mode 'r' breaks pickle.load
    under Python 3 — and closes the file deterministically (the old version
    never closed it).
    """
    with open(fname, 'rb') as fi:
        return pickle.load(fi)
|
|
1032
|
+
|
|
1033
|
+
from PYME.DSView import dsviewer
|
|
1034
|
+
def setdriftparsFromImg(driftPane, img=None):
    """Copy drift-correction expressions and fitted a*/b* parameters from an
    image's metadata into the drift pane GUI.

    When *img* is omitted the first open dsviewer image is used.  Returns
    the updated driftCorrParams dict.
    """
    if img is None:
        # dict views are not indexable in Python 3; the old keys()[0] raised
        img = dsviewer.openViewers[next(iter(dsviewer.openViewers))].image
    driftPane.tXExpr.SetValue(img.mdh['DriftCorrection.ExprX'])
    driftPane.tYExpr.SetValue(img.mdh['DriftCorrection.ExprY'])
    driftPane.tZExpr.SetValue(img.mdh['DriftCorrection.ExprZ'])
    driftPane.OnDriftExprChange(None)
    destp = driftPane.dp.driftCorrParams
    srcp = img.mdh['DriftCorrection.Parameters']
    # only the fit parameters (a*, b*) are copied; other keys stay untouched
    for key in destp.keys():
        if key.startswith(('a', 'b')):
            destp[key] = srcp[key]
    driftPane.OnDriftExprChange(None)
    return destp
|
|
1048
|
+
|
|
1049
|
+
def getOpenImages():
    """Return the dict of currently open dsviewer viewers."""
    return dsviewer.openViewers
|
|
1052
|
+
|
|
1053
|
+
def setSelectionFromFilterKeys(visFr, img):
    """Set the VisGUI canvas selection rectangle from the 'Filter.Keys'
    x/y bounds stored in an image's metadata."""
    canvas = visFr.glCanvas
    filter_keys = img.mdh['Filter.Keys']
    xmin, xmax = filter_keys['x']
    ymin, ymax = filter_keys['y']

    canvas.selectionStart = (xmin, ymin)
    canvas.selectionFinish = (xmax, ymax)
|
|
1061
|
+
|
|
1062
|
+
try:
|
|
1063
|
+
import PYMEnf.DriftCorrection.compactFit as cf
|
|
1064
|
+
except ImportError:
|
|
1065
|
+
pass
|
|
1066
|
+
|
|
1067
|
+
def getdriftcurves(driftPane, pipeline, t=None):
    """Evaluate the fitted x/y drift-correction curves at frame times *t*.

    If the drift expression references 'driftx', the measured drift event
    traces are interpolated and supplied as independent variables;
    otherwise the pipeline's filtered data is used directly.

    Returns (dx, dy, tt).
    """
    if t is None:
        t = pipeline['t']

    if 'driftx' in driftPane.dp.driftExprX:
        tt, drift_x = getdriftxyzFromEvts(pipeline, t, coordpos=0)
        tt, drift_y = getdriftxyzFromEvts(pipeline, t, coordpos=1)
        indepVars = {'t': tt, 'driftx': drift_x, 'drifty': drift_y}
    else:
        indepVars = pipeline.filter

    dx, dy, tt = cf.xyDriftCurves(driftPane.dp.driftCorrFcn,
                                  driftPane.dp.driftCorrParams, indepVars, t)
    return (dx, dy, tt)
|
|
1079
|
+
|
|
1080
|
+
|
|
1081
|
+
from PYME.Analysis import piecewiseMapping
|
|
1082
|
+
from scipy.interpolate import interp1d
|
|
1083
|
+
|
|
1084
|
+
def getdriftxyzFromEvts(pipeline, tframes=None, coordpos=0):
    """Extract one drift coordinate time course from 'ShiftMeasure' events.

    *coordpos* selects the coordinate within the event's comma-separated
    description (0 and 1 are used for x and y by getdriftcurves).  Event
    wall-clock times are mapped to frame numbers; when *tframes* is given
    the trace is interpolated onto those frames.

    Returns (frames, values); with no ShiftMeasure events present the
    original behaviour (implicit None return) is preserved.
    """
    shift_times = []
    shift_vals = []
    for evt in pipeline.events[pipeline.events['EventName'] == 'ShiftMeasure']:
        shift_times.append(evt['Time'])
        shift_vals.append(float(evt['EventDescr'].split(', ')[coordpos]))

    if len(shift_times) > 0:
        shift_times = np.array(shift_times)
        shift_vals = np.array(shift_vals)
        # convert wall-clock times to frame numbers
        tfr = piecewiseMapping.timeToFrames(shift_times, pipeline.events, pipeline.mdh)
        if tframes is not None:
            # np.interp clamps at the ends, avoiding the extrapolation
            # errors interp1d would raise (see the dropped commented code)
            return (tframes, np.interp(tframes, tfr, shift_vals))
        return (tfr, shift_vals)
|
|
1106
|
+
|
|
1107
|
+
def zs(data, navg=100):
    """Zero a series by subtracting the mean of its first *navg* samples
    (or of the whole series when it is shorter than navg)."""
    nlead = min(navg, data.shape[0])
    baseline = data[:nlead].mean()
    return data - baseline
|