PYME-extra 1.0.4.post0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- PYMEcs/Acquire/Actions/__init__.py +0 -0
- PYMEcs/Acquire/Actions/custom.py +167 -0
- PYMEcs/Acquire/Hardware/LPthreadedSimple.py +248 -0
- PYMEcs/Acquire/Hardware/LPthreadedSimpleSim.py +246 -0
- PYMEcs/Acquire/Hardware/NikonTiFlaskServer.py +45 -0
- PYMEcs/Acquire/Hardware/NikonTiFlaskServerT.py +59 -0
- PYMEcs/Acquire/Hardware/NikonTiRESTClient.py +73 -0
- PYMEcs/Acquire/Hardware/NikonTiSim.py +35 -0
- PYMEcs/Acquire/Hardware/__init__.py +0 -0
- PYMEcs/Acquire/Hardware/driftTrackGUI.py +329 -0
- PYMEcs/Acquire/Hardware/driftTrackGUI_n.py +472 -0
- PYMEcs/Acquire/Hardware/driftTracking.py +424 -0
- PYMEcs/Acquire/Hardware/driftTracking_n.py +433 -0
- PYMEcs/Acquire/Hardware/fakeCamX.py +15 -0
- PYMEcs/Acquire/Hardware/offsetPiezoRESTCorrelLog.py +38 -0
- PYMEcs/Acquire/__init__.py +0 -0
- PYMEcs/Analysis/MBMcollection.py +552 -0
- PYMEcs/Analysis/MINFLUX.py +280 -0
- PYMEcs/Analysis/MapUtils.py +77 -0
- PYMEcs/Analysis/NPC.py +1176 -0
- PYMEcs/Analysis/Paraflux.py +218 -0
- PYMEcs/Analysis/Simpler.py +81 -0
- PYMEcs/Analysis/Sofi.py +140 -0
- PYMEcs/Analysis/__init__.py +0 -0
- PYMEcs/Analysis/decSofi.py +211 -0
- PYMEcs/Analysis/eventProperties.py +50 -0
- PYMEcs/Analysis/fitDarkTimes.py +569 -0
- PYMEcs/Analysis/objectVolumes.py +20 -0
- PYMEcs/Analysis/offlineTracker.py +130 -0
- PYMEcs/Analysis/stackTracker.py +180 -0
- PYMEcs/Analysis/timeSeries.py +63 -0
- PYMEcs/Analysis/trackFiducials.py +186 -0
- PYMEcs/Analysis/zerocross.py +91 -0
- PYMEcs/IO/MINFLUX.py +851 -0
- PYMEcs/IO/NPC.py +117 -0
- PYMEcs/IO/__init__.py +0 -0
- PYMEcs/IO/darkTimes.py +19 -0
- PYMEcs/IO/picasso.py +219 -0
- PYMEcs/IO/tabular.py +11 -0
- PYMEcs/__init__.py +0 -0
- PYMEcs/experimental/CalcZfactor.py +51 -0
- PYMEcs/experimental/FRC.py +338 -0
- PYMEcs/experimental/ImageJROItools.py +49 -0
- PYMEcs/experimental/MINFLUX.py +1537 -0
- PYMEcs/experimental/NPCcalcLM.py +560 -0
- PYMEcs/experimental/Simpler.py +369 -0
- PYMEcs/experimental/Sofi.py +78 -0
- PYMEcs/experimental/__init__.py +0 -0
- PYMEcs/experimental/binEventProperty.py +187 -0
- PYMEcs/experimental/chaining.py +23 -0
- PYMEcs/experimental/clusterTrack.py +179 -0
- PYMEcs/experimental/combine_maps.py +104 -0
- PYMEcs/experimental/eventProcessing.py +93 -0
- PYMEcs/experimental/fiducials.py +323 -0
- PYMEcs/experimental/fiducialsNew.py +402 -0
- PYMEcs/experimental/mapTools.py +271 -0
- PYMEcs/experimental/meas2DplotDh5view.py +107 -0
- PYMEcs/experimental/mortensen.py +131 -0
- PYMEcs/experimental/ncsDenoise.py +158 -0
- PYMEcs/experimental/onTimes.py +295 -0
- PYMEcs/experimental/procPoints.py +77 -0
- PYMEcs/experimental/pyme2caml.py +73 -0
- PYMEcs/experimental/qPAINT.py +965 -0
- PYMEcs/experimental/randMap.py +188 -0
- PYMEcs/experimental/regExtraCmaps.py +11 -0
- PYMEcs/experimental/selectROIfilterTable.py +72 -0
- PYMEcs/experimental/showErrs.py +51 -0
- PYMEcs/experimental/showErrsDh5view.py +58 -0
- PYMEcs/experimental/showShiftMap.py +56 -0
- PYMEcs/experimental/snrEvents.py +188 -0
- PYMEcs/experimental/specLabeling.py +51 -0
- PYMEcs/experimental/splitRender.py +246 -0
- PYMEcs/experimental/testChannelByName.py +36 -0
- PYMEcs/experimental/timedSpecies.py +28 -0
- PYMEcs/experimental/utils.py +31 -0
- PYMEcs/misc/ExtraCmaps.py +177 -0
- PYMEcs/misc/__init__.py +0 -0
- PYMEcs/misc/configUtils.py +169 -0
- PYMEcs/misc/guiMsgBoxes.py +27 -0
- PYMEcs/misc/mapUtils.py +230 -0
- PYMEcs/misc/matplotlib.py +136 -0
- PYMEcs/misc/rectsFromSVG.py +182 -0
- PYMEcs/misc/shellutils.py +1110 -0
- PYMEcs/misc/utils.py +205 -0
- PYMEcs/misc/versionCheck.py +20 -0
- PYMEcs/misc/zcInfo.py +90 -0
- PYMEcs/pyme_warnings.py +4 -0
- PYMEcs/recipes/__init__.py +0 -0
- PYMEcs/recipes/base.py +75 -0
- PYMEcs/recipes/localisations.py +2380 -0
- PYMEcs/recipes/manipulate_yaml.py +83 -0
- PYMEcs/recipes/output.py +177 -0
- PYMEcs/recipes/processing.py +247 -0
- PYMEcs/recipes/simpler.py +290 -0
- PYMEcs/version.py +2 -0
- pyme_extra-1.0.4.post0.dist-info/METADATA +114 -0
- pyme_extra-1.0.4.post0.dist-info/RECORD +101 -0
- pyme_extra-1.0.4.post0.dist-info/WHEEL +5 -0
- pyme_extra-1.0.4.post0.dist-info/entry_points.txt +3 -0
- pyme_extra-1.0.4.post0.dist-info/licenses/LICENSE +674 -0
- pyme_extra-1.0.4.post0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,130 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
|
|
3
|
+
import numpy as np
|
|
4
|
+
from scipy import ndimage
|
|
5
|
+
|
|
6
|
+
def genRef(refimage, normalised=True):
    """Build calibration data for correlation-based (x, y, z) drift tracking.

    Parameters
    ----------
    refimage : 3d array (nx, ny, nz) reference z-stack.
    normalised : if True the planes are assumed to already be zero-mean
        contrast images; otherwise each plane is normalised as d/mean(d) - 1.

    Returns
    -------
    calImages : masked calibration planes (nx, ny, nz)
    calFTs : inverse FFT of each (normalised) plane, used as the correlation
        kernel by compare()
    dz : per-plane z-gradient of the calibration stack, flattened per plane
    dzn : 1/|dz|^2 normalisation per plane
    maskg : smoothed boundary mask
    X, Y : centred pixel coordinate grids
    """
    nz = refimage.shape[2]

    X, Y = np.mgrid[0.0:refimage.shape[0], 0.0:refimage.shape[1]]
    X -= refimage.shape[0] / 2
    Y -= refimage.shape[1] / 2

    # zero out a 20-pixel border, then smooth so edge effects taper off
    bdry = 20
    mask = np.ones_like(refimage[:, :, 0], dtype='float64')
    mask[:bdry, :] = 0
    mask[-bdry:, :] = 0
    mask[:, :bdry] = 0
    mask[:, -bdry:] = 0

    maskg = ndimage.gaussian_filter(mask.astype('float'), 5)

    # NOTE: these were previously hard-coded to 21 planes; use the actual
    # stack depth so stacks of any (odd or even) depth work
    calImages = np.zeros(refimage.shape[:2] + (nz,))
    calFTs = np.zeros(refimage.shape[:2] + (nz,), dtype='complex64')

    for i in range(nz):
        if not normalised:
            d = refimage[:, :, i]
            ref = d / d.mean() - 1
        else:
            ref = refimage[:, :, i]

        calFTs[:, :, i] = np.fft.ifftn(ref)
        calImages[:, :, i] = ref * maskg

    # z-gradient of the calibration stack, one flattened column per plane
    dz = np.gradient(calImages)[2].reshape(-1, nz)
    dzn = np.hstack([1. / np.dot(dz[:, i], dz[:, i]) for i in range(nz)])

    return calImages, calFTs, dz, dzn, maskg, X, Y
|
|
37
|
+
|
|
38
|
+
def compare(calImages, calFTs, dz, dzn, posInd, image, mask, X, Y, normalised=False, deltaZ = 0.2):
    """Estimate (x, y, z) drift of `image` relative to calibration plane `posInd`.

    Parameters
    ----------
    calImages, calFTs, dz, dzn, X, Y : calibration data as returned by genRef
    posInd : index of the calibration plane to compare against
    image : 2d frame to measure
    mask : (smoothed) boundary mask applied after re-registration
    normalised : if False, the frame is first normalised to zero-mean contrast
    deltaZ : z step between calibration planes, scales the returned z estimate

    Returns
    -------
    dx, dy : sub-pixel lateral shift (pixels)
    dzz : estimated axial offset (same units as deltaZ)
    Cm : correlation peak height (a lock-quality measure)
    dm : the normalised input frame
    """
    d = 1.0*image

    if not normalised:
        dm = d/d.mean() - 1
    else:
        dm = d

    FA = calFTs[:,:,posInd]
    refA = calImages[:,:,posInd]

    ddz = dz[:,posInd]
    dznn = dzn[posInd]

    # FFT cross-correlation of the frame with the stored inverse transform
    C = np.fft.ifftshift(np.abs(np.fft.ifftn(np.fft.fftn(dm)*FA)))
    #C = ifftshift(np.abs(ifftn(fftn(A)*ifftn(B))))
    Cm = C.max()

    # centroid of the thresholded correlation peak gives a sub-pixel shift
    Cp = np.maximum(C - 0.5*Cm, 0)
    Cpsum = Cp.sum()

    dx = (X*Cp).sum()/Cpsum
    dy = (Y*Cp).sum()/Cpsum

    # shift the frame back into registration before the axial comparison
    ds = ndimage.shift(dm, [-dx, -dy])*mask
    ds_A = (ds - refA)

    # project the residual onto the z-gradient of the calibration stack
    dzz = deltaZ*np.dot(ds_A.ravel(), ddz)*dznn

    return dx, dy, dzz, Cm, dm
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def genRefxy(refimage, normalised=True):
|
|
71
|
+
X, Y = np.mgrid[0.0:refimage.shape[0], 0.0:refimage.shape[1]]
|
|
72
|
+
X -= refimage.shape[0]/2
|
|
73
|
+
Y -= refimage.shape[1]/2
|
|
74
|
+
bdry = 20
|
|
75
|
+
mask = np.ones_like(refimage,dtype='float64')
|
|
76
|
+
mask[:bdry, :] = 0
|
|
77
|
+
mask[-bdry:, :] = 0
|
|
78
|
+
mask[:, :bdry] = 0
|
|
79
|
+
mask[:,-bdry:] = 0
|
|
80
|
+
|
|
81
|
+
maskg = ndimage.gaussian_filter(mask.astype('float'),5)
|
|
82
|
+
|
|
83
|
+
calImages = np.zeros(refimage.shape[:2])
|
|
84
|
+
calFTs = np.zeros(refimage.shape[:2], dtype='complex64')
|
|
85
|
+
|
|
86
|
+
if not normalised:
|
|
87
|
+
d = refimage
|
|
88
|
+
ref = d/d.mean() - 1
|
|
89
|
+
else:
|
|
90
|
+
ref = refimage
|
|
91
|
+
|
|
92
|
+
calFT = np.fft.ifftn(ref)
|
|
93
|
+
calImage = ref*maskg
|
|
94
|
+
|
|
95
|
+
return calImage, calFT, maskg, X, Y
|
|
96
|
+
|
|
97
|
+
def comparexy(calImage, calFT, image, mask, X, Y, normalised=False):
    """Measure the lateral (x, y) shift of `image` against the calibration.

    `calImage` and `mask` are accepted for interface symmetry with genRefxy
    output but are not used by the lateral-only estimate.

    Returns (dx, dy, Cm, dm): sub-pixel shift in pixels, correlation peak
    height, and the normalised frame.
    """
    if normalised:
        dm = 1.0 * image
    else:
        frame = 1.0 * image
        dm = frame / frame.mean() - 1

    # FFT cross-correlation with the stored inverse transform of the reference
    corr = np.abs(np.fft.ifftn(np.fft.fftn(dm) * calFT))
    corr = np.fft.ifftshift(corr)
    peak = corr.max()

    # centroid of the thresholded correlation peak -> sub-pixel shift
    weights = np.maximum(corr - 0.5 * peak, 0)
    wsum = weights.sum()
    dx = (X * weights).sum() / wsum
    dy = (Y * weights).sum() / wsum

    return dx, dy, peak, dm
|
|
119
|
+
|
|
120
|
+
def trackstack(ims, refim, pixx=20.0, pixy=20.0):
    """Track lateral drift of every frame of `ims` against `refim`.

    Parameters
    ----------
    ims : 3d array, frames stacked along the third axis
    refim : 2d reference frame
    pixx, pixy : pixel size used to scale the returned shifts

    Returns (dx, dy, quality) arrays; quality is the correlation peak
    normalised to its maximum over the series.
    """
    calImage, calFT, maskg, X, Y = genRefxy(refim, normalised=False)

    records = []
    for frame_idx in range(ims.shape[2]):
        dx, dy, peak, _ = comparexy(calImage, calFT, ims[:, :, frame_idx], maskg, X, Y)
        records.append((dx, dy, peak))

    ha = np.array(records)
    return (pixx * ha[:, 0], pixy * ha[:, 1], ha[:, 2] / ha[:, 2].max())
|
|
130
|
+
|
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from scipy import ndimage
|
|
3
|
+
from numpy.fft import fftn, ifftn, fftshift, ifftshift
|
|
4
|
+
import matplotlib.pyplot as plt
|
|
5
|
+
|
|
6
|
+
# we use this code as a simple and preliminary way to calculate the z-factor
|
|
7
|
+
# this should eventually be reworked as a nicer codebase and also replace/subsume the
|
|
8
|
+
# offline tracker code base
|
|
9
|
+
|
|
10
|
+
# note that this may or may not work!
|
|
11
|
+
def zdiff(data):
    """Forward difference of `data` along the z (third) axis.

    The last plane is padded by repeating the previous difference so the
    output has the same shape as the input (float32).
    """
    out = np.zeros_like(data, dtype='f')
    out[:, :, :-1] = data[:, :, 1:] - 1.0 * data[:, :, :-1]
    # pad the final plane with the last computed difference
    out[:, :, -1] = out[:, :, -2]
    return out
|
|
16
|
+
|
|
17
|
+
# subsample stack from dz0 spacing to dzs sampling, dzs should be even multiple of dz0
|
|
18
|
+
# note: we assume the center image is the target position
|
|
19
|
+
def substack(stack, subsamplefactor, newszhalf=None):
    """Optionally crop a z-stack around its centre and pick reference planes.

    The stack must have an odd number of planes; the centre plane is the
    target position. Reference planes are taken every `subsamplefactor`
    planes, symmetrically around the centre.

    Returns (nstack, refims): the (possibly cropped) stack and the
    subsampled reference planes.
    """
    ssfac = int(subsamplefactor)
    nz = stack.shape[2]
    zhalf = nz // 2
    if 2 * zhalf + 1 != nz:
        raise RuntimeError("z dimension must be odd")

    if newszhalf is None:
        nstack = stack
    else:
        if newszhalf > zhalf:
            raise RuntimeError("new stack size must be smaller or equal old stack size")
        # symmetric crop around the centre plane
        nstack = stack[:, :, zhalf - newszhalf:zhalf + newszhalf + 1]
        zhalf = newszhalf

    # largest symmetric range of whole subsampling periods around the centre
    halfperiods = zhalf // ssfac
    lo = zhalf - halfperiods * ssfac
    hi = zhalf + halfperiods * ssfac + 1
    refims = nstack[:, :, lo:hi:ssfac]

    return (nstack, refims)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
# this one should initialise FFTs etc for a suitable substack and return all the relevant
|
|
40
|
+
# items in a dict (may make class in a future version)
|
|
41
|
+
# return everything as a stackobject, initially just a dict with the required entries
|
|
42
|
+
def initialise_data(stack, subsamplefactor, vsznm, newszhalf=None):
    """Prepare a tracking 'stack object' (currently a plain dict).

    Crops/subsamples the stack via substack(), precomputes calibration data
    via genRefData(), and bundles everything needed by
    get_shifts_from_stackobject().

    Parameters
    ----------
    stack : 3d z-stack (odd number of planes)
    subsamplefactor : spacing (in planes) between reference planes
    vsznm : voxel size object with .x/.y/.z attributes in nm
        (assumed - TODO confirm against callers)
    newszhalf : optional half-size to crop the stack to
    """
    nstack, refimages_raw = substack(stack, subsamplefactor, newszhalf=newszhalf)
    (refimages, calImages, calFTs, gradI,
     gradIsqr_inv, mask, X, Y) = genRefData(refimages_raw)

    return {
        'stack': nstack,
        'refimages_raw': refimages_raw,
        'refimages': refimages,
        'calImages': calImages,
        'calFTs': calFTs,
        'gradI': gradI,
        'gradIsqr_inv': gradIsqr_inv,
        'mask': mask,
        'X': X,
        'Y': Y,
        'voxelsize_nm': vsznm,
        'subsamplefactor': int(subsamplefactor),
    }
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
# this one takes an initialised stackobject and returns the relevant shifts by taking
|
|
64
|
+
# the center of the substack as the relevant level to refer to
|
|
65
|
+
def get_shifts_from_stackobject(trackobject):
    """Measure per-plane drift of every stack plane against the centre
    reference plane of an initialised stack object.

    Returns (dxnm, dynm, dznm): drift per plane, lateral values scaled to nm
    by the voxel size; z comes out in nm directly via the deltaZ scaling.
    """
    to = trackobject
    shifts = []

    ref_center = to['refimages'].shape[2] // 2
    dz_step = to['subsamplefactor'] * to['voxelsize_nm'].z

    for plane in range(to['stack'].shape[2]):
        frame = to['stack'][:, :, plane]
        driftx, drifty, driftz, _, _ = compare(to['refimages'],
                                               to['calImages'],
                                               to['calFTs'],
                                               to['gradI'],
                                               to['gradIsqr_inv'],
                                               ref_center, frame,
                                               to['mask'],
                                               to['X'],
                                               to['Y'],
                                               deltaZ=dz_step)
        shifts.append((driftx, drifty, driftz))

    sh = np.array(shifts)
    dxnm = to['voxelsize_nm'].x * sh[:, 0]
    dynm = to['voxelsize_nm'].y * sh[:, 1]
    dznm = sh[:, 2]

    return (dxnm, dynm, dznm)
|
|
94
|
+
|
|
95
|
+
def fit_and_plot_zf(dxnm, dynm, dznm, trackobject):
    """Fit and plot the z-factor from a measured drift response curve.

    Fits a line to the central +/-3 planes of measured z response vs the
    theoretical z position and plots both over the central +/-4 planes.
    `dxnm` and `dynm` are accepted for interface symmetry with
    get_shifts_from_stackobject() output but are not used here.

    Returns
    -------
    zfactor : float, 1/slope of the fitted response (previously this was
        only drawn on the plot; returning it is backward compatible).
    """
    to = trackobject
    zszh = dznm.shape[0] // 2
    # theoretical z position of each plane relative to the centre
    dztheo = (np.arange(dznm.shape[0]) - zszh) * to['voxelsize_nm'].z

    # linear fit over the central +/-3 planes
    # (an unused dzexp slice computed here previously has been removed)
    x = dztheo[zszh - 3:zszh + 4]
    y = dznm[zszh - 3:zszh + 4]
    m, b = np.polyfit(x, y, 1)

    plt.figure()
    plt.plot(dztheo[zszh - 4:zszh + 5], dznm[zszh - 4:zszh + 5], '-o')
    # fitted response through the origin (intercept deliberately dropped)
    plt.plot(dztheo[zszh - 4:zszh + 5], m * dztheo[zszh - 4:zszh + 5], '--')

    zfactor = 1.0 / m
    font = {'family': 'serif',
            'color': 'darkred',
            'weight': 'normal',
            'size': 14,
            }
    plt.text(-150, 50, 'Z-factor = %.2f' % zfactor, fontdict=font)

    return zfactor
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def genRefData(refimages_raw, bdry=10, useSimplediff=False):
    """Precompute calibration data from raw reference planes.

    Parameters
    ----------
    refimages_raw : 3d array of raw reference planes
    bdry : border width (pixels) zeroed in the mask
    useSimplediff : if True use the simple forward difference (zdiff) for the
        z-gradient, otherwise numpy.gradient

    Returns (refimages, calImages, calFTs, gradI, gradIsqr_inv, mask, X, Y).
    """
    X, Y = np.mgrid[0.0:refimages_raw.shape[0], 0.0:refimages_raw.shape[1]]
    X -= np.ceil(refimages_raw.shape[0] * 0.5)
    Y -= np.ceil(refimages_raw.shape[1] * 0.5)

    # hard (unsmoothed) boundary mask
    mask = np.ones_like(refimages_raw[:, :, 0])
    mask[:bdry, :] = 0
    mask[-bdry:, :] = 0
    mask[:, :bdry] = 0
    mask[:, -bdry:] = 0

    refimages = np.zeros_like(refimages_raw, dtype='f')
    calImages = np.zeros_like(refimages_raw, dtype='f')
    calFTs = np.zeros_like(refimages_raw, dtype='complex64')

    nplanes = refimages_raw.shape[2]
    for plane in range(nplanes):
        # should we work with offset corrected images or irrelevant?
        raw = refimages_raw[:, :, plane]
        normed = raw / raw.mean() - 1

        refimages[:, :, plane] = normed
        calFTs[:, :, plane] = ifftn(normed)
        calImages[:, :, plane] = normed * mask

    if useSimplediff:
        gradI = zdiff(calImages).reshape(-1, nplanes)
    else:
        gradI = np.gradient(calImages)[2].reshape(-1, nplanes)
    gradIsqr_inv = np.hstack([1. / np.dot(gradI[:, j], gradI[:, j])
                              for j in range(nplanes)])

    return refimages, calImages, calFTs, gradI, gradIsqr_inv, mask, X, Y
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
def compare(refImages, calImages, calFTs, gradI, gradIsqr_inv, posInd, image, mask, X, Y, deltaZ = 0.2):
    """Estimate (x, y, z) drift of `image` against calibration plane `posInd`.

    Parameters
    ----------
    refImages : unused here; kept for interface symmetry with genRefData output
    calImages, calFTs, gradI, gradIsqr_inv, mask, X, Y : as from genRefData
    posInd : index of the calibration plane to compare against
    image : 2d frame to measure
    deltaZ : z spacing between calibration planes, scales the z estimate

    Returns
    -------
    dx, dy : sub-pixel lateral shift (pixels)
    dz : estimated axial offset (units of deltaZ)
    Cm : correlation peak height
    dm : the normalised input frame
    """
    d = 1.0*image
    dm = d/d.mean() - 1

    FA = calFTs[:,:,posInd]
    refA = calImages[:,:,posInd]

    gradIA = gradI[:,posInd]
    gradIsqr_invA = gradIsqr_inv[posInd]

    # FFT cross-correlation of the frame with the stored inverse transform
    C = ifftshift(np.abs(ifftn(fftn(dm)*FA)))

    Cm = C.max()

    # centroid of the thresholded correlation peak gives a sub-pixel shift
    Cp = np.maximum(C - 0.5*Cm, 0)
    Cpsum = Cp.sum()

    dx = (X*Cp).sum()/Cpsum
    dy = (Y*Cp).sum()/Cpsum

    # shift the frame back into registration before the axial comparison
    ds = ndimage.shift(dm, [-dx, -dy])*mask

    ds_A = (ds - refA)

    #calculate z offset between actual position and calibration position
    dz = deltaZ*np.dot(ds_A.ravel(), gradIA)*gradIsqr_invA

    return dx, dy, dz, Cm, dm
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import matplotlib.pyplot as plt
|
|
3
|
+
|
|
4
|
+
import logging
|
|
5
|
+
logger = logging.getLogger(__name__)
|
|
6
|
+
|
|
7
|
+
def interlaceTraces(traces):
    """Interleave several equal-length traces element by element.

    E.g. ([1, 2], [3, 4]) -> array([1, 3, 2, 4]).
    """
    stacked = np.vstack(traces)
    return stacked.transpose().flatten()
|
|
9
|
+
|
|
10
|
+
# given a series of detected events as input
|
|
11
|
+
# construct a single channel trace for plotting
|
|
12
|
+
# this works by constructing a time point series and
|
|
13
|
+
# a pulse series (0 and 1 values) that goes up and down
|
|
14
|
+
# as dictated by the event series passed in
|
|
15
|
+
#
|
|
16
|
+
# algorithmically it detects where series of events in consecutive
|
|
17
|
+
# groups begin and end and injects plotting points at those
|
|
18
|
+
# places
|
|
19
|
+
#
|
|
20
|
+
# the returned two arrays can be directly passed to a plot command
|
|
21
|
+
# to produce a "single channel"-like trace
|
|
22
|
+
def generateSeries(t):
    """Build a square-pulse (time, value) series from sorted event frame numbers.

    Given frame numbers `t` at which events were detected, returns
    (times, pulses) suitable for direct plotting as a "single channel"-like
    trace: the pulse is 1 over each run of consecutive frames and 0 in gaps.

    Handles the previously-crashing empty input by returning empty arrays;
    a single event produces a unit-width pulse.
    """
    if t.shape[0] == 0:
        # empty event series -> empty trace (original raised IndexError here)
        return (np.array([]), np.array([]))

    if t.shape[0] <= 1:
        return ([t[0], t[0], t[0] + 1, t[0] + 1], [0, 1, 1, 0])

    def _interlace(traces):
        # interleave equal-length traces element by element
        return np.vstack(traces).transpose().flatten()

    # a positive value marks a gap between consecutive event runs
    # (unused dtg/nts intermediates from the original removed)
    dts = t[1:] - t[0:-1] - 1
    idx, = np.where(dts > 0)
    one = np.ones_like(idx)
    z = np.zeros_like(idx)

    # at each gap the pulse drops to 0 at the end of a run and rises back
    # to 1 at the start of the next run
    tSer = _interlace((t[idx], t[idx], t[idx + 1], t[idx + 1]))
    pulseSer = _interlace((one, z, z, one))

    tSerAll = np.hstack(([t[0], t[0]], tSer, [t[-1], t[-1]]))
    pulseAll = np.hstack(([0, 1], pulseSer, [1, 0]))

    return (tSerAll, pulseAll)
|
|
42
|
+
|
|
43
|
+
# this one uses the functionality of generateSeries
|
|
44
|
+
# for all 'clumps' identified by the same clumpIndex
|
|
45
|
+
#
|
|
46
|
+
# it therefore requires that 'Track single molecule trajectories'
|
|
47
|
+
# has been run; it must be run on the original 'Localisations' data source,
|
|
48
|
+
# *not* the coalesced data source
|
|
49
|
+
#
|
|
50
|
+
# by default it starts a new figure
|
|
51
|
+
# the plot line colour changes between clumps
|
|
52
|
+
# in an effectively random fashion
|
|
53
|
+
def plotClumpSeries(t, ci, newFig = True):
    """Plot a single-channel-style pulse trace for every clump.

    Parameters
    ----------
    t : event frame numbers
    ci : clump index per event (as produced by single-molecule tracking on
        the uncoalesced data source)
    newFig : if True, start a new figure

    Line colours cycle per clump via matplotlib's default colour cycle.
    """
    if newFig:
        plt.figure()

    for clump_id in np.unique(ci):
        t_clump = t[ci == clump_id]
        times, pulses = generateSeries(t_clump)
        plt.plot(times, pulses)

    plt.ylim(-0.5, 1.5)
|
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
from __future__ import print_function # (at top of module)
|
|
2
|
+
|
|
3
|
+
import numpy as np
|
|
4
|
+
from scipy import ndimage
|
|
5
|
+
from collections import OrderedDict
|
|
6
|
+
import warnings
|
|
7
|
+
|
|
8
|
+
import logging
|
|
9
|
+
logger = logging.getLogger(__name__)
|
|
10
|
+
|
|
11
|
+
# this is just a slightly cleaned up version of the core code of
|
|
12
|
+
# Kenny's fiducial tracker from PYMEnf
|
|
13
|
+
|
|
14
|
+
# currently only does x and y dims
|
|
15
|
+
|
|
16
|
+
# in this version we enforce that the returned fiducial track
|
|
17
|
+
# starts out of zero
|
|
18
|
+
|
|
19
|
+
def foffset(t, ft, navg=50):
    """Baseline offset of a fiducial track.

    Takes the first occurrence of each (integer) time point and averages the
    first `navg` of the corresponding track values, so that the track can be
    shifted to start out of zero.
    """
    _, first_idx = np.unique(t.astype('int'), return_index=True)
    fu = ft[first_idx]
    navg_eff = min(navg, fu.shape[0])
    return fu[:navg_eff].mean()
|
|
24
|
+
|
|
25
|
+
def makeFilter(filtFunc):
    '''wrapper function for different filters'''
    def ffcn(t, data, scale):
        # apply filtFunc to each trace in `data`, first filling NaN gaps
        # by linear interpolation onto the full time base `t`
        smoothed = {}
        for key, vals in data.items():
            good = ~np.isnan(vals)
            filled = np.interp(t, t[good], vals[good])
            smoothed[key] = filtFunc(filled, scale)
        return smoothed
    return ffcn
|
|
35
|
+
|
|
36
|
+
# Named smoothing filters for fiducial drift traces; each entry is a
# makeFilter-wrapped callable with signature (t, data_dict, scale).
FILTER_FUNCS = {
    'Gaussian' : makeFilter(ndimage.gaussian_filter),
    'Uniform' : makeFilter(ndimage.uniform_filter),
    # must be int(size), otherwise gets float type error!!!!!
    'Median' : makeFilter(lambda input, size: ndimage.median_filter(input,int(size)))
    }
|
|
42
|
+
|
|
43
|
+
def extractTrajectoriesClump(ds, clumpRadiusVar = 'error_x', clumpRadiusMultiplier=5.0,
                             timeWindow=25, clumpMinSize=50, align=True):
    """Find fiducial bead trajectories by clumping localisations over time.

    Parameters
    ----------
    ds : datasource with 't', 'x', 'y', 'z' (and `clumpRadiusVar`) columns
    clumpRadiusVar : column scaling the clump search radius, or the literal
        string '1.0' for a constant radius
    clumpRadiusMultiplier : multiplier applied to the radius variable
    timeWindow : maximum frame gap allowed within a clump
    clumpMinSize : minimum event count for a clump to count as a fiducial
    align : if True, each trajectory is mean-subtracted

    Returns
    -------
    (t_f, x_f, y_f, z_f, isFiducial): x_f/y_f/z_f are (n_clumps, n_frames)
    arrays (NaN where a clump has no event), ordered largest clump first;
    isFiducial flags the input events belonging to any accepted clump.
    """
    import PYME.Analysis.points.DeClump.deClump as deClump

    # track beads through frames
    if clumpRadiusVar == '1.0':
        delta_x = 0*ds['x'] + clumpRadiusMultiplier
    else:
        delta_x = clumpRadiusMultiplier*ds[clumpRadiusVar]

    t = ds['t'].astype('i')
    x = ds['x'].astype('f4')
    y = ds['y'].astype('f4')
    z = ds['z'].astype('f4')
    delta_x = delta_x.astype('f4')

    # deClump expects time-sorted input; scatter results back to input order
    I = np.argsort(t)
    clumpIndex = np.zeros(len(x), dtype='i')
    isFiducial = np.zeros(len(x), dtype='i')
    clumpIndex[I] = deClump.findClumpsN(t[I], x[I], y[I], delta_x[I], timeWindow)

    tMax = t.max()
    clumpIndices = list(set(clumpIndex))

    x_f = []
    y_f = []
    z_f = []
    clump_sizes = []

    t_f = np.arange(0, tMax + 1, dtype='i')

    # loop over all our clumps and extract trajectories
    for ci in clumpIndices:
        if ci > 0:
            clump_mask = (clumpIndex == ci)
            x_i = x[clump_mask]
            clump_size = len(x_i)

            if clump_size > clumpMinSize:
                y_i = y[clump_mask]
                z_i = z[clump_mask]
                t_i = t[clump_mask].astype('i')
                isFiducial[clump_mask] = 1 # mark the event mask that this is a fiducial

                def _embed(vals):
                    # place clump values on the full time axis, NaN elsewhere
                    # (np.nan: the np.NaN alias was removed in NumPy 2.0)
                    filled = np.nan * np.ones_like(t_f)
                    filled[t_i] = vals - vals.mean() if align else vals
                    return filled

                x_f.append(_embed(x_i))
                y_f.append(_embed(y_i))
                z_f.append(_embed(z_i))
                clump_sizes.append(clump_size)

    # re-order to start with the largest clump
    clumpOrder = np.argsort(clump_sizes)[::-1]
    x_f = np.array(x_f)[clumpOrder,:]
    y_f = np.array(y_f)[clumpOrder,:]
    z_f = np.array(z_f)[clumpOrder,:]

    return (t_f, x_f, y_f, z_f, isFiducial)
|
|
120
|
+
|
|
121
|
+
def AverageTrack(ds, tracks, filter='Gaussian', filterScale=10.0, align=True):
    """Combine per-fiducial tracks into one averaged, filtered drift track.

    Parameters
    ----------
    ds : datasource providing the event times 't'
    tracks : (t_f, x_f, y_f, z_f) as produced by extractTrajectoriesClump
        (NOTE: that function returns a 5-tuple including isFiducial; callers
        must pass only the first four elements - TODO confirm)
    filter : key into FILTER_FUNCS ('Gaussian', 'Uniform' or 'Median')
    filterScale : filter size/scale parameter
    align : if True, iteratively remove per-track offsets before averaging

    Returns
    -------
    dict with 'x', 'y', 'z' corrections interpolated onto the event times,
    each offset so the correction starts at zero.
    """
    t_f, x_f, y_f, z_f = tracks
    t = ds['t'].astype('i')

    # (the unused commented-out _mf helper and a dead pre-loop
    # mm = np.nanmean(...) assignment from the original have been removed)

    def _align(meas, tol=.1):
        # iteratively subtract per-track offsets until the summed squared
        # offset change falls below tol (or 50 iterations)
        n_iters = 0
        dm_old = 5e12
        dm = 4e12

        while ((dm_old - dm) > tol) and (n_iters < 50):
            dm_old = dm
            mm = np.nanmean(meas, 0)
            d = np.nanmean(meas - mm, 1)
            dm = sum(d**2)
            meas = meas - d[:, None]
            n_iters += 1
            print(n_iters, dm)

        # nanmean can generate warnings with all nan values
        # we catch them in this block only
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message='Mean of empty slice')
            mm = np.nanmean(meas, 0)

        print('Finished:', n_iters, dm)
        return mm

    def _simpleav(meas):
        # plain NaN-aware average across tracks
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message='Mean of empty slice')
            return np.nanmean(meas, 0)

    if align:
        x_corr = _align(x_f)
        y_corr = _align(y_f)
        z_corr = _align(z_f)
    else:
        x_corr = _simpleav(x_f)
        y_corr = _simpleav(y_f)
        z_corr = _simpleav(z_f)

    filtered_corr_woffs = FILTER_FUNCS[filter](t_f, {'x' : x_corr, 'y':y_corr, 'z':z_corr}, filterScale)

    # subtract the starting offset so corrections begin at zero,
    # then resample onto the event times
    filtered_corr = {dim: vals - foffset(t_f, vals)
                     for dim, vals in filtered_corr_woffs.items()}

    return {dim: np.interp(t, t_f, filtered_corr[dim]) for dim in filtered_corr}
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
# this code is borrowed from PyAstronomy
|
|
2
|
+
# I include their license below under which this code was made available
|
|
3
|
+
|
|
4
|
+
# Copyright (c) 2011, PyA group
|
|
5
|
+
|
|
6
|
+
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
|
|
7
|
+
# software and associated documentation files (the "Software"), to deal in the Software
|
|
8
|
+
# without restriction, including without limitation the rights to use, copy, modify, merge,
|
|
9
|
+
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
|
|
10
|
+
# to whom the Software is furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
# The above copyright notice and this permission notice shall be included in all copies
|
|
13
|
+
# or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
|
|
16
|
+
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
|
|
17
|
+
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
|
|
18
|
+
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
|
19
|
+
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
20
|
+
# DEALINGS IN THE SOFTWARE.
|
|
21
|
+
|
|
22
|
+
from __future__ import division
|
|
23
|
+
import numpy as np
|
|
24
|
+
# from PyAstronomy.pyaC import pyaErrors as PE
|
|
25
|
+
|
|
26
|
+
def zerocross1d(x, y, getIndices=False):
    """
    Find the zero crossing points in 1d data.

    Sign changes between neighbouring samples are resolved to sub-sample
    precision by linear interpolation. Samples that are exactly zero count
    as crossings only if the sign actually changes across them; the first
    and last samples are never reported, whether zero or not.

    Parameters
    ----------
    x, y : arrays
        Ordinate and abscissa data values.
    getIndices : boolean, optional
        If True, also return the indices of the points preceding each
        zero crossing. Default is False.

    Returns
    -------
    xvals : array
        Locations of the zero crossings (linear interpolation).
    indices : array, optional
        Indices of the points preceding the crossings; only returned
        when `getIndices` is True.
    """
    # x must be strictly increasing
    if np.any((x[1:] - x[0:-1]) <= 0.0):
        raise( RuntimeError("zerocross1d: The x-values must be sorted in ascending order! " +
                "Sort the data prior to calling zerocross1d."))

    # samples immediately before a strict sign change
    before = np.where(y[1:] * y[0:-1] < 0.0)[0]

    # linear interpolation between the bracketing samples
    step_x = x[before + 1] - x[before]
    step_y = y[before + 1] - y[before]
    crossings = x[before] - y[before] * (step_x / step_y)

    # exactly-zero interior samples with a sign change around them
    exact = np.where(y == 0.0)[0]
    exact = exact[np.where((exact > 0) & (exact < x.size - 1))]
    exact = exact[np.where(y[exact - 1] * y[exact + 1] < 0.0)]

    # merge interpolated and exact crossings, sorted by location
    all_idx = np.concatenate((before, exact))
    all_x = np.concatenate((crossings, x[exact]))

    order = np.argsort(all_x)
    all_x, all_idx = all_x[order], all_idx[order]

    if not getIndices:
        return all_x
    else:
        return all_x, all_idx
|