pyme_extra-1.0.4.post0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. PYMEcs/Acquire/Actions/__init__.py +0 -0
  2. PYMEcs/Acquire/Actions/custom.py +167 -0
  3. PYMEcs/Acquire/Hardware/LPthreadedSimple.py +248 -0
  4. PYMEcs/Acquire/Hardware/LPthreadedSimpleSim.py +246 -0
  5. PYMEcs/Acquire/Hardware/NikonTiFlaskServer.py +45 -0
  6. PYMEcs/Acquire/Hardware/NikonTiFlaskServerT.py +59 -0
  7. PYMEcs/Acquire/Hardware/NikonTiRESTClient.py +73 -0
  8. PYMEcs/Acquire/Hardware/NikonTiSim.py +35 -0
  9. PYMEcs/Acquire/Hardware/__init__.py +0 -0
  10. PYMEcs/Acquire/Hardware/driftTrackGUI.py +329 -0
  11. PYMEcs/Acquire/Hardware/driftTrackGUI_n.py +472 -0
  12. PYMEcs/Acquire/Hardware/driftTracking.py +424 -0
  13. PYMEcs/Acquire/Hardware/driftTracking_n.py +433 -0
  14. PYMEcs/Acquire/Hardware/fakeCamX.py +15 -0
  15. PYMEcs/Acquire/Hardware/offsetPiezoRESTCorrelLog.py +38 -0
  16. PYMEcs/Acquire/__init__.py +0 -0
  17. PYMEcs/Analysis/MBMcollection.py +552 -0
  18. PYMEcs/Analysis/MINFLUX.py +280 -0
  19. PYMEcs/Analysis/MapUtils.py +77 -0
  20. PYMEcs/Analysis/NPC.py +1176 -0
  21. PYMEcs/Analysis/Paraflux.py +218 -0
  22. PYMEcs/Analysis/Simpler.py +81 -0
  23. PYMEcs/Analysis/Sofi.py +140 -0
  24. PYMEcs/Analysis/__init__.py +0 -0
  25. PYMEcs/Analysis/decSofi.py +211 -0
  26. PYMEcs/Analysis/eventProperties.py +50 -0
  27. PYMEcs/Analysis/fitDarkTimes.py +569 -0
  28. PYMEcs/Analysis/objectVolumes.py +20 -0
  29. PYMEcs/Analysis/offlineTracker.py +130 -0
  30. PYMEcs/Analysis/stackTracker.py +180 -0
  31. PYMEcs/Analysis/timeSeries.py +63 -0
  32. PYMEcs/Analysis/trackFiducials.py +186 -0
  33. PYMEcs/Analysis/zerocross.py +91 -0
  34. PYMEcs/IO/MINFLUX.py +851 -0
  35. PYMEcs/IO/NPC.py +117 -0
  36. PYMEcs/IO/__init__.py +0 -0
  37. PYMEcs/IO/darkTimes.py +19 -0
  38. PYMEcs/IO/picasso.py +219 -0
  39. PYMEcs/IO/tabular.py +11 -0
  40. PYMEcs/__init__.py +0 -0
  41. PYMEcs/experimental/CalcZfactor.py +51 -0
  42. PYMEcs/experimental/FRC.py +338 -0
  43. PYMEcs/experimental/ImageJROItools.py +49 -0
  44. PYMEcs/experimental/MINFLUX.py +1537 -0
  45. PYMEcs/experimental/NPCcalcLM.py +560 -0
  46. PYMEcs/experimental/Simpler.py +369 -0
  47. PYMEcs/experimental/Sofi.py +78 -0
  48. PYMEcs/experimental/__init__.py +0 -0
  49. PYMEcs/experimental/binEventProperty.py +187 -0
  50. PYMEcs/experimental/chaining.py +23 -0
  51. PYMEcs/experimental/clusterTrack.py +179 -0
  52. PYMEcs/experimental/combine_maps.py +104 -0
  53. PYMEcs/experimental/eventProcessing.py +93 -0
  54. PYMEcs/experimental/fiducials.py +323 -0
  55. PYMEcs/experimental/fiducialsNew.py +402 -0
  56. PYMEcs/experimental/mapTools.py +271 -0
  57. PYMEcs/experimental/meas2DplotDh5view.py +107 -0
  58. PYMEcs/experimental/mortensen.py +131 -0
  59. PYMEcs/experimental/ncsDenoise.py +158 -0
  60. PYMEcs/experimental/onTimes.py +295 -0
  61. PYMEcs/experimental/procPoints.py +77 -0
  62. PYMEcs/experimental/pyme2caml.py +73 -0
  63. PYMEcs/experimental/qPAINT.py +965 -0
  64. PYMEcs/experimental/randMap.py +188 -0
  65. PYMEcs/experimental/regExtraCmaps.py +11 -0
  66. PYMEcs/experimental/selectROIfilterTable.py +72 -0
  67. PYMEcs/experimental/showErrs.py +51 -0
  68. PYMEcs/experimental/showErrsDh5view.py +58 -0
  69. PYMEcs/experimental/showShiftMap.py +56 -0
  70. PYMEcs/experimental/snrEvents.py +188 -0
  71. PYMEcs/experimental/specLabeling.py +51 -0
  72. PYMEcs/experimental/splitRender.py +246 -0
  73. PYMEcs/experimental/testChannelByName.py +36 -0
  74. PYMEcs/experimental/timedSpecies.py +28 -0
  75. PYMEcs/experimental/utils.py +31 -0
  76. PYMEcs/misc/ExtraCmaps.py +177 -0
  77. PYMEcs/misc/__init__.py +0 -0
  78. PYMEcs/misc/configUtils.py +169 -0
  79. PYMEcs/misc/guiMsgBoxes.py +27 -0
  80. PYMEcs/misc/mapUtils.py +230 -0
  81. PYMEcs/misc/matplotlib.py +136 -0
  82. PYMEcs/misc/rectsFromSVG.py +182 -0
  83. PYMEcs/misc/shellutils.py +1110 -0
  84. PYMEcs/misc/utils.py +205 -0
  85. PYMEcs/misc/versionCheck.py +20 -0
  86. PYMEcs/misc/zcInfo.py +90 -0
  87. PYMEcs/pyme_warnings.py +4 -0
  88. PYMEcs/recipes/__init__.py +0 -0
  89. PYMEcs/recipes/base.py +75 -0
  90. PYMEcs/recipes/localisations.py +2380 -0
  91. PYMEcs/recipes/manipulate_yaml.py +83 -0
  92. PYMEcs/recipes/output.py +177 -0
  93. PYMEcs/recipes/processing.py +247 -0
  94. PYMEcs/recipes/simpler.py +290 -0
  95. PYMEcs/version.py +2 -0
  96. pyme_extra-1.0.4.post0.dist-info/METADATA +114 -0
  97. pyme_extra-1.0.4.post0.dist-info/RECORD +101 -0
  98. pyme_extra-1.0.4.post0.dist-info/WHEEL +5 -0
  99. pyme_extra-1.0.4.post0.dist-info/entry_points.txt +3 -0
  100. pyme_extra-1.0.4.post0.dist-info/licenses/LICENSE +674 -0
  101. pyme_extra-1.0.4.post0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,218 @@
+ import matplotlib.pyplot as plt
+ import numpy as np
+ import logging
+ logger = logging.getLogger(__file__)
+ import pandas as pd
+ import os
+
+ from PYMEcs.pyme_warnings import warn
+
+ # note that currently everything is hard coded for the standard 3D sequence
+ # TODO: should really check the metadata we have for (recent) zarr and check the CCRLimit array etc
+ # and figure things out from that!!
+
+
+ # --- start Alex B contributed functions to (possibly) save and plot ITR stats (Paraflux-like) ---
+ def paraflux_mk_df_fm(mfxdata):
+     # Convert structured array (original mfx data) to a DataFrame (containing all data from the MFX experiment)
+     df_mfx = pd.DataFrame()
+     for name in mfxdata.dtype.names:
+         col = mfxdata[name]
+         if col.ndim == 1:
+             df_mfx[name] = col
+         else:
+             n_dim = col.shape[1]
+             expanded = pd.DataFrame(col.tolist(), index=df_mfx.index if not df_mfx.empty else None)
+             if name in ['loc', 'lnc']:
+                 labels = ["x", "y", "z"]
+                 expanded.columns = [f"{name}_{labels[i]}" for i in range(n_dim)]
+             elif name == 'dcr':
+                 expanded.columns = [f"{name}_{i+1}" for i in range(n_dim)]
+             else:
+                 expanded.columns = [f"{name}_{i}" for i in range(n_dim)]
+             df_mfx = pd.concat([df_mfx, expanded], axis=1)
+
+     # Create failure_map (used to interpret failure reasons in analyze_failures)
+     failure_map = {
+         1: "Valid final", 2: "Valid not final",
+         4: "Derived iteration", 5: "Reserved",
+         6: "CFR failure", 8: "No signal",
+         9: "DAC out of range", 11: "Background measurement"
+     }
+
+     return (df_mfx, failure_map)
+
+ # ==================================================
+ # --- Analysis functions (Paraflux-like) ---
+ # ==================================================
+
+ # Create a df with the list of valid tids per iteration + additional basic stats
+ def build_valid_df(df_mfx):
+     if not isinstance(df_mfx, pd.DataFrame):  # Convert to DataFrame if input is a structured array
+         df = pd.DataFrame(df_mfx)
+     else:
+         df = df_mfx.copy()
+     df_valid = df[df['vld']]  # Select only valid localizations
+     vld_itr = df_valid.groupby('itr')['tid'].apply(lambda x: list(set(x))).reset_index()  # List of unique tids per iteration
+     vld_itr['Axis'] = np.where(vld_itr['itr'] % 2 == 0, 'x,y', 'z')  # Add a column with the axis of each iteration
+     vld_itr['vld loc count'] = vld_itr['tid'].apply(len)  # Count valid locs per iteration
+     vld_itr['failed loc count'] = vld_itr['vld loc count'].shift(1, fill_value=vld_itr['vld loc count'].iloc[0]) - vld_itr['vld loc count']  # Failed loc count per iteration
+     vld_itr.loc[0, 'failed loc count'] = 0  # Set failed loc count of the first iteration to 0
+     vld_itr['failed loc cum sum'] = vld_itr['failed loc count'].cumsum()  # Cumulative sum of failed locs
+     return vld_itr
+
+ # Compute percentages of passed and failed localizations (from build_valid_df)
+ def compute_percentages(vld_itr):
+     initial_count = vld_itr['vld loc count'].iloc[0]  # Percentage calculations are based on the initial count of valid locs
+     vld_itr['passed itr %'] = (vld_itr['vld loc count'] * 100 / initial_count).round(1)
+     vld_itr['failed % per itr'] = (vld_itr['failed loc count'] * 100 / initial_count).round(1)
+     pair_sums = {}  # This is done to mimic results from Paraflux
+     for i in range(1, len(vld_itr), 2):
+         pair_sums[i] = vld_itr.loc[i-1:i, 'failed % per itr'].sum().round(1)
+     vld_itr['failed % per itr pairs'] = vld_itr.index.map(pair_sums)
+     vld_itr['failed cum sum %'] = vld_itr['failed % per itr'].cumsum().round(1)
+     return vld_itr
+
+ # Analyze failures between consecutive iterations and categorize them based on failure_map (found on the Abberior wiki)
+ def analyze_failures(vld_itr, df_mfx, failure_map):
+     def analyze_failures_single_steps(vld_itr, df, itr_from, itr_to, failure_map):
+         tids_from = set(vld_itr.loc[vld_itr['itr'] == itr_from, 'tid'].iloc[0])  # Valid tids of the previous iteration
+         tids_to = set(vld_itr.loc[vld_itr['itr'] == itr_to, 'tid'].iloc[0])  # Valid tids of the current iteration
+         failed_tids = tids_from - tids_to  # tids that failed in the current iteration
+         failed_df = df[df['tid'].isin(failed_tids) & (df['itr'] == itr_to)]  # df with only the failed tids in the current iteration
+         counts = failed_df['sta'].value_counts().rename_axis("sta").reset_index(name="count")  # Count failure reasons
+         counts["reason"] = counts["sta"].map(failure_map).fillna("Other")  # Map failure reasons using failure_map
+         counts.insert(0, "itr", itr_to)  # Add iteration column
+         return counts
+
+     pairs = [(i, i+1) for i in range(vld_itr['itr'].max())]  # Pairs of consecutive iterations
+     # Analyze failures for each pair and concatenate results
+     failure_results = pd.concat(
+         [analyze_failures_single_steps(vld_itr, df_mfx, i_from, i_to, failure_map) for i_from, i_to in pairs],
+         ignore_index=True
+     )
+     # Pivot the results to have failure reasons as columns
+     failure_pivot = failure_results.pivot_table(
+         index="itr", columns="reason", values="count", fill_value=0
+     ).reset_index()
+     return vld_itr.merge(failure_pivot, on="itr", how="left")
+
+ # Compute percentages for failure reasons
+ def add_failure_metrics(vld_itr, initial_count):
+     cfr_map = {5: 4, 7: 6}  # map the iterations where CFR failures occur
+     vld_itr['CFR failure %'] = np.nan  # Initialize column with NaNs
+     # Calculate CFR failure percentages based on cfr_map
+     for target_itr, source_itr in cfr_map.items():
+         if not vld_itr.loc[vld_itr['itr'] == source_itr, 'CFR failure'].empty:  # Check if CFR failure data exists for the source iteration
+             val = vld_itr.loc[vld_itr['itr'] == source_itr, 'CFR failure'].values[0]  # Get the CFR failure count
+             vld_itr.loc[vld_itr['itr'] == target_itr, 'CFR failure %'] = (val / initial_count * 100).round(1)  # Percentage assigned to the target iteration
+     # Calculate No signal percentage for each iteration
+     vld_itr['No signal %'] = (vld_itr['No signal'] * 100 / initial_count).round(1)
+     # Define groups of iterations for the No signal percentage calculation
+     no_signal_groups = {1: [0, 1], 3: [2, 3], 5: [4, 5], 7: [6, 7], 9: [8, 9]}
+     # Calculate the No signal percentage for each group and map to iterations
+     no_signal_pct = {
+         target_itr: (vld_itr.loc[vld_itr['itr'].isin(group), 'No signal'].sum() / initial_count * 100).round(1)
+         for target_itr, group in no_signal_groups.items()
+     }
+     vld_itr['No signal % per itr pairs'] = vld_itr['itr'].map(no_signal_pct)
+     return vld_itr
+
+ # Plot like in Paraflux
+ def paraflux_itr_plot(vld_paraflux):
+     # Mapping rules
+     label_map = {
+         "passed": "Passed",
+         "CFR": "CFR-filtered",
+         "No signal": "Dark",
+         "DAC": "Out of range",
+         "Other": "Other",
+     }
+
+     # Get a pretty label based on the label map (substring matching against vld_paraflux column names)
+     def pretty_label(colname):
+         """Map colname to a user-friendly label based on substring rules."""
+         for key, label in label_map.items():
+             if key.lower() in colname.lower():
+                 return label
+         return colname  # fallback: keep the original name
+
+     # Keep only odd iterations (Paraflux style, i.e. 1, 3, 5, 7, 9)
+     vld_paraflux = vld_paraflux[vld_paraflux['itr'] % 2 == 1]
+
+     # Set figure size
+     plt.figure(figsize=(8, 6))
+
+     # Base positions
+     r1 = np.arange(len(vld_paraflux))  # Positions for each bar
+     names = vld_paraflux['itr']  # Names of the groups
+     barWidth = 0.85  # Bar width
+
+     # Colors (extendable if more columns are added)
+     colors = ["#0072B2", "#009E73", "#D55E00", "#E69F00", "#CC79A7"]
+
+     # Bottom position for stacking
+     bottompos = np.zeros(len(vld_paraflux))
+
+     # Plot each column as a stacked bar
+     for i, col in enumerate(vld_paraflux.columns[1:]):  # All columns except 'itr'
+         vals = vld_paraflux[col].fillna(0)  # Values of the current column, filling NaNs with 0
+         labels = pretty_label(col)  # Pretty label for the legend
+
+         plt.bar(
+             r1, vals, bottom=bottompos,
+             color=colors[i % len(colors)],
+             edgecolor="white", width=barWidth, label=labels
+         )  # Create the bar
+
+         # Add labels inside each bar
+         for j, v in enumerate(vals):
+             if v > 0:  # Only add text if the value > 0
+                 plt.text(r1[j], bottompos[j] + v / 2, f"{v:.1f}%",
+                          ha="center", va="center",
+                          color="black",
+                          fontsize=9)
+
+         bottompos += vals.values
+
+     # X/Y labels
+     plt.xticks(r1, names)
+     plt.xlabel("Iteration")
+     plt.ylabel("Events (%)")
+     plt.axhline(y=100, color="gray", linestyle="--", linewidth=1)
+
+     plt.legend(loc="upper right", fontsize=9)
+     plt.tight_layout()
+     plt.show()
+
+ # Main function to compute stats of failed and valid localizations and save the results
+ def compute_vld_stats(df_mfx, failure_map, store_path=None, ts=None):
+     # Run the analysis steps
+     vld_itr = build_valid_df(df_mfx)
+     vld_itr = compute_percentages(vld_itr)
+     vld_itr = analyze_failures(vld_itr, df_mfx, failure_map)
+     initial_count = vld_itr['vld loc count'].iloc[0]
+     vld_itr = add_failure_metrics(vld_itr, initial_count)
+     paraflux_itr_plot(vld_itr[['itr', 'passed itr %', 'CFR failure %', 'No signal % per itr pairs']])
+
+     if store_path is not None:
+         if ts is None:
+             warn("No timestamp found in metadata, saving as default.")
+             timestamp = "no_ts"
+         else:
+             timestamp = ts
+
+         vld_itr = vld_itr.drop(columns='tid', errors='ignore')
+         default_dir = str(store_path.parent)
+         full_path = os.path.join(default_dir, f"{timestamp}_iteration_stats_full.csv")
+         paraflux_path = os.path.join(default_dir, f"{timestamp}_iteration_stats_Paraflux_only.csv")
+
+         vld_itr.to_csv(full_path, index=False)
+         keep_cols = ['itr', 'passed itr %', 'CFR failure %', 'No signal % per itr pairs']
+         vld_paraflux = vld_itr[keep_cols]
+         vld_paraflux.to_csv(paraflux_path, index=False)
+
+         logger.debug(f"✔ Saved full results to: {full_path}")
+         logger.debug(f"✔ Saved cleaned results to: {paraflux_path}")
+
+     return vld_itr
+
+ ### --- End of Alex B contributed functions ---
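For orientation (this hunk is the 218-line file, which the file list identifies as PYMEcs/Analysis/Paraflux.py): the pipeline chains paraflux_mk_df_fm into compute_vld_stats, which in turn runs build_valid_df, compute_percentages, analyze_failures and add_failure_metrics. A minimal usage sketch, assuming mfxdata is an MFX structured array with at least vld, itr, tid and sta fields; the file names here are hypothetical, and the hard-coded standard 3D sequence noted at the top of the module still applies:

    import numpy as np
    from pathlib import Path
    from PYMEcs.Analysis.Paraflux import paraflux_mk_df_fm, compute_vld_stats

    # hypothetical export; any loader that yields the MFX structured array will do
    mfxdata = np.load('minflux_run.npy')

    df_mfx, failure_map = paraflux_mk_df_fm(mfxdata)
    # plots the Paraflux-style stacked bars; store_path/ts additionally write the CSVs
    vld_itr = compute_vld_stats(df_mfx, failure_map,
                                store_path=Path('results/run.zarr'), ts='231130-1200')
    print(vld_itr[['itr', 'passed itr %', 'failed % per itr']])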
@@ -0,0 +1,81 @@
+ import numpy as np
+ from scipy.stats import iqr
+
+ import sys
+ if sys.version_info > (3,):
+     xrange = range
+
+ # halfmode is apparently a robust measure of the mode:
+ # 1. Bickel, D. R. & Frühwirth, R. On a fast, robust estimator of the mode: Comparisons to other robust estimators with applications. Computational Statistics & Data Analysis 50, 3500–3530 (2006).
+
+ def halfmode(inputData, axis=None, dtype=None):
+     """
+     Robust estimator of the mode of a data set using the half-sample mode.
+
+     .. versionadded:: 1.0.3
+     """
+
+     if axis is not None:
+         fnc = lambda x: halfmode(x, dtype=dtype)
+         dataMode = np.apply_along_axis(fnc, axis, inputData)
+     else:
+         # The recursive function that implements the half-sample mode
+         def _hsm(data):
+             if data.size == 1:
+                 return data[0]
+             elif data.size == 2:
+                 return data.mean()
+             elif data.size == 3:
+                 i1 = data[1] - data[0]
+                 i2 = data[2] - data[1]
+                 if i1 < i2:
+                     return data[:2].mean()
+                 elif i2 < i1:
+                     return data[1:].mean()
+                 else:
+                     return data[1]
+             else:
+                 # find the half-sample window with the smallest spread and recurse into it
+                 wMin = data[-1] - data[0]
+                 N = int(data.size / 2) + data.size % 2
+                 j = 0
+                 for i in xrange(0, int(N)):
+                     w = data[i+N-1] - data[i]
+                     if w < wMin:
+                         wMin = w
+                         j = i
+                 return _hsm(data[j:j+N])
+
+         data = inputData.ravel()
+         if type(data).__name__ == "MaskedArray":
+             data = data.compressed()
+         if dtype is not None:
+             data = data.astype(dtype)
+
+         # The data need to be sorted for this to work
+         data = np.sort(data)
+
+         # Find the mode
+         dataMode = _hsm(data)
+
+     return dataMode
+
+
+ def clusterModes(x, y, eid, prop):
+     clusterMode = np.zeros_like(x)
+     clusterErr = np.zeros_like(x)
+     clusterCentroid_x = np.zeros_like(x)
+     clusterCentroid_y = np.zeros_like(x)
+
+     uids = np.unique(eid)
+     for j, i in enumerate(uids):
+         if not i == 0:  # cluster id 0 means not a cluster
+             ind = eid == i
+             xi = x[ind]
+             yi = y[ind]
+             pi = prop[ind]
+
+             clusterMode[ind] = halfmode(pi)
+             clusterErr[ind] = iqr(pi)
+             clusterCentroid_x[ind] = np.sum(xi*pi)/pi.sum()
+             clusterCentroid_y[ind] = np.sum(yi*pi)/pi.sum()
+
+     return clusterMode, clusterErr, clusterCentroid_x, clusterCentroid_y
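A quick sanity check for halfmode (a usage sketch, not part of the module; the import path assumes this 81-line hunk is PYMEcs/Analysis/Simpler.py as the file list suggests): on data with a heavy tail, the half-sample mode stays with the bulk of the distribution where the mean does not.

    import numpy as np
    from PYMEcs.Analysis.Simpler import halfmode

    rng = np.random.default_rng(0)
    # 90% of samples near 1.0 plus a heavy right tail
    data = np.concatenate([rng.normal(1.0, 0.05, 900), rng.exponential(5.0, 100)])

    print('mean     %.2f' % data.mean())     # dragged up by the tail
    print('median   %.2f' % np.median(data))
    print('halfmode %.2f' % halfmode(data))  # stays near 1.0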
@@ -0,0 +1,140 @@
+ # d.baddeley@auckland.ac.nz
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU General Public License
+ # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ #
+ ##################
+
+ # this code was lifted from PYMEnf and made compatible with py3 (change to sys.maxsize)
+
+ import sys
+ import numpy as np
+ import scipy.ndimage as ndimage
+ from scipy.special import comb  # scipy.comb/sp.comb was removed in modern scipy
+
+ # note that this code still assumes the older data model and uses methods like `getSliceShape`
+ # whereas the newer access modes (e.g. data_xyztc) do not have these
+
+ def calcCorrelates(data, nOrders=5, startAt=50, filtHalfWidth=25, stopAt=sys.maxsize):
+     '''Calculate the autocorrelations up to nOrders for SOFI imaging'''
+     d3c = np.zeros(list(data.getSliceShape()) + [1, nOrders])
+     d_m = np.zeros(data.getSliceShape())
+     d_mm = np.zeros(list(data.getSliceShape()) + [1])
+
+     nm = 1./(2*filtHalfWidth)
+     stopAt = min(stopAt, data.shape[2]-filtHalfWidth)
+
+     # prime the running mean over the initial window
+     for i in range(startAt-filtHalfWidth, startAt+filtHalfWidth):
+         d_m += nm*data.getSlice(i)
+
+     for i in range(startAt+1, stopAt):
+         # slide the running mean: drop the slice leaving the window, add the one entering it
+         d_m = d_m - data.getSlice(i-filtHalfWidth - 1)*nm + data.getSlice(i+filtHalfWidth-1)*nm
+         d_ = data.getSlice(i)
+         d_mm[:,:,0] += d_
+         d_ = d_ - d_m
+         d_2 = np.ones(d_.shape)
+         for j in range(d3c.shape[3]):
+             d_2 *= d_
+             if (j % 2):
+                 d3c[:,:,0,j] += d_2
+             else:
+                 d3c[:,:,0,j] += np.abs(d_2)
+
+     return d3c/(stopAt - startAt), d_mm/(stopAt - startAt)
+
+ def calcCorrelatesI(data, nOrders=5, zoom=4, startAt=50, filtHalfWidth=25, stopAt=sys.maxsize):
+     '''Calculate the autocorrelations up to nOrders for SOFI imaging, interpolating each slice by `zoom`'''
+     slShape = list(np.array(data.getSliceShape())*zoom)
+     d3c = np.zeros(slShape + [1, nOrders])
+     d_m = np.zeros(slShape)
+     d_mm = np.zeros(slShape + [1])
+
+     nm = 1./(2*filtHalfWidth)
+     stopAt = min(stopAt, data.shape[2]-filtHalfWidth)
+
+     for i in range(startAt-filtHalfWidth, startAt+filtHalfWidth):
+         d_m += nm*ndimage.zoom(data.getSlice(i), zoom)
+
+     for i in range(startAt+1, stopAt):
+         d_m = d_m - ndimage.zoom(data.getSlice(i-filtHalfWidth - 1), zoom)*nm + ndimage.zoom(data.getSlice(i+filtHalfWidth-1), zoom)*nm
+         d_ = ndimage.zoom(data.getSlice(i), zoom)
+         d_mm[:,:,0] += d_
+         d_ = d_ - d_m
+         d_2 = np.ones(d_.shape)
+         for j in range(d3c.shape[3]):
+             d_2 *= d_
+             if (j % 2):
+                 d3c[:,:,0,j] += d_2
+             else:
+                 d3c[:,:,0,j] += np.abs(d_2)
+
+     return d3c/(stopAt - startAt), d_mm/(stopAt - startAt)
+
+ def calcCumulants(corr):
+     cum = corr.copy()
+
+     for n in range(3, cum.shape[3]):
+         for i in range(1, n-1):
+             cum[:,:,:,n] -= comb(n, (i+1))*cum[:,:,:,(n-i-1)]*corr[:,:,:,i]
+
+     return cum
+
+
+ def calcCorrelatesZ(data, zm, nOrders=5, startAt=50, filtHalfWidth=25, stopAt=sys.maxsize):
+     '''Calculate the autocorrelations up to nOrders for SOFI imaging, binned by the z position given by the mapping `zm`'''
+
+     zvals = np.array(list(set(zm.yvals)))
+     zvals.sort()
+     nZ = zvals.shape[0]
+
+     d3c = np.zeros(list(data.getSliceShape()) + [nZ, nOrders])
+     d_m = np.zeros(list(data.getSliceShape()))
+     d_mm = np.zeros(list(data.getSliceShape()) + [nZ])
+
+     nm = 1./(2*filtHalfWidth)
+     stopAt = min(stopAt, data.shape[2]-filtHalfWidth)
+
+     for i in range(startAt-filtHalfWidth, startAt+filtHalfWidth):
+         d_m += nm*data.getSlice(i)
+
+     for i in range(startAt+1, stopAt):
+         # find the index of the z plane this frame belongs to
+         z = int(np.where((zvals - zm(i))**2 < .02)[0][0])
+
+         d_m = d_m - data.getSlice(i-filtHalfWidth - 1)*nm + data.getSlice(i+filtHalfWidth-1)*nm
+         d_ = data.getSlice(i)
+         d_mm[:,:,z] += d_
+         d_ = d_ - d_m
+         d_2 = np.ones(d_.shape)
+
+         for j in range(d3c.shape[3]):
+             d_2 *= d_
+             if (j % 2):
+                 d3c[:,:,z,j] += d_2
+             else:
+                 d3c[:,:,z,j] += np.abs(d_2)
+
+     return d3c/(stopAt - startAt), d_mm/(stopAt - startAt)
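Since calcCorrelates relies on the older getSliceShape/getSlice data model (see the note at the top of this file), a plain numpy stack needs a small adapter. A minimal sketch, providing only the attributes the functions above actually touch (ArrayDataSource is an illustrative name, not part of the package; the import path assumes this 140-line hunk is PYMEcs/Analysis/Sofi.py):

    import numpy as np
    from PYMEcs.Analysis.Sofi import calcCorrelates

    class ArrayDataSource:
        """Wrap an (x, y, t) numpy array in the older slice-based interface."""
        def __init__(self, stack):
            self.stack = stack
            self.shape = stack.shape   # frame count expected on axis 2

        def getSliceShape(self):
            return self.stack.shape[:2]

        def getSlice(self, i):
            return self.stack[:, :, i].astype('f')

    stack = np.random.poisson(10., (32, 32, 500)).astype('f')
    d3c, d_mm = calcCorrelates(ArrayDataSource(stack), nOrders=3)
    print(d3c.shape)   # (32, 32, 1, 3)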
File without changes
@@ -0,0 +1,211 @@
+ from PYME.Deconv.dec import *
+
+ # explicit imports so the numpy names used below do not depend on what the star import re-exports
+ import numpy
+ from numpy import (zeros, reshape, ravel, size, dot, transpose,
+                    concatenate, real, exp)
+ from numpy.fft import fftn, ifftn, fftshift, ifftshift
+ from numpy.linalg import solve, norm
+
+
+ class dec_sofi(dec):
+     def psf_calc(self, psf, data_size, orders, weights):
+         from PYME.pad import pad
+         pw = (numpy.array(data_size[:3]) - psf.shape)/2.
+         pw1 = numpy.floor(pw).astype(int)
+         pw2 = numpy.ceil(pw).astype(int)
+
+         g = psf  # /psf.sum()
+
+         # crop the psf along each axis where it is larger than the data (negative pad widths)
+         if pw1[0] < 0:
+             if pw2[0] < 0:
+                 g = g[-pw1[0]:pw2[0], :, :]
+             else:
+                 g = g[-pw1[0]:, :, :]
+
+             pw1[0] = 0
+             pw2[0] = 0
+
+         if pw1[1] < 0:
+             if pw2[1] < 0:
+                 g = g[:, -pw1[1]:pw2[1], :]
+             else:
+                 g = g[:, -pw1[1]:, :]
+
+             pw1[1] = 0
+             pw2[1] = 0
+
+         if pw1[2] < 0:
+             if pw2[2] < 0:
+                 g = g[:, :, -pw1[2]:pw2[2]]
+             else:
+                 g = g[:, :, -pw1[2]:]
+
+             pw1[2] = 0
+             pw2[2] = 0
+
+         g = pad.with_constant(g, ((pw2[0], pw1[0]), (pw2[1], pw1[1]), (pw2[2], pw1[2])), (0,))
+
+         self.height = data_size[0]
+         self.width = data_size[1]
+         self.depth = data_size[2]
+
+         self.shape = data_size[:2]
+
+         self.nOrders = data_size[3]
+         self.orders = orders
+         self.weights = weights
+
+         self.g = g.astype('float32')
+
+         # transfer functions for each order: the psf raised to that order, normalized
+         self.H = []
+         self.Ht = []
+         for i in range(self.nOrders):
+             gp = self.g**(self.orders[i])
+             gp = gp/gp.sum()
+             self.H.append(fftn(gp))
+             self.Ht.append(gp.size*ifftn(gp))
+
+     def startGuess(self, data):
+         # start from the mean of the order-wise roots of the data
+         guess = zeros(data.shape[:3])
+
+         for i in range(self.nOrders):
+             guess += data[:,:,:,i]**(1./self.orders[i])
+
+         return guess/self.nOrders
+
+     def subsearch(self, f0, res, fdef, Afunc, Lfunc, lam, S):
+         nsrch = size(S, 1)
+         pref = Lfunc(f0 - fdef)
+         w0 = dot(pref, pref)
+         c0 = dot(res, res)
+
+         AS = zeros((size(res), nsrch), 'f')
+         LS = zeros((size(pref), nsrch), 'f')
+
+         for k in range(nsrch):
+             AS[:,k] = (Afunc(f0 + S[:,k]) - Afunc(f0)).astype('f')
+             LS[:,k] = Lfunc(S[:,k]).astype('f')
+
+         Hc = dot(transpose(AS), AS)
+         Hw = dot(transpose(LS), LS)
+         Gc = dot(transpose(AS), res)
+         Gw = dot(transpose(-LS), pref)
+
+         c = solve(Hc + pow(lam, 2)*Hw, Gc + pow(lam, 2)*Gw)
+
+         cpred = c0 + dot(dot(transpose(c), Hc), c) - dot(transpose(c), Gc)
+         wpred = w0 + dot(dot(transpose(c), Hw), c) - dot(transpose(c), Gw)
+
+         fnew = f0 + dot(S, c)
+
+         return (fnew, cpred, wpred)
+
+     def deconv(self, data, lamb, num_iters=10, alpha=None):
+         self.dataShape = data.shape
+
+         if alpha is not None:
+             self.alpha = alpha
+             self.e1 = fftshift(exp(1j*self.alpha))
+             self.e2 = fftshift(exp(2j*self.alpha))
+
+         self.f = self.startGuess(data)
+
+         self.f = self.f.ravel()
+         data = data.ravel()
+
+         fdef = zeros(self.f.shape, 'f')
+
+         S = zeros((size(self.f), 3), 'f')
+
+         # start with two search directions (data gradient + regularization);
+         # the previous step is added as a third direction after the first iteration
+         nsrch = 2
+
+         for loopcount in range(num_iters):
+             pref = self.Lfunc(self.f - fdef)
+             self.res = data - self.Afunc(self.f)
+
+             S[:,0] = self.Ahfunc(self.res).astype('f')
+             S[:,1] = (-self.Lhfunc(pref)).astype('f')
+
+             test = 1 - abs(dot(S[:,0], S[:,1])/(norm(S[:,0])*norm(S[:,1])))
+
+             print('Test Statistic %f\n' % (test,))
+             self.tests.append(test)
+             self.ress.append(norm(self.res))
+             self.prefs.append(norm(pref))
+
+             (fnew, cpred, spred) = self.subsearch(self.f, self.res, fdef, self.Afunc, self.Lfunc, lamb, S[:, 0:nsrch])
+
+             # positivity constraint
+             fnew = (fnew*(fnew > 0)).astype('f')
+
+             S[:,2] = (fnew - self.f).astype('f')
+             nsrch = 3
+
+             self.f = fnew
+
+         return real(self.f)
+
+     def Lfunc(self, f):
+         # 3D discrete Laplacian used as the smoothness regularizer
+         fs = reshape(f, (self.height, self.width, self.depth))
+         a = -6*fs
+
+         a[:,:,0:-1] += fs[:,:,1:]
+         a[:,:,1:] += fs[:,:,0:-1]
+
+         a[:,0:-1,:] += fs[:,1:,:]
+         a[:,1:,:] += fs[:,0:-1,:]
+
+         a[0:-1,:,:] += fs[1:,:,:]
+         a[1:,:,:] += fs[0:-1,:,:]
+
+         return ravel(a.astype('f'))
+
+     Lhfunc = Lfunc
+
+     def Afunc(self, f):
+         # forward model: each order is the object raised to that order, blurred with H[i]
+         fs = reshape(f, (self.height, self.width, self.depth))
+
+         ds = []
+
+         for i in range(self.nOrders):
+             F = fftn(fs**(self.orders[i]))
+             d = ifftshift(ifftn(F*self.H[i])).real
+             ds.append(d[:,:,:,None])
+
+         return ravel(concatenate(ds, 3))
+
+     def Ahfunc(self, f):
+         # map from the stacked orders back into the object domain, weighting each order
+         fs = reshape(f, (self.height, self.width, self.depth, self.nOrders))
+
+         ds = []
+
+         for i in range(self.nOrders):
+             F = fftn(fs[:,:,:,i])
+             d = ifftshift(ifftn(F*self.Ht[i])).real
+             d = d/abs(d).max()
+             ds.append(d*self.weights[i])
+
+         return ravel(numpy.sum(ds, axis=0))
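A rough call-order sketch for dec_sofi (which the file list suggests is PYMEcs/Analysis/decSofi.py). This assumes the dec base class in PYME.Deconv.dec has a no-argument constructor and initializes the tests/ress/prefs bookkeeping lists the iteration loop appends to; the synthetic arrays below are placeholders for a measured PSF and a real (x, y, z, order) cumulant stack:

    import numpy as np
    from PYMEcs.Analysis.decSofi import dec_sofi

    # toy 3D Gaussian psf and a random cumulant stack (x, y, z, order)
    ax = (np.arange(16) - 8.)**2/8.
    psf = np.exp(-(ax[:, None, None] + ax[None, :, None] + ax[None, None, :]))
    data = np.random.rand(32, 32, 8, 2)

    d = dec_sofi()
    d.psf_calc(psf, data.shape, orders=[1, 2], weights=[1., 1.])
    f = d.deconv(data, lamb=2e-2, num_iters=5).reshape(data.shape[:3])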