phasor-handler 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- phasor_handler/__init__.py +9 -0
- phasor_handler/app.py +249 -0
- phasor_handler/img/icons/chevron-down.svg +3 -0
- phasor_handler/img/icons/chevron-up.svg +3 -0
- phasor_handler/img/logo.ico +0 -0
- phasor_handler/models/dir_manager.py +100 -0
- phasor_handler/scripts/contrast.py +131 -0
- phasor_handler/scripts/convert.py +155 -0
- phasor_handler/scripts/meta_reader.py +467 -0
- phasor_handler/scripts/plot.py +110 -0
- phasor_handler/scripts/register.py +86 -0
- phasor_handler/themes/__init__.py +8 -0
- phasor_handler/themes/dark_theme.py +330 -0
- phasor_handler/tools/__init__.py +1 -0
- phasor_handler/tools/check_stylesheet.py +15 -0
- phasor_handler/tools/misc.py +20 -0
- phasor_handler/widgets/__init__.py +5 -0
- phasor_handler/widgets/analysis/components/__init__.py +9 -0
- phasor_handler/widgets/analysis/components/bnc.py +426 -0
- phasor_handler/widgets/analysis/components/circle_roi.py +850 -0
- phasor_handler/widgets/analysis/components/image_view.py +667 -0
- phasor_handler/widgets/analysis/components/meta_info.py +481 -0
- phasor_handler/widgets/analysis/components/roi_list.py +659 -0
- phasor_handler/widgets/analysis/components/trace_plot.py +621 -0
- phasor_handler/widgets/analysis/view.py +1735 -0
- phasor_handler/widgets/conversion/view.py +83 -0
- phasor_handler/widgets/registration/view.py +110 -0
- phasor_handler/workers/__init__.py +2 -0
- phasor_handler/workers/analysis_worker.py +0 -0
- phasor_handler/workers/histogram_worker.py +55 -0
- phasor_handler/workers/registration_worker.py +242 -0
- phasor_handler-2.2.0.dist-info/METADATA +134 -0
- phasor_handler-2.2.0.dist-info/RECORD +37 -0
- phasor_handler-2.2.0.dist-info/WHEEL +5 -0
- phasor_handler-2.2.0.dist-info/entry_points.txt +5 -0
- phasor_handler-2.2.0.dist-info/licenses/LICENSE.md +21 -0
- phasor_handler-2.2.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,467 @@
|
|
|
1
|
+
import yaml
|
|
2
|
+
import numpy as np
|
|
3
|
+
import os
|
|
4
|
+
import xml.etree.ElementTree as ET
|
|
5
|
+
import re
|
|
6
|
+
import csv
|
|
7
|
+
import argparse
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
import pickle
|
|
10
|
+
import json
|
|
11
|
+
|
|
12
|
+
# -------------- INPUTS --------------
# Command-line interface: the only input is the folder that holds the
# microscope's .yaml metadata files (ImageRecord.yaml, AnnotationRecord.yaml, ...).
parser = argparse.ArgumentParser(description='Process a folder of .yaml files.')
parser.add_argument('-f', '--folder_path', required=True, type=str, help='Path to the folder containing .yaml files')
args = parser.parse_args()

# All outputs (CSV/JSON/pickle summaries) are written back into this folder.
folder_path = args.folder_path
|
|
18
|
+
|
|
19
|
+
# ------------- FUNCTIONS -------------
|
|
20
|
+
def open_overwrite(path, *args, **kwargs):
    """Open *path* for writing from a clean slate.

    Creates any missing parent directories, removes a pre-existing file at
    the destination, then opens it. Extra positional and keyword arguments
    are forwarded unchanged to ``open``.
    """
    target = Path(path)
    target.parent.mkdir(parents=True, exist_ok=True)
    # Drop any stale file so the new one starts fresh.
    target.unlink(missing_ok=True)
    return open(target, *args, **kwargs)
|
|
25
|
+
|
|
26
|
+
def load_classes(yaml_path):
    """Parse a concatenated "StartClass:" YAML file into a dict by ClassName.

    The file is a stream of records, each introduced by "StartClass:". Each
    record is parsed independently; records without a ClassName are skipped.
    When the same ClassName occurs more than once, the stored value becomes
    a list of all records for that name. Chunks that fail to parse are
    reported and skipped.
    """
    with open(yaml_path, "r", encoding="utf-8") as handle:
        raw = handle.read()

    records = {}
    for piece in raw.split("StartClass:"):
        if not piece.strip():
            continue
        # Re-attach the delimiter that split() consumed.
        chunk = "StartClass:" + piece
        try:
            parsed = yaml.safe_load(chunk)
        except yaml.YAMLError as e:
            print("YAML parse error in chunk:\n", chunk[:200], "...", e)
            continue

        if not isinstance(parsed, dict):
            continue
        record = parsed.get("StartClass", {})
        name = record.get("ClassName")
        if not name:
            continue

        # Duplicate class names accumulate into a list.
        if name in records:
            existing = records[name]
            if isinstance(existing, list):
                existing.append(record)
            else:
                records[name] = [existing, record]
        else:
            records[name] = record

    return records
|
|
59
|
+
|
|
60
|
+
def get_organized_experiment_data(yaml_path):
    """Split an AnnotationRecord.yaml file into header, initial ROIs and stimulation events.

    The file is chunked on "theTimepointIndex:". Chunk 0 is the table header,
    chunk 1 holds the pre-stimulation ROI definitions (timepoint 0), and the
    remaining chunks are per-timepoint annotation groups that may contain a
    CFRAPRegionAnnotation70 marking a stimulation.

    Returns a dict with keys "header", "initial_rois" (mRegionIndex -> payload
    annotation) and "stimulation_events" (list of dicts).
    """
    with open(yaml_path, "r", encoding="utf-8") as f:
        content = f.read()

    chunks = content.strip().split("theTimepointIndex:")

    # --- 1. Parse the Main Data Header ---
    # The header is the very first part, before any time points.
    # The split above cut the header off mid-record, so the EndClass line is
    # re-appended to make the fragment parseable on its own.
    header_chunk = chunks[0]
    header_metadata = yaml.safe_load(header_chunk.split("EndClass:")[0] + "EndClass: CDataTableHeaderRecord70")
    header = header_metadata.get("StartClass", {})

    # --- 2. Parse the Initial ROI Definitions (from Timepoint 0) ---
    # These are the ROIs defined before any stimulations.
    initial_rois = {}
    if len(chunks) > 1:
        timepoint_zero_chunk = "theTimepointIndex:" + chunks[1]
        sub_chunks = timepoint_zero_chunk.split("StartClass:")

        annotations = []
        for ann_chunk in sub_chunks[1:]:
            ann_data = yaml.safe_load("StartClass:" + ann_chunk)
            if ann_data and "StartClass" in ann_data:
                annotations.append(ann_data["StartClass"])

        # Organize the initial ROIs into a clean dictionary by their index.
        # Convention in this file format: a CCubeAnnotation70 entry names the
        # region index, and the NEXT annotation carries that ROI's payload.
        for i, ann in enumerate(annotations):
            if ann.get("ClassName") == 'CCubeAnnotation70':
                roi_id = ann['mRegionIndex']
                if i + 1 < len(annotations):
                    initial_rois[roi_id] = annotations[i + 1]

    # --- 3. Parse and Filter for Stimulation Events (Timepoint 1 onwards) ---
    stimulation_events = []
    # The loop now correctly starts from the third chunk (index 2), skipping the header and timepoint 0.
    for chunk in chunks[2:]:
        full_chunk = "theTimepointIndex:" + chunk
        try:
            sub_chunks = full_chunk.split("StartClass:")
            metadata_part = sub_chunks[0]
            timepoint_metadata = yaml.safe_load(metadata_part)

            annotations = []
            for ann_chunk in sub_chunks[1:]:
                ann_data = yaml.safe_load("StartClass:" + ann_chunk)
                if ann_data and "StartClass" in ann_data:
                    annotations.append(ann_data["StartClass"])
            timepoint_metadata["annotations"] = annotations

            # Now, check if this timepoint is a stimulation event: a FRAP
            # region annotation marks a stimulation.
            frap_annotation = next((ann for ann in annotations if ann.get("ClassName") == "CFRAPRegionAnnotation70"), None)

            if frap_annotation:
                stimulation_events.append({
                    "timepoint_index": timepoint_metadata["theTimepointIndex"],
                    "stimulation_data": frap_annotation,
                    "roi_annotations": [ann for ann in annotations if ann.get("ClassName") != "CFRAPRegionAnnotation70"]
                })
        except yaml.YAMLError as e:
            # Best-effort parsing: a malformed timepoint is skipped, not fatal.
            print(f"Skipping problematic chunk due to YAML error: {e}")
            continue

    return {
        "header": header,
        "initial_rois": initial_rois,
        "stimulation_events": stimulation_events
    }
|
|
127
|
+
|
|
128
|
+
def parse_stimulation_xml(xml_string):
    """
    Cleans and parses the escaped XML string from the annotation file.

    Args:
        xml_string (str): The XML data string with escaped characters
            (the file format encodes e.g. '<' as '_#60;').

    Returns:
        dict: A dictionary containing the extracted metadata, or None if
            parsing fails or any of the expected <Description>, <Device>,
            <Duration> tags is absent.
    """
    # --- 1. Clean the XML String ---
    # This replacement map handles the specific escaped characters.
    replacements = {
        '_#60;': '<',
        '_#62;': '>',
        '_#34;': '"',
        '_#32;': ' ',
        '_#10;': '\n',
        '_#58;': ':',
        '_#91;': '[',
        '_#93;': ']'
    }

    for old, new in replacements.items():
        xml_string = xml_string.replace(old, new)

    # --- 2. Parse the Cleaned XML and Extract Data ---
    try:
        root = ET.fromstring(xml_string)

        # Find the <Description> tag, which contains the most useful info
        description_tag = root.find('.//Description')
        if description_tag is None:
            return None

        # Fix: the original dereferenced Device/Duration unconditionally,
        # raising AttributeError on malformed input even though Description
        # was already guarded. Bail out gracefully instead.
        device_tag = root.find('.//Device')
        duration_tag = root.find('.//Duration')
        if device_tag is None or duration_tag is None:
            return None

        # Extract the full description attribute
        description_text = description_tag.get('Description', '')

        # --- 3. Extract Key Values from the Description Text ---
        # Use regular expressions to find the timepoint and ROI list
        timepoint_match = re.search(r'timepoint: (\d+)', description_text)
        roi_match = re.search(r'ROI: ([\d\s]+)power', description_text)

        timepoint = int(timepoint_match.group(1)) if timepoint_match else None

        rois = []
        if roi_match:
            # Split the string of numbers and convert each to an integer
            rois = [int(n) for n in roi_match.group(1).strip().split()]

        return {
            'device_name': device_tag.get('LongName'),
            'duration_ms': int(duration_tag.get('Time')),
            'description_text': description_text,
            'event_timepoint_ms': timepoint,
            'stimulated_rois': rois
        }

    except ET.ParseError as e:
        print(f"Error parsing XML: {e}")
        return None
|
|
189
|
+
|
|
190
|
+
def extract_roi_info(events):
    """Flatten stimulation events into per-event ROI summaries.

    Within each event's roi_annotations, a CCubeAnnotation70 entry names a
    region index and the annotation immediately after it carries that ROI's
    payload. Returns one dict per event with the timepoint index plus, per
    ROI, the target power and the two (x, y, z) corner points.
    """
    summaries = []
    for event in events:
        annotations = event['roi_annotations']

        # Pair each cube annotation's region index with the following
        # annotation, which holds the actual ROI payload.
        paired = {}
        for idx, ann in enumerate(annotations):
            if ann.get('ClassName') != 'CCubeAnnotation70':
                continue
            if idx + 1 < len(annotations):
                paired[ann.get('mRegionIndex')] = annotations[idx + 1]

        rois = []
        for roi_id, payload in paired.items():
            # Six flat values encode two 3D corner points.
            coords = payload.get('StructArrayValues', [])
            first = (coords[0], coords[1], coords[2]) if len(coords) >= 3 else None
            second = (coords[3], coords[4], coords[5]) if len(coords) >= 6 else None
            rois.append({
                "roi_index": roi_id,
                "target_power": payload.get('mTargetPower'),
                "corner_1_xyz": first,
                "corner_2_xyz": second
            })

        summaries.append({
            "timepoint_index": event['timepoint_index'],
            "rois": rois
        })

    return summaries
|
|
225
|
+
|
|
226
|
+
# Read all the files in the folder using yaml.
# ImageRecord/ChannelRecord use the multi-record "StartClass:" layout,
# AnnotationRecord gets the organized experiment treatment, everything else
# is parsed as plain YAML.
data = {}
for filename in os.listdir(folder_path):
    if not filename.endswith(".yaml"):
        continue
    file_path = os.path.join(folder_path, filename)
    try:
        if filename in ("ImageRecord.yaml", "ChannelRecord.yaml"):
            # load_classes opens the file itself; the original additionally
            # wrapped this call in an unused open() context manager.
            data[filename] = load_classes(file_path)
        elif filename == "AnnotationRecord.yaml":
            data[filename] = get_organized_experiment_data(file_path)
        else:
            with open(file_path, 'r') as handle:
                data[filename] = yaml.safe_load(handle)
    except yaml.YAMLError as e:
        # Fix: the original message printed the literal "(unknown)" instead
        # of the name of the file that failed to parse.
        print(f"Error reading {filename}: {e}")
|
|
251
|
+
|
|
252
|
+
# Extract date/time values with error handling.
# The original repeated the same try/except six times; a single helper keeps
# the "missing -> 'NA'" behavior in one place.
def _image_record_field(field_name):
    """Return a CImageRecord70 field from ImageRecord.yaml, or "NA" if absent."""
    try:
        return data["ImageRecord.yaml"]["CImageRecord70"][field_name]
    except (KeyError, TypeError):
        return "NA"

day = _image_record_field("mDay")
month = _image_record_field("mMonth")
year = _image_record_field("mYear")
hour = _image_record_field("mHour")
minute = _image_record_field("mMinute")
second = _image_record_field("mSecond")
|
|
282
|
+
|
|
283
|
+
# Helper function to safely extract values
|
|
284
|
+
def safe_extract(func, default="NA"):
    """Safely execute a function and return default if it fails.

    Any of the usual lookup/conversion errors (KeyError, TypeError,
    IndexError, AttributeError, ValueError) — and a None result — are both
    treated as "value unavailable" and mapped to *default*.
    """
    try:
        value = func()
    except (KeyError, TypeError, IndexError, AttributeError, ValueError):
        return default
    return default if value is None else value
|
|
291
|
+
|
|
292
|
+
# Master summary dict. Every entry is wrapped in safe_extract so that a
# missing/partial metadata file yields "NA" (scalars) or [] (lists) instead
# of crashing the script.
variables = {
    # --- acquisition metadata (from ImageRecord.yaml / ElapsedTimes.yaml) ---
    "device_name": safe_extract(lambda: parse_stimulation_xml(data["AnnotationRecord.yaml"]["stimulation_events"][0]["stimulation_data"]["mXML"])["device_name"] if data["AnnotationRecord.yaml"]["stimulation_events"] else "NA"),
    "n_frames": safe_extract(lambda: data["ElapsedTimes.yaml"]["theElapsedTimes"][0]),
    "pixel_size": safe_extract(lambda: data["ImageRecord.yaml"]["CLensDef70"]["mMicronPerPixel"]),
    "height": safe_extract(lambda: data["ImageRecord.yaml"]["CImageRecord70"]["mHeight"]),
    "width": safe_extract(lambda: data["ImageRecord.yaml"]["CImageRecord70"]["mWidth"]),
    # Field of view in microns: pixel size times image dimensions.
    "FOV_size": safe_extract(lambda: f"{data['ImageRecord.yaml']['CLensDef70']['mMicronPerPixel'] * data['ImageRecord.yaml']['CImageRecord70']['mHeight']} x {data['ImageRecord.yaml']['CLensDef70']['mMicronPerPixel'] * data['ImageRecord.yaml']['CImageRecord70']['mWidth']} microns"),
    "Elapsed_time_offset": safe_extract(lambda: data["ImageRecord.yaml"]["CImageRecord70"]["mElapsedTimeOffset"]),
    "green_channel": safe_extract(lambda: data["ImageRecord.yaml"]["CMainViewRecord70"]["mGreenChannel"]),
    "red_channel": safe_extract(lambda: data["ImageRecord.yaml"]["CMainViewRecord70"]["mRedChannel"]),
    "blue_channel": safe_extract(lambda: data["ImageRecord.yaml"]["CMainViewRecord70"]["mBlueChannel"]),
    "X_start_position": safe_extract(lambda: data["ChannelRecord.yaml"]["CExposureRecord70"][0]["mXStartPosition"]),
    "Y_start_position": safe_extract(lambda: data["ChannelRecord.yaml"]["CExposureRecord70"][0]["mYStartPosition"]),
    "Z_start_position": safe_extract(lambda: data["ChannelRecord.yaml"]["CExposureRecord70"][0]["mZStartPosition"]),
    # --- acquisition timestamp (extracted above with per-field fallbacks) ---
    "day": day,
    "month": month,
    "year": year,
    "hour": hour,
    "minute": minute,
    "second": second,
    # --- stimulation metadata (from AnnotationRecord.yaml) ---
    # Number of stimulation events; defaults to 0 rather than "NA" so the
    # per-event CSV loops below can range() over it safely.
    "stimulation_events": safe_extract(lambda: len([x["timepoint_index"] for x in data["AnnotationRecord.yaml"]["stimulation_events"]]), 0),
    # Per-event repetition count, scraped from the free-text description.
    "repetitions": safe_extract(lambda: [
        int(re.search(r"(\d+)\s+repetition", parse_stimulation_xml(event["stimulation_data"]["mXML"])["description_text"]).group(1))
        if parse_stimulation_xml(event["stimulation_data"]["mXML"]) and
        re.search(r"(\d+)\s+repetition", parse_stimulation_xml(event["stimulation_data"]["mXML"])["description_text"])
        else "NA"
        for event in data["AnnotationRecord.yaml"]["stimulation_events"]
    ], []),
    # Per-event duty-cycle text between "user defined analog:" and "1 repetition".
    "duty_cycle": safe_extract(lambda: [
        re.search(r"user defined analog:\s+(.*?)\s+1 repetition", parse_stimulation_xml(event["stimulation_data"]["mXML"])["description_text"]).group(1)
        if parse_stimulation_xml(event["stimulation_data"]["mXML"]) and
        re.search(r"user defined analog:\s+(.*?)\s+1 repetition", parse_stimulation_xml(event["stimulation_data"]["mXML"])["description_text"])
        else "NA"
        for event in data["AnnotationRecord.yaml"]["stimulation_events"]
    ], []),
    "stimulation_timeframes": safe_extract(lambda: [x["timepoint_index"] for x in data["AnnotationRecord.yaml"]["stimulation_events"]], []),
    "stimulation_ms": safe_extract(lambda: [parse_stimulation_xml(x["stimulation_data"]["mXML"])["event_timepoint_ms"] for x in data["AnnotationRecord.yaml"]["stimulation_events"]], []),
    "duration_ms": safe_extract(lambda: [parse_stimulation_xml(x["stimulation_data"]["mXML"])["duration_ms"] for x in data["AnnotationRecord.yaml"]["stimulation_events"]], []),
    "stimulated_rois": safe_extract(lambda: [parse_stimulation_xml(x["stimulation_data"]["mXML"])["stimulated_rois"] for x in data["AnnotationRecord.yaml"]["stimulation_events"]], []),
    # Per-event list of (roi_index, target_power) pairs.
    "stimulated_roi_powers": safe_extract(lambda: [
        [(x["roi_index"], x["target_power"]) for x in ev["rois"]] for ev in extract_roi_info(data["AnnotationRecord.yaml"]["stimulation_events"])
    ], []),
    # Per-event list of (roi_index, corner_1_xyz, corner_2_xyz) triples.
    "stimulated_roi_location": safe_extract(lambda: [
        [(x["roi_index"], x["corner_1_xyz"], x["corner_2_xyz"]) for x in ev["rois"]] for ev in extract_roi_info(data["AnnotationRecord.yaml"]["stimulation_events"])
    ], []),
    # First ElapsedTimes entry is the frame count; the rest are the stamps.
    "time_stamps": safe_extract(lambda: data["ElapsedTimes.yaml"]["theElapsedTimes"][1:], []),
    "initial_roi_powers": safe_extract(lambda: [
        (roi_id, roi_data.get('mTargetPower')) for roi_id, roi_data in data["AnnotationRecord.yaml"]["initial_rois"].items()
    ], []),
    # Two 3D corners per initial ROI, sliced from the flat 6-value array.
    "initial_roi_location": safe_extract(lambda: [
        (roi_id,
         tuple(roi_data.get('StructArrayValues', [])[0:3]),
         tuple(roi_data.get('StructArrayValues', [])[3:6]))
        for roi_id, roi_data in data["AnnotationRecord.yaml"]["initial_rois"].items()
    ], [])
}
|
|
348
|
+
|
|
349
|
+
# Validate that list lengths match stimulation_events count.
# Each per-event list should have exactly one entry per stimulation event;
# a mismatch means the annotation file was only partially parsed.
if variables['stimulation_events'] > 0:
    list_vars = ['stimulation_timeframes', 'stimulation_ms', 'duration_ms',
                 'repetitions', 'duty_cycle', 'stimulated_rois',
                 'stimulated_roi_powers', 'stimulated_roi_location']

    for var_name in list_vars:
        var_value = variables[var_name]
        # Non-list values ("NA" fallbacks) are skipped, not flagged.
        if isinstance(var_value, list) and len(var_value) != variables['stimulation_events']:
            print(f"⚠️ Warning: {var_name} has {len(var_value)} entries but {variables['stimulation_events']} events expected.")
|
|
359
|
+
|
|
360
|
+
print("Saving experiment_summary.csv...")
# Scalar summary: one "Parameter,Value" row per non-list entry. List-valued
# entries are written to the per-event CSVs below instead.
with open_overwrite(Path(folder_path) / 'experiment_summary.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    writer.writerow(['Parameter', 'Value'])  # Write header

    # Loop through the dictionary and save single-value items
    for key, value in variables.items():
        if not isinstance(value, list) and not isinstance(value, np.ndarray):
            writer.writerow([key, value])
|
|
369
|
+
|
|
370
|
+
print("Saving stimulation_events.csv...")
# Define the headers for our events file
event_headers = [
    'event_index',
    'timepoint_frame',
    'timepoint_ms',
    'duration_ms',
    'repetitions',
    'duty_cycle',
    'stimulated_rois'
]

# Fix: the original re-defined this helper inside the per-event loop on
# every iteration; hoisted to module level.
def safe_list_get(lst, index, default='NA'):
    """Return lst[index], or *default* if the index is out of range or lst is not indexable."""
    try:
        return lst[index] if index < len(lst) else default
    except (IndexError, TypeError):
        return default

with open_overwrite(Path(folder_path) / 'stimulation_events.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.DictWriter(f, fieldnames=event_headers)
    writer.writeheader()

    # One row per stimulation event; per-event values are looked up
    # defensively because the source lists can be shorter than expected.
    for i in range(variables['stimulation_events']):
        writer.writerow({
            'event_index': i + 1,
            'timepoint_frame': safe_list_get(variables['stimulation_timeframes'], i),
            'timepoint_ms': safe_list_get(variables['stimulation_ms'], i),
            'duration_ms': safe_list_get(variables['duration_ms'], i),
            'repetitions': safe_list_get(variables['repetitions'], i),
            'duty_cycle': safe_list_get(variables['duty_cycle'], i),
            'stimulated_rois': safe_list_get(variables['stimulated_rois'], i)
        })
|
|
404
|
+
|
|
405
|
+
# --- 3. Save the Detailed ROI Data ---
print("Saving roi_details.csv...")
roi_headers = [
    'event_index',
    'roi_index',
    'target_power',
    'corner_1_x',
    'corner_1_y',
    'corner_1_z',
    'corner_2_x',
    'corner_2_y',
    'corner_2_z'
]

with open_overwrite(Path(folder_path) / 'roi_details.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.DictWriter(f, fieldnames=roi_headers)
    writer.writeheader()

    # Nested loop: outer loop for events, inner loop for ROIs within that event
    for i in range(variables['stimulation_events']):
        # Safely get the power and location data for the current event
        try:
            powers = variables['stimulated_roi_powers'][i] if i < len(variables['stimulated_roi_powers']) else []
            locations = variables['stimulated_roi_location'][i] if i < len(variables['stimulated_roi_location']) else []
        except (IndexError, TypeError):
            print(f"Warning: Missing ROI data for event {i+1}, skipping...")
            continue

        # Create a dictionary for easy lookup: {roi_index: (p1, p2)}
        locations_dict = {loc[0]: (loc[1], loc[2]) for loc in locations}

        # Loop through the list of tuples and unpack them directly
        # NOTE(review): the fallback corners are 2-element tuples, so the
        # corner_*_z lookups below would raise IndexError if an roi_id were
        # ever missing from locations_dict — confirm that powers and
        # locations always cover the same ROI ids.
        for roi_id, power in powers:
            corner1, corner2 = locations_dict.get(roi_id, ((None, None), (None, None)))
            writer.writerow({
                'event_index': i + 1,
                'roi_index': roi_id,
                'target_power': power,
                'corner_1_x': corner1[0] if corner1 else np.nan,
                'corner_1_y': corner1[1] if corner1 else np.nan,
                'corner_1_z': corner1[2] if corner1 else np.nan,
                'corner_2_x': corner2[0] if corner2 else np.nan,
                'corner_2_y': corner2[1] if corner2 else np.nan,
                'corner_2_z': corner2[2] if corner2 else np.nan
            })
|
|
450
|
+
|
|
451
|
+
# Persist the full summary dict for downstream Python tooling.
# Consistency fix: use open_overwrite like every other output in this script
# so parent directories are created if missing.
with open_overwrite(Path(folder_path) / 'experiment_summary.pkl', 'wb') as f:
    # 'wb' is used for writing in binary mode
    pickle.dump(variables, f)

# Output a json file
with open_overwrite(Path(folder_path) / 'experiment_summary.json', 'w', encoding='utf-8') as f:
    json.dump(variables, f, indent=4)

print(f"JSON file and Pickle file saved to {folder_path}")


# Final sanity checks on the extracted metadata; warnings only, never fatal.
if variables["Elapsed_time_offset"] != "NA" and variables["Elapsed_time_offset"] != 0:
    print(f"⚠️ Warning: Elapsed time offset is {variables['Elapsed_time_offset']} ms, not zero as expected.")
if variables["n_frames"] != "NA" and variables["time_stamps"] != "NA" and variables["n_frames"] != len(variables["time_stamps"]):
    print(f"⚠️ Warning: Number of frames ({variables['n_frames']}) does not match length of time stamps ({len(variables['time_stamps'])}).")

print("\n[OK] Files saved successfully.")
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
"""Simple plotting utility for toolbox `trace.txt` files.
|
|
2
|
+
|
|
3
|
+
Usage:
|
|
4
|
+
python plot.py input_trace.txt output.pdf
|
|
5
|
+
|
|
6
|
+
The script produces a multi-page PDF with:
|
|
7
|
+
- Page 1: heatmap of all ROI traces (frames x ROIs)
|
|
8
|
+
- Page 2: mean trace and standard deviation band
|
|
9
|
+
- Subsequent pages: individual ROI traces with basic stats
|
|
10
|
+
|
|
11
|
+
Dependencies: pandas, numpy, matplotlib
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
import sys
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
import numpy as np
|
|
19
|
+
import pandas as pd
|
|
20
|
+
import matplotlib
|
|
21
|
+
matplotlib.use('Agg')
|
|
22
|
+
import matplotlib.pyplot as plt
|
|
23
|
+
from matplotlib.backends.backend_pdf import PdfPages
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def read_trace(path: Path) -> pd.DataFrame:
    """Load a tab-delimited trace.txt file.

    The first column (frame number) becomes the DataFrame index; the header
    row supplies the trace column names.
    """
    frame = pd.read_csv(path, sep='\t', index_col=0)
    return frame
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def make_overview_heatmap(df: pd.DataFrame, pdf: PdfPages):
    """Render one PDF page: a ROIs-by-frames heatmap of every trace column.

    Columns containing 'Trace' are plotted; if none exist, every column that
    is not a mean channel is used instead.
    """
    cols = [c for c in df.columns if 'Trace' in c or c.startswith('Trace_')]
    if not cols:
        # fall back to every column except any that look like mean channels
        cols = [c for c in df.columns if 'Mean' not in c]

    values = df[cols].to_numpy()

    fig, ax = plt.subplots(figsize=(8.5, 11))
    # Transpose so ROIs run down the y-axis and frames along the x-axis.
    image = ax.imshow(values.T, aspect='auto', cmap='viridis', interpolation='nearest')
    ax.set_ylabel('ROI')
    ax.set_xlabel('Frame')
    ax.set_title('ROI traces (heatmap)')
    fig.colorbar(image, ax=ax, orientation='vertical', label='Trace value')
    pdf.savefig(fig)
    plt.close(fig)
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def make_summary_page(df: pd.DataFrame, pdf: PdfPages):
    """Render one PDF page: the across-ROI mean trace with a ±1 std band."""
    cols = [c for c in df.columns if 'Trace' in c or c.startswith('Trace_')]
    traces = df[cols]
    avg = traces.mean(axis=1)
    spread = traces.std(axis=1)

    fig, ax = plt.subplots(figsize=(8.5, 11))
    ax.plot(df.index, avg, color='black', label='Mean')
    ax.fill_between(df.index, avg - spread, avg + spread, color='gray', alpha=0.3, label='±1 std')
    ax.set_xlabel('Frame')
    ax.set_ylabel('Trace')
    ax.set_title('Mean ROI trace ±1 std')
    ax.legend()
    pdf.savefig(fig)
    plt.close(fig)
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def make_individual_pages(df: pd.DataFrame, pdf: PdfPages, max_per_page: int = 6):
    """Render one PDF page per ROI trace with basic stats in the corner.

    NOTE(review): ``max_per_page`` is kept for interface compatibility but is
    currently unused — each ROI gets its own full page. (Also removed an
    unused local that counted the trace columns.)
    """
    trace_cols = [c for c in df.columns if 'Trace' in c or c.startswith('Trace_')]
    frames = df.index
    for i, col in enumerate(trace_cols):
        fig, ax = plt.subplots(figsize=(8.5, 11))
        ax.plot(frames, df[col], lw=0.8)
        ax.set_title(f'ROI {i+1}: {col}')
        ax.set_xlabel('Frame')
        ax.set_ylabel('Trace')
        # simple stats box anchored to the lower-right corner
        ax.text(0.98, 0.02, f'mean={df[col].mean():.4f}\nstd={df[col].std():.4f}',
                ha='right', va='bottom', transform=ax.transAxes, fontsize=8,
                bbox=dict(facecolor='white', alpha=0.6, edgecolor='none'))
        pdf.savefig(fig)
        plt.close(fig)
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def main(argv):
    """CLI entry point: read a trace file and write the multi-page PDF report.

    Returns 0 on success, 2 on bad arguments or a missing input file.
    """
    if len(argv) < 3:
        print('Usage: python plot.py input_trace.txt output.pdf')
        return 2

    source = Path(argv[1])
    target = Path(argv[2])
    if not source.exists():
        print(f'Input file {source} not found')
        return 2

    traces = read_trace(source)

    # Assemble the report: overview heatmap, summary page, then one page per ROI.
    with PdfPages(target) as pdf:
        make_overview_heatmap(traces, pdf)
        make_summary_page(traces, pdf)
        make_individual_pages(traces, pdf)

    print(f'Wrote {target}')
    return 0
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
# Script entry point: the process exit status mirrors main()'s return code.
if __name__ == '__main__':
    raise SystemExit(main(sys.argv))
|
|
110
|
+
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
import argparse
import ast
from pathlib import Path

import suite2p
|
|
4
|
+
|
|
5
|
+
# CLI: a single TIF movie, an optional output folder, and repeatable
# key=value suite2p parameter overrides.
parser = argparse.ArgumentParser(description="Suite2p registration runner (minimal)")
parser.add_argument("--movie", required=True, type=str, help="Path to the single TIF to process")
parser.add_argument("--outdir", type=str, default=None, help="(Optional) output folder. Default: <movie_dir>/<movie_stem>_regscan")
parser.add_argument("--param", action="append", default=[], help="Suite2p parameter as key=value (repeat for multiple)")
args = parser.parse_args()
|
|
10
|
+
|
|
11
|
+
def parse_param_list(param_list):
    """Parse repeated "key=value" CLI strings into a typed options dict.

    Values are coerced in order: bracketed lists via ast.literal_eval,
    floats when they contain a '.', ints otherwise; anything unparseable
    stays a raw string. The GUI name ``n_channels`` is mapped to suite2p's
    ``nchannels``. Entries without '=' are silently skipped.
    """
    d = {}
    for p in param_list:
        if "=" not in p:
            continue
        k, v = p.split("=", 1)
        # Map GUI parameter names to suite2p names
        if k == "n_channels":
            k = "nchannels"
        try:
            if v.startswith("[") and v.endswith("]"):
                # Security fix: literal_eval accepts only Python literals,
                # unlike eval() which would execute arbitrary expressions
                # from the command line.
                d[k] = ast.literal_eval(v)
            elif "." in v:
                d[k] = float(v)
            else:
                d[k] = int(v)
        except Exception:
            d[k] = v
    return d
|
|
30
|
+
|
|
31
|
+
param_dict = parse_param_list(args.param)

movie = Path(args.movie).expanduser().resolve()
# Output root: explicit --outdir if given, otherwise next to the movie.
root_out = (Path(args.outdir).expanduser().resolve()
            if args.outdir is not None
            else movie.parent)
root_out.mkdir(exist_ok=True, parents=True)

ops = suite2p.default_ops()

# Set reasonable defaults (will be overridden by GUI params)
default_ops = {
    "nplanes": 1,
    "nchannels": 2,  # Default to 2 channels
    "functional_chan": 1,
    "fs": 10.535,
    "tau": 0.7,
    "align_by_chan": 2,
    "do_registration": 1,
    "reg_tif": True,
    # Fix: parse_param_list() renames "n_channels" to "nchannels", so the
    # original lookup of "n_channels" here always fell back to its default.
    "reg_tif_chan2": True if param_dict.get("nchannels", 2) >= 2 else False,
    "keep_movie_raw": True,
    "data_path": [str(movie.parent)],
    "save_path0": str(root_out),
    "sparse_mode": True,
    "spatial_scale": 0,
    "anatomical_only": 1,
    "threshold_scaling": 0.5,
    "soma_crop": True,
    "neuropil_extract": True
}

ops.update(default_ops)
ops.update(param_dict)  # GUI params override defaults

# Set channel-dependent parameters based on nchannels
n_channels = ops.get("nchannels", 1)
if n_channels >= 2:
    ops["align_by_chan"] = ops.get("align_by_chan", 2)
    ops["reg_tif_chan2"] = True
    ops["1Preg"] = ops.get("1Preg", 1)
else:
    # For single channel, don't try to register/save channel 2
    ops["align_by_chan"] = 1
    ops["reg_tif_chan2"] = False
    ops["1Preg"] = 0

db = {
    "data_path": [str(movie.parent)],
    "tiff_list": [movie.name],
    "save_path0": str(root_out),
    "fast_disk": str(root_out),
    "subfolders": [],
}

suite2p.run_s2p(ops=ops, db=db)
|