fibphot-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fibphot/__init__.py +6 -0
- fibphot/analysis/__init__.py +0 -0
- fibphot/analysis/aggregate.py +257 -0
- fibphot/analysis/auc.py +354 -0
- fibphot/analysis/irls.py +350 -0
- fibphot/analysis/peaks.py +1163 -0
- fibphot/analysis/photobleaching.py +290 -0
- fibphot/analysis/plotting.py +105 -0
- fibphot/analysis/report.py +56 -0
- fibphot/collection.py +207 -0
- fibphot/fit/__init__.py +0 -0
- fibphot/fit/regression.py +269 -0
- fibphot/io/__init__.py +6 -0
- fibphot/io/doric.py +435 -0
- fibphot/io/excel.py +76 -0
- fibphot/io/h5.py +321 -0
- fibphot/misc.py +11 -0
- fibphot/peaks.py +628 -0
- fibphot/pipeline.py +14 -0
- fibphot/plotting.py +594 -0
- fibphot/stages/__init__.py +22 -0
- fibphot/stages/base.py +101 -0
- fibphot/stages/baseline.py +354 -0
- fibphot/stages/control_dff.py +214 -0
- fibphot/stages/filters.py +273 -0
- fibphot/stages/normalisation.py +260 -0
- fibphot/stages/regression.py +139 -0
- fibphot/stages/smooth.py +442 -0
- fibphot/stages/trim.py +141 -0
- fibphot/state.py +309 -0
- fibphot/tags.py +130 -0
- fibphot/types.py +6 -0
- fibphot-0.1.0.dist-info/METADATA +63 -0
- fibphot-0.1.0.dist-info/RECORD +37 -0
- fibphot-0.1.0.dist-info/WHEEL +5 -0
- fibphot-0.1.0.dist-info/licenses/LICENSE.md +21 -0
- fibphot-0.1.0.dist-info/top_level.txt +1 -0
fibphot/state.py
ADDED
from __future__ import annotations

from dataclasses import dataclass, field, replace
from pathlib import Path
from typing import Any

import numpy as np

from .types import FloatArray


@dataclass(frozen=True, slots=True)
class StageRecord:
    """Concise record of an applied preprocessing stage."""

    stage_id: str
    name: str
    params: dict[str, Any] = field(default_factory=dict)
    metrics: dict[str, float] = field(default_factory=dict)
    notes: str | None = None


@dataclass(frozen=True, slots=True)
class PhotometryState:
    """
    Immutable photometry data state.

    signals: stacked 2D array with shape (n_signals, n_samples).
    history: stacked 3D array with shape (h, n_signals, n_samples),
        storing previous *signals* snapshots only.
    """

    time_seconds: FloatArray
    signals: FloatArray
    channel_names: tuple[str, ...]
    history: FloatArray = field(
        default_factory=lambda: np.empty((0, 0, 0), dtype=float)
    )
    summary: tuple[StageRecord, ...] = ()
    derived: dict[str, FloatArray] = field(default_factory=dict)
    results: dict[str, dict[str, Any]] = field(default_factory=dict)
    metadata: dict[str, Any] = field(default_factory=dict)

    _name_to_index: dict[str, int] = field(init=False, repr=False)

    def __post_init__(self) -> None:
        t = np.asarray(self.time_seconds, dtype=float)
        s = np.asarray(self.signals, dtype=float)

        if t.ndim != 1:
            raise ValueError("time_seconds must be 1D.")
        if s.ndim != 2:
            raise ValueError(
                "signals must be 2D with shape (n_signals, n_samples)."
            )
        if s.shape[1] != t.shape[0]:
            raise ValueError(
                "signals second dimension must match time length: "
                f"{s.shape[1]} != {t.shape[0]}"
            )
        if len(self.channel_names) != s.shape[0]:
            raise ValueError(
                "channel_names length must match signals first dimension: "
                f"{len(self.channel_names)} != {s.shape[0]}"
            )

        # normalise channel names to lower case
        names = tuple(str(n).lower() for n in self.channel_names)
        name_to_idx = {n: i for i, n in enumerate(names)}

        # history normalisation
        h = np.asarray(self.history, dtype=float)
        if h.size == 0:
            h = np.empty((0, s.shape[0], s.shape[1]), dtype=float)
        elif h.ndim != 3 or h.shape[1:] != s.shape:
            raise ValueError(
                "history must be 3D with shape (h, n_signals, n_samples) "
                f"matching signals; got {h.shape}, expected "
                f"(*, {s.shape[0]}, {s.shape[1]})."
            )

        object.__setattr__(self, "time_seconds", t)
        object.__setattr__(self, "signals", s)
        object.__setattr__(self, "channel_names", names)
        object.__setattr__(self, "history", h)
        object.__setattr__(self, "_name_to_index", name_to_idx)

    @property
    def n_samples(self) -> int:
        return int(self.time_seconds.shape[0])

    @property
    def n_signals(self) -> int:
        return int(self.signals.shape[0])

    @property
    def sampling_rate(self) -> float:
        dt = np.diff(self.time_seconds)
        return float(1.0 / np.median(dt))

    @property
    def tags(self) -> dict[str, str]:
        raw = self.metadata.get("tags", {})
        if isinstance(raw, dict):
            return {str(k): str(v) for k, v in raw.items()}
        return {}

    @property
    def subject(self) -> str | None:
        subj = self.metadata.get("subject")
        if subj is not None:
            return str(subj)

        src = self.metadata.get("source_path")
        if not src:
            return None

        stem = Path(str(src)).stem

        return stem.split("_", maxsplit=1)[0].lower() if stem else None

    def idx(self, channel: str) -> int:
        key = channel.lower()
        if key not in self._name_to_index:
            raise KeyError(
                f"Unknown channel '{channel}'. Available: {self.channel_names}"
            )
        return self._name_to_index[key]

    def channel(self, channel: str) -> FloatArray:
        return self.signals[self.idx(channel)]

    def tag(self, key: str, default: str | None = None) -> str | None:
        return self.tags.get(key, default)

    def with_channel(self, channel: str, values: FloatArray) -> PhotometryState:
        v = np.asarray(values, dtype=float)
        if v.shape != (self.n_samples,):
            raise ValueError(
                f"Channel replacement must have shape ({self.n_samples},), "
                f"got {v.shape}."
            )
        i = self.idx(channel)
        new_signals = self.signals.copy()
        new_signals[i] = v
        return replace(self, signals=new_signals)

    def with_metadata(
        self, updates: dict[str, Any] | None = None, **kwargs: Any
    ) -> PhotometryState:
        patch: dict[str, Any] = {}
        if updates:
            patch.update(updates)
        patch.update(kwargs)

        new_meta = dict(self.metadata)
        new_meta.update(patch)
        return replace(self, metadata=new_meta)

    def with_tags(
        self,
        tags: dict[str, str],
        *,
        overwrite: bool = False,
    ) -> PhotometryState:
        existing = dict(self.tags)
        incoming = {str(k): str(v) for k, v in tags.items()}

        if overwrite:
            existing.update(incoming)
        else:
            for k, v in incoming.items():
                existing.setdefault(k, v)

        return self.with_metadata(tags=existing)

    def push_history(self) -> PhotometryState:
        """Return a new state with the current signals appended to history."""
        new_hist = np.concatenate(
            [self.history, self.signals[None, :, :]], axis=0
        )
        return replace(self, history=new_hist)

    def raw(self) -> PhotometryState:
        """
        Return a new state representing the raw signals (after 0 stages).

        - signals restored to the raw snapshot
        - history cleared (you are back at the start)
        - summary/results/derived cleared
        - metadata preserved
        """
        if self.history.shape[0] == 0:
            # No stages applied, already raw.
            return self

        raw_signals = self.history[0]

        return PhotometryState(
            time_seconds=self.time_seconds,
            signals=raw_signals,
            channel_names=self.channel_names,
            history=np.empty((0, self.n_signals, self.n_samples), dtype=float),
            summary=(),
            derived={},
            results={},
            metadata=self.metadata,
        )

    def revert(self, n_steps: int | None = 1) -> PhotometryState:
        """
        Revert to a previous signals snapshot and drop the corresponding
        summary entries and stage results.

        n_steps=1: before most recent stage
        n_steps=None: restore raw (after 0 stages)
        """
        if n_steps is None:
            return self.raw()

        if n_steps < 1:
            raise ValueError("n_steps must be >= 1.")
        if self.history.shape[0] < n_steps:
            raise ValueError(
                f"Cannot revert {n_steps} step(s); history has "
                f"{self.history.shape[0]}."
            )

        restored_signals = self.history[-n_steps]
        new_history = self.history[:-n_steps]
        new_summary = self.summary[:-n_steps]

        valid_ids = {r.stage_id for r in new_summary}
        new_results = {k: v for k, v in self.results.items() if k in valid_ids}

        return PhotometryState(
            time_seconds=self.time_seconds,
            signals=restored_signals,
            channel_names=self.channel_names,
            history=new_history,
            summary=new_summary,
            derived={},
            results=new_results,
            metadata=self.metadata,
        )

    def revert_to(self, stage_name: str) -> PhotometryState:
        """
        Revert to the state immediately after the last occurrence of stage_name.

        If stage_name does not exist in the summary, raises KeyError.
        """
        target = stage_name.lower()
        names = [r.name.lower() for r in self.summary]
        if target not in names:
            raise KeyError(f"Stage '{stage_name}' not found in summary.")

        last_idx = max(i for i, n in enumerate(names) if n == target)
        steps_to_drop = len(self.summary) - (last_idx + 1)
        return self if steps_to_drop == 0 else self.revert(steps_to_drop)

    def pipe(self, *stages: Any) -> PhotometryState:
        """Apply stages in order (functional pipeline)."""
        state: PhotometryState = self
        for st in stages:
            state = st(state)
        return state

    def plot(
        self,
        *,
        signal: str,
        control: str | None = None,
        **kwargs: Any,
    ):
        """Plot the current state."""
        from .plotting import plot_current

        return plot_current(self, signal=signal, control=control, **kwargs)

    def plot_history(self, channel: str, **kwargs):
        """Plot a channel across the saved history (and optionally current)."""
        from .plotting import plot_history

        return plot_history(self, channel, **kwargs)

    def to_h5(
        self,
        path: Path | str,
        *,
        compression: str | None = "gzip",
        compression_opts: int = 4,
    ) -> None:
        """Save this state to an HDF5 file."""
        from .io.h5 import save_state_h5

        save_state_h5(
            self,
            path,
            compression=compression,
            compression_opts=compression_opts,
        )

    @classmethod
    def from_h5(cls, path: Path | str) -> PhotometryState:
        """Load a state from an HDF5 file."""
        from .io.h5 import load_state_h5

        return load_state_h5(path)
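
A minimal usage sketch of PhotometryState (not part of the wheel). It assumes the package is installed as fibphot and that plain NumPy arrays are acceptable inputs, which __post_init__ above coerces via np.asarray; the channel names, array values, and the five-point moving average are invented for illustration.

import numpy as np

from fibphot.state import PhotometryState

t = np.linspace(0.0, 10.0, 1001)                     # 1001 samples, dt = 0.01 s
signals = np.vstack([np.sin(t), 0.1 * np.cos(t)])    # shape (n_signals, n_samples)

state = PhotometryState(
    time_seconds=t,
    signals=signals,
    channel_names=("GCaMP", "Isosbestic"),           # stored lower-cased by __post_init__
    metadata={"source_path": "74R_10122025_A61603_0001.doric"},
)

print(state.n_signals, state.n_samples)              # 2 1001
print(round(state.sampling_rate))                    # ~100 (Hz, from median dt)
print(state.subject)                                 # '74r', derived from source_path

# Emulate one processing step by hand: snapshot the signals, then replace a
# channel. No StageRecord is appended, so only the signal/history mechanics
# are exercised here.
kernel = np.ones(5) / 5.0
smoothed = state.push_history().with_channel(
    "gcamp", np.convolve(state.channel("gcamp"), kernel, mode="same")
)
previous = smoothed.revert(1)                        # restores the saved snapshot
assert np.allclose(previous.signals, state.signals)
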
fibphot/tags.py
ADDED
from __future__ import annotations

import csv
import re
from collections.abc import Callable
from dataclasses import dataclass
from pathlib import Path

from .state import PhotometryState

SubjectGetter = Callable[[PhotometryState], str | None]


@dataclass(frozen=True, slots=True)
class TagTable:
    """
    Mapping from subject -> tags.

    The first column is assumed to be the subject identifier. Remaining columns
    are treated as tag keys.
    """

    by_subject: dict[str, dict[str, str]]

    def tags_for(self, subject: str) -> dict[str, str]:
        return dict(self.by_subject.get(subject.lower(), {}))


def read_tag_table(path: Path | str) -> TagTable:
    """
    Read a delimited text table where the first column is subject.

    Supports CSV/TSV/space-delimited. Header row is required.
    """
    path = Path(path)

    text = path.read_text(encoding="utf-8").strip()
    if not text:
        raise ValueError(f"Tag file is empty: {path}")

    # sniff delimiter (fallback to whitespace splitting)
    sample = "\n".join(text.splitlines()[:5])
    try:
        dialect = csv.Sniffer().sniff(sample, delimiters=",\t;")
        delim: str | None = dialect.delimiter
    except csv.Error:
        delim = None

    rows: list[list[str]] = []
    if delim is None:
        for line in text.splitlines():
            rows.append(re.split(r"\s+", line.strip()))
    else:
        reader = csv.reader(text.splitlines(), delimiter=delim)
        rows = [list(r) for r in reader]

    if len(rows) < 2:
        raise ValueError(
            "Tag table must include a header and at least one row."
        )

    header = [h.strip().lower() for h in rows[0]]
    if len(header) < 2:
        raise ValueError("Tag table must have >=2 columns (subject + tags).")

    subj_key = header[0]
    tag_keys = header[1:]

    mapping: dict[str, dict[str, str]] = {}
    for r in rows[1:]:
        if not r or all(not c.strip() for c in r):
            continue
        subject = str(r[0]).strip().lower()
        if not subject:
            continue
        tags: dict[str, str] = {}
        for j, k in enumerate(tag_keys, start=1):
            if j >= len(r):
                tags[k] = ""
            else:
                tags[k] = str(r[j]).strip()
        mapping[subject] = tags

    if subj_key != "subject":
        pass

    return TagTable(by_subject=mapping)


def subject_from_filename(path: Path | str) -> str:
    """
    Default subject extraction:
    - take the stem
    - split on '_' and use the first token

    Example:
        '74R_10122025_A61603_0001.doric' -> '74r'
    """
    p = Path(path)
    stem = p.stem
    token = stem.split("_", maxsplit=1)[0]
    return token.lower()


def default_subject_getter(state: PhotometryState) -> str | None:
    """
    Prefer explicit metadata subject, else derive from metadata['source_path'].
    """
    if state.subject is not None:
        return state.subject
    src = state.metadata.get("source_path")
    if not src:
        return None
    return subject_from_filename(str(src))


def apply_tags(
    state: PhotometryState,
    table: TagTable,
    *,
    subject_getter: SubjectGetter = default_subject_getter,
    overwrite: bool = False,
) -> PhotometryState:
    subject = subject_getter(state)
    if subject is None:
        return state
    tags = table.tags_for(subject)
    if not tags:
        return state
    return state.with_tags(tags, overwrite=overwrite)
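
A minimal sketch of the tag-table workflow described by the docstrings above (not part of the wheel). The tags.csv contents, subject IDs, and tag keys below are invented for illustration.

from pathlib import Path

from fibphot.tags import read_tag_table, subject_from_filename

# Write a tiny comma-delimited table: first column is the subject, the rest
# become tag keys.
tag_file = Path("tags.csv")
tag_file.write_text(
    "subject,genotype,dose\n"
    "74r,wt,0.5\n"
    "75l,ko,1.0\n",
    encoding="utf-8",
)

table = read_tag_table(tag_file)
print(table.tags_for("74R"))      # {'genotype': 'wt', 'dose': '0.5'}; lookup is case-insensitive
print(subject_from_filename("74R_10122025_A61603_0001.doric"))  # '74r'

# Given a PhotometryState whose metadata carries 'source_path' (as in the
# state.py sketch above), apply_tags(state, table) would attach these tags
# via state.with_tags(...).
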
fibphot-0.1.0.dist-info/METADATA
ADDED
Metadata-Version: 2.4
Name: fibphot
Version: 0.1.0
Summary: Analysis pipeline for fibre photometry data
Author-email: Thomas Nicholas <tcnicholas@me.com>
Project-URL: Homepage, https://github.com/tcnicholas/fibphot
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Requires-Python: >=3.10
Description-Content-Type: text/markdown
License-File: LICENSE.md
Requires-Dist: h5py>=3.15.1
Requires-Dist: matplotlib>=3.10.8
Requires-Dist: numpy>=2.4.1
Requires-Dist: openpyxl>=3.1.5
Requires-Dist: pandas>=2.3.3
Requires-Dist: pybaselines>=1.2.1
Requires-Dist: scipy>=1.17.0
Provides-Extra: dev
Requires-Dist: ruff>=0.14.13; extra == "dev"
Requires-Dist: pytest>=8.0; extra == "dev"
Requires-Dist: matplotlib>=3.10.8; extra == "dev"
Provides-Extra: publish
Requires-Dist: build; extra == "publish"
Requires-Dist: twine; extra == "publish"
Provides-Extra: docs
Requires-Dist: sphinx; extra == "docs"
Requires-Dist: pip; extra == "docs"
Requires-Dist: furo; extra == "docs"
Requires-Dist: nbsphinx; extra == "docs"
Requires-Dist: ipython; extra == "docs"
Requires-Dist: pyzmq; extra == "docs"
Requires-Dist: sphinx-autoapi; extra == "docs"
Requires-Dist: sphinx-autobuild; extra == "docs"
Requires-Dist: pydata_sphinx_theme; extra == "docs"
Requires-Dist: sphinxext-opengraph; extra == "docs"
Requires-Dist: sphinx-copybutton; extra == "docs"
Requires-Dist: sphinx-design; extra == "docs"
Requires-Dist: jupyter_sphinx; extra == "docs"
Requires-Dist: sphinx-togglebutton; extra == "docs"
Requires-Dist: sphinx_favicon; extra == "docs"
Requires-Dist: sphinx_sitemap; extra == "docs"
Dynamic: license-file

# FibPhot

<div align="center">

[](LICENSE.md)
</div>


## Install


## Relevant sources and related packages/code


### References
fibphot-0.1.0.dist-info/RECORD
ADDED
fibphot/__init__.py,sha256=J1DLAhU3rMvbc2cCU0KdvmKvj12LO6eDD1_34_TOplc,152
fibphot/collection.py,sha256=cht9iam6Twa7xkHhf_RLNJFLwHIpDWcOc0RkJz1f0bA,6127
fibphot/misc.py,sha256=7ubXg8-V_nNRRDcBCDYkqkrRhUd8e82qaLZyEkS2dWw,238
fibphot/peaks.py,sha256=oSNQk1fHgLprH-_HU9lomyq9i_bsj9dMcsUHOl72440,18943
fibphot/pipeline.py,sha256=LpCwFCt-mW69agjQrCqotRGWGa_y5w5A1EtaGLnZhbI,365
fibphot/plotting.py,sha256=AUDGfN2T96RzuUb-7yj_3Tzsa_law0gMIxPn5EDfHWM,17081
fibphot/state.py,sha256=X1IvSrWl8867EU7Lz10mcpSokpbWPDL7GQvYPcnVR9w,9927
fibphot/tags.py,sha256=hpdN3yNQr4mSZ8IzXPBjPxdOSkRP_xIkJwbtmZiBRlk,3458
fibphot/types.py,sha256=6z3Y6diDFRwafQcyJURJ4ILSk1GNx0zw4LeReFSNCHY,121
fibphot/analysis/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
fibphot/analysis/aggregate.py,sha256=ETUxRYRPcBM_C0MTm6bcfNWIJ8DsjEXofYXk2AnmMCA,6958
fibphot/analysis/auc.py,sha256=hIKxt4IY2EQLByONG3UD4rSi3rC79ZDVgVAVYCu8bGI,10109
fibphot/analysis/irls.py,sha256=XNBkDnDDUPxIE2VmzTk9zIA_o35cT-nbALe2aNkcylQ,9846
fibphot/analysis/peaks.py,sha256=YfByY7Ai_8aERxf3ae2lOxrhwcUgjHL6iD9GEk2XJVc,37014
fibphot/analysis/photobleaching.py,sha256=2LZnZWLGoVRzCwBRoTdY3ec0i5-brSNT6hnzngajPMA,8806
fibphot/analysis/plotting.py,sha256=2voBGXrDhE8SzU2Co_8hZWfycielY_8ye1NV0p84l1Q,2790
fibphot/analysis/report.py,sha256=CgGKwjoo4wCTXSC5lTYsdUCcSmLfY7tCdm31ekfj11k,1622
fibphot/fit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
fibphot/fit/regression.py,sha256=6bgObLY26lk8bsk5j7s-HlUSTwW3gZ3YKEgIw6GLWd0,6969
fibphot/io/__init__.py,sha256=punBqN99w6VCX_E2ZDWyvfxL6zQyP9TJW0Xr0uBNsFQ,136
fibphot/io/doric.py,sha256=eGJjizkaKjapxqaOPNzLPk-OmEc0t13ydaql4yWQIEM,13702
fibphot/io/excel.py,sha256=aGU-h8nWmP4H2_0XZ_MqWaPCpEbUgwu5ddQM3v-Y7EI,2241
fibphot/io/h5.py,sha256=ev44eE_gXm3pZ9AlcjyfsPw_H8E9t9VgBHcLphNAjh8,10851
fibphot/stages/__init__.py,sha256=SEDH1Gd8Jv7B-KfmKLXLbKuiFsBhUjxk1T0EPvXr5cM,551
fibphot/stages/base.py,sha256=-C4gDU7EIgQc9cpq-bN6ttQp7DO4kzpYLx0JCgsJun0,2856
fibphot/stages/baseline.py,sha256=mC-spEiCsuLMYKUqt3t2GPlJrPGQqkNbvpa8KRdlz7k,10989
fibphot/stages/control_dff.py,sha256=5o-JSuSTnTEkJVcDl5uAN7tpk-KOVmkeg7HDPsFvdfU,7249
fibphot/stages/filters.py,sha256=ciA92JZzCCwdc6Tpzyivfa5giawSq_Pp6qtUoFTFhRM,8817
fibphot/stages/normalisation.py,sha256=aRxRsNSZ7Y4-CerJe-txy-APq8tcNh6NuSn7pEifNF8,8151
fibphot/stages/regression.py,sha256=qAwDnNxFAtRiD_tfmiLAV9SUgLnNrmnLZ8rgDy2B-do,4409
fibphot/stages/smooth.py,sha256=sqkNE7D9P6rfzjKJiu-Jr_ExSkFFQNseq44doBtOZjM,13193
fibphot/stages/trim.py,sha256=zcnb-M4Wi7KdBVRN0m9PyPogsgp_rJ0Y4mFuoqEAS_w,3790
fibphot-0.1.0.dist-info/licenses/LICENSE.md,sha256=W47AITyI1Om7cxTi5AKJYNe-rVEbsZPF4a5DOtFXMek,1071
fibphot-0.1.0.dist-info/METADATA,sha256=_KJR5CFPd8amhlKxqwtdI5gqQvr7ZpWK8K5NrxIrr1Y,2057
fibphot-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
fibphot-0.1.0.dist-info/top_level.txt,sha256=exLJNDNxfOY-ufk5CNv1B3bF-1mkVqyCZLKP77YeggM,8
fibphot-0.1.0.dist-info/RECORD,,
fibphot-0.1.0.dist-info/licenses/LICENSE.md
ADDED
MIT License

Copyright (c) 2026 Thomas Nicholas

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
fibphot-0.1.0.dist-info/top_level.txt
ADDED
fibphot