py-neuromodulation 0.0.7__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. py_neuromodulation/ConnectivityDecoding/_get_grid_whole_brain.py +0 -1
  2. py_neuromodulation/ConnectivityDecoding/_helper_write_connectome.py +0 -2
  3. py_neuromodulation/__init__.py +12 -4
  4. py_neuromodulation/analysis/RMAP.py +3 -3
  5. py_neuromodulation/analysis/decode.py +55 -2
  6. py_neuromodulation/analysis/feature_reader.py +1 -0
  7. py_neuromodulation/analysis/stats.py +3 -3
  8. py_neuromodulation/default_settings.yaml +25 -20
  9. py_neuromodulation/features/bandpower.py +65 -23
  10. py_neuromodulation/features/bursts.py +9 -8
  11. py_neuromodulation/features/coherence.py +7 -4
  12. py_neuromodulation/features/feature_processor.py +4 -4
  13. py_neuromodulation/features/fooof.py +7 -6
  14. py_neuromodulation/features/mne_connectivity.py +60 -87
  15. py_neuromodulation/features/oscillatory.py +5 -4
  16. py_neuromodulation/features/sharpwaves.py +21 -0
  17. py_neuromodulation/filter/kalman_filter.py +17 -6
  18. py_neuromodulation/gui/__init__.py +3 -0
  19. py_neuromodulation/gui/backend/app_backend.py +419 -0
  20. py_neuromodulation/gui/backend/app_manager.py +345 -0
  21. py_neuromodulation/gui/backend/app_pynm.py +253 -0
  22. py_neuromodulation/gui/backend/app_socket.py +97 -0
  23. py_neuromodulation/gui/backend/app_utils.py +306 -0
  24. py_neuromodulation/gui/backend/app_window.py +202 -0
  25. py_neuromodulation/gui/frontend/assets/Figtree-VariableFont_wght-CkXbWBDP.ttf +0 -0
  26. py_neuromodulation/gui/frontend/assets/index-_6V8ZfAS.js +300137 -0
  27. py_neuromodulation/gui/frontend/assets/plotly-DTCwMlpS.js +23594 -0
  28. py_neuromodulation/gui/frontend/charite.svg +16 -0
  29. py_neuromodulation/gui/frontend/index.html +14 -0
  30. py_neuromodulation/gui/window_api.py +115 -0
  31. py_neuromodulation/lsl_api.cfg +3 -0
  32. py_neuromodulation/processing/data_preprocessor.py +9 -2
  33. py_neuromodulation/processing/filter_preprocessing.py +43 -27
  34. py_neuromodulation/processing/normalization.py +32 -17
  35. py_neuromodulation/processing/projection.py +2 -2
  36. py_neuromodulation/processing/resample.py +6 -2
  37. py_neuromodulation/run_gui.py +36 -0
  38. py_neuromodulation/stream/__init__.py +7 -1
  39. py_neuromodulation/stream/backend_interface.py +47 -0
  40. py_neuromodulation/stream/data_processor.py +24 -3
  41. py_neuromodulation/stream/mnelsl_player.py +121 -21
  42. py_neuromodulation/stream/mnelsl_stream.py +9 -17
  43. py_neuromodulation/stream/settings.py +80 -34
  44. py_neuromodulation/stream/stream.py +83 -62
  45. py_neuromodulation/utils/channels.py +1 -1
  46. py_neuromodulation/utils/file_writer.py +110 -0
  47. py_neuromodulation/utils/io.py +46 -5
  48. py_neuromodulation/utils/perf.py +156 -0
  49. py_neuromodulation/utils/pydantic_extensions.py +322 -0
  50. py_neuromodulation/utils/types.py +33 -107
  51. {py_neuromodulation-0.0.7.dist-info → py_neuromodulation-0.1.1.dist-info}/METADATA +23 -4
  52. {py_neuromodulation-0.0.7.dist-info → py_neuromodulation-0.1.1.dist-info}/RECORD +55 -35
  53. {py_neuromodulation-0.0.7.dist-info → py_neuromodulation-0.1.1.dist-info}/WHEEL +1 -1
  54. py_neuromodulation-0.1.1.dist-info/entry_points.txt +2 -0
  55. {py_neuromodulation-0.0.7.dist-info → py_neuromodulation-0.1.1.dist-info}/licenses/LICENSE +0 -0
py_neuromodulation/stream/stream.py

@@ -1,16 +1,19 @@
  """Module for generic and offline data streams."""

- from typing import TYPE_CHECKING
+ import time
+ from typing import TYPE_CHECKING, Any
  from collections.abc import Iterator
  import numpy as np
  from pathlib import Path

  import py_neuromodulation as nm
- from contextlib import suppress

  from py_neuromodulation.stream.data_processor import DataProcessor
- from py_neuromodulation.utils.types import _PathLike, FeatureName
+ from py_neuromodulation.utils.types import _PathLike, FEATURE_NAME
+ from py_neuromodulation.utils.file_writer import MsgPackFileWriter
  from py_neuromodulation.stream.settings import NMSettings
+ from py_neuromodulation.analysis.decode import RealTimeDecoder
+ from py_neuromodulation.stream.backend_interface import StreamBackendInterface

  if TYPE_CHECKING:
      import pandas as pd
@@ -35,9 +38,6 @@ class Stream:
          sampling_rate_features_hz: float | None = None,
          path_grids: _PathLike | None = None,
          coord_names: list | None = None,
-         stream_name: str
-         | None = "example_stream",  # Timon: do we need those in the nmstream_abc?
-         is_stream_lsl: bool = False,
          coord_list: list | None = None,
          verbose: bool = True,
      ) -> None:
@@ -67,6 +67,7 @@ class Stream:
          verbose : bool, optional
              print out stream computation time information, by default True
          """
+         # This is calling NMSettings.validate() which is making a copy
          self.settings: NMSettings = NMSettings.load(settings)

          if channels is None and data is not None:
@@ -84,7 +85,7 @@ class Stream:
              raise ValueError("Either `channels` or `data` must be passed to `Stream`.")

          # If features that use frequency ranges are on, test them against nyquist frequency
-         use_freq_ranges: list[FeatureName] = [
+         use_freq_ranges: list[FEATURE_NAME] = [
              "bandpass_filter",
              "stft",
              "fft",
@@ -124,8 +125,8 @@ class Stream:
          self.sess_right = None
          self.projection = None
          self.model = None
+         self.is_running = False

-         # TODO(toni): is it necessary to initialize the DataProcessor on stream init?
          self.data_processor = DataProcessor(
              sfreq=self.sfreq,
              settings=self.settings,
@@ -201,13 +202,20 @@ class Stream:
          experiment_name: str = "sub",
          is_stream_lsl: bool = False,
          stream_lsl_name: str | None = None,
-         plot_lsl: bool = False,
-         save_csv: bool = False,
+         save_csv: bool = True,
          save_interval: int = 10,
          return_df: bool = True,
+         simulate_real_time: bool = False,
+         decoder: RealTimeDecoder | None = None,
+         backend_interface: StreamBackendInterface | None = None,
      ) -> "pd.DataFrame":
          self.is_stream_lsl = is_stream_lsl
          self.stream_lsl_name = stream_lsl_name
+         self.save_csv = save_csv
+         self.save_interval = save_interval
+         self.return_df = return_df
+         self.out_dir = Path.cwd() if not out_dir else Path(out_dir)
+         self.experiment_name = experiment_name

          # Validate input data
          if data is not None:
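
The new `run()` signature drops `plot_lsl`, flips the `save_csv` default to True, and adds the `simulate_real_time`, `decoder`, and `backend_interface` hooks. A minimal offline sketch against this signature (the constructor arguments and data array below are illustrative assumptions, not taken from the package docs):

    import numpy as np
    import py_neuromodulation as nm

    # Hypothetical raw data: 2 channels, 10 s at 1 kHz
    data = np.random.randn(2, 10_000)
    stream = nm.Stream(sfreq=1000, data=data, sampling_rate_features_hz=10)
    features = stream.run(
        out_dir="./results",
        experiment_name="sub",
        save_csv=True,      # combined CSV written at the end of the run
        save_interval=10,   # flush the MessagePack writer every 10 batches
        return_df=True,     # also return the features as a pandas DataFrame
    )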
@@ -217,24 +225,10 @@ class Stream:
          elif self.data is None and data is None and self.is_stream_lsl is False:
              raise ValueError("No data passed to run function.")

-         # Generate output dirs
-         self.out_dir_root = Path.cwd() if not out_dir else Path(out_dir)
-         self.out_dir = self.out_dir_root / experiment_name
-         # TONI: Need better default experiment name
-         self.experiment_name = experiment_name if experiment_name else "sub"
-
-         self.out_dir.mkdir(parents=True, exist_ok=True)
-
-         # Open database connection
-         # TONI: we should give the user control over the save format
-         from py_neuromodulation.utils.database import NMDatabase
-
-         db = NMDatabase(experiment_name, out_dir)  # Create output database
+         file_writer = MsgPackFileWriter(name=experiment_name, out_dir=out_dir)

          self.batch_count: int = 0  # Keep track of the number of batches processed

-         # Reinitialize the data processor in case the nm_channels or nm_settings changed between runs of the same Stream
-         # TONI: then I think we can just not initialize the data processor in the init function
          self.data_processor = DataProcessor(
              sfreq=self.sfreq,
              settings=self.settings,
@@ -248,9 +242,8 @@ class Stream:

          nm.logger.log_to_file(out_dir)

-         # Initialize generator
          self.generator: Iterator
-         if not is_stream_lsl:
+         if not is_stream_lsl and data is not None:
              from py_neuromodulation.stream.generator import RawDataGenerator

              self.generator = RawDataGenerator(
@@ -259,6 +252,7 @@ class Stream:
                  self.settings.sampling_rate_features_hz,
                  self.settings.segment_length_features_ms,
              )
+             nm.logger.info("Initializing RawDataGenerator")
          else:
              from py_neuromodulation.stream.mnelsl_stream import LSLStream

@@ -266,13 +260,10 @@ class Stream:
                  settings=self.settings, stream_name=stream_lsl_name
              )

-             if plot_lsl:
-                 from mne_lsl.stream_viewer import StreamViewer
-
-                 viewer = StreamViewer(stream_name=stream_lsl_name)
-                 viewer.start()
-
-             if self.sfreq != self.lsl_stream.stream.sinfo.sfreq:
+             if (
+                 self.lsl_stream.stream.sinfo is not None
+                 and self.sfreq != self.lsl_stream.stream.sinfo.sfreq
+             ):
                  error_msg = (
                      f"Sampling frequency of the lsl-stream ({self.lsl_stream.stream.sinfo.sfreq}) "
                      f"does not match the settings ({self.sfreq})."
@@ -285,9 +276,21 @@ class Stream:

          prev_batch_end = 0
          for timestamps, data_batch in self.generator:
+             self.is_running = True
+             if backend_interface:
+                 # Only simulate real-time if connected to GUI
+                 if simulate_real_time:
+                     time.sleep(1 / self.settings.sampling_rate_features_hz)
+
+                 signal = backend_interface.check_control_signals()
+                 if signal == "stop":
+                     break
+
              if data_batch is None:
+                 nm.logger.info("Data batch is None, stopping run function")
                  break

+             nm.logger.info("Processing new data batch")
              feature_dict = self.data_processor.process(data_batch)

              this_batch_end = timestamps[-1]
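
With a GUI attached, `simulate_real_time` paces the loop at the feature rate: for example, at `sampling_rate_features_hz = 10` the loop above sleeps 1/10 = 0.1 s before processing each batch.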
@@ -296,10 +299,13 @@ class Stream:
                  f"{batch_length:.3f} seconds of new data processed",
              )

-             feature_dict["time"] = (
-                 batch_length if is_stream_lsl else np.ceil(this_batch_end * 1000 + 1)
-             )
+             if decoder is not None:
+                 ch_to_decode = self.channels.query("used == 1").iloc[0]["name"]
+                 feature_dict = decoder.predict(
+                     feature_dict, ch_to_decode, fft_bands_only=True
+                 )

+             feature_dict["time"] = np.ceil(this_batch_end * 1000 + 1)
              prev_batch_end = this_batch_end

              if self.verbose:
@@ -307,29 +313,42 @@ class Stream:

              self._add_target(feature_dict, data_batch)

-             # We should ensure that feature output is always either float64 or None and remove this
-             with suppress(TypeError):  # Need this because some features output None
-                 for key, value in feature_dict.items():
-                     feature_dict[key] = np.float64(value)
+             # Push data to file writer
+             file_writer.insert_data(feature_dict)

-             db.insert_data(feature_dict)
+             # Send data to frontend
+             if backend_interface:
+                 nm.logger.debug("stream.run: Sending features to frontend")
+                 backend_interface.send_features(feature_dict)
+                 backend_interface.send_raw_data(self._prepare_raw_data_dict(data_batch))

+             # Save features to file in intervals
              self.batch_count += 1
-             if self.batch_count % save_interval == 0:
-                 db.commit()
+             if self.batch_count % self.save_interval == 0:
+                 file_writer.save()

-         db.commit()  # Save last batches
+         file_writer.save()

-         # If save_csv is False, still save the first row to get the column names
-         feature_df: "pd.DataFrame" = (
-             db.fetch_all() if (save_csv or return_df) else db.head()
-         )
+         if self.save_csv:
+             file_writer.save_as_csv(save_all_combined=True)
+
+         feature_df = file_writer.load_all() if self.return_df else {}

-         db.close()  # Close the database connection
+         self._save_after_stream()
+         self.is_running = False

-         self._save_after_stream(feature_arr=feature_df)
+         return feature_df  # Timon: We could think of returnader instead

-         return feature_df  # TONI: Not sure if this makes sense anymore
+     def _prepare_raw_data_dict(self, data_batch: np.ndarray) -> dict[str, Any]:
+         """Prepare raw data dictionary for sending through queue"""
+         new_time_ms = 1000 / self.settings.sampling_rate_features_hz
+         new_samples = int(new_time_ms * self.sfreq / 1000)
+         return {
+             "raw_data": {
+                 ch: list(data_batch[i, -new_samples:])
+                 for i, ch in enumerate(self.channels["name"])
+             }
+         }

      def plot_raw_signal(
          self,
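
The new `_prepare_raw_data_dict` helper forwards only the newest slice of each batch: with, say, `sampling_rate_features_hz = 10` and `sfreq = 1000` Hz, `new_time_ms = 100` and `new_samples = int(100 * 1000 / 1000) = 100`, so the last 100 samples per channel are sent to the frontend per batch.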
@@ -365,11 +384,15 @@ class Stream:
          ValueError
              raise Exception when no data is passed
          """
-         if self.data is None and data is None:
-             raise ValueError("No data passed to plot_raw_signal function.")
-
-         if data is None and self.data is not None:
-             data = self.data
+         if data is None:
+             if self.data is None:
+                 raise ValueError("No data passed to plot_raw_signal function.")
+             else:
+                 data = (
+                     self.data.to_numpy()
+                     if isinstance(self.data, pd.DataFrame)
+                     else self.data
+                 )

          if sfreq is None:
              sfreq = self.sfreq
@@ -384,7 +407,7 @@ class Stream:
          from mne import create_info
          from mne.io import RawArray

-         info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
+         info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)  # type: ignore
          raw = RawArray(data, info)

          if picks is not None:
@@ -397,12 +420,9 @@ class Stream:

      def _save_after_stream(
          self,
-         feature_arr: "pd.DataFrame | None" = None,
      ) -> None:
-         """Save features, settings, nm_channels and sidecar after run"""
+         """Save settings, nm_channels and sidecar after run"""
          self._save_sidecar()
-         if feature_arr is not None:
-             self._save_features(feature_arr)
          self._save_settings()
          self._save_channels()

@@ -422,6 +442,7 @@ class Stream:
          """Save sidecar including fs, coords, sess_right to
          out_path_root and subfolder 'folder_name'"""
          additional_args = {"sess_right": self.sess_right}
+
          self.data_processor.save_sidecar(
              self.out_dir, self.experiment_name, additional_args
          )
py_neuromodulation/utils/channels.py

@@ -251,7 +251,7 @@ def _get_default_references(


  def get_default_channels_from_data(
-     data: np.ndarray,
+     data: "np.ndarray | pd.DataFrame",
      car_rereferencing: bool = True,
  ):
      """Return default channels dataframe with
py_neuromodulation/utils/file_writer.py (new file)

@@ -0,0 +1,110 @@
+ import msgpack
+ from abc import ABC, abstractmethod
+ from pathlib import Path
+ import pandas as pd
+ import numpy as np
+ from py_neuromodulation.utils.types import _PathLike
+ from py_neuromodulation import logger
+
+ class AbstractFileWriter(ABC):
+
+     @abstractmethod
+     def insert_data(self, feature_dict: dict):
+         pass
+
+     @abstractmethod
+     def save(self):
+         pass
+
+     @abstractmethod
+     def load_all(self):
+         pass
+
+     @abstractmethod
+     def save_as_csv(self, save_all_combined: bool = False):
+         pass
+
+ class MsgPackFileWriter(AbstractFileWriter):
+     """
+     Class to store data in a serialized MessagePack file and load it back efficiently.
+     Parameters
+     ----------
+     out_dir : _PathLike
+         The directory to save the MessagePack database.
+     """
+
+     def __init__(
+         self,
+         name: str = "sub",
+         out_dir: _PathLike = "",
+     ):
+         # Make sure out_dir exists
+
+         self.out_dir = Path.cwd() if not out_dir else Path(out_dir)
+         self.out_dir = self.out_dir / name
+
+         Path(self.out_dir).mkdir(parents=True, exist_ok=True)
+
+         self.idx = 0
+         self.name = name
+         self.csv_path = Path(self.out_dir, f"{name}_FEATURES.csv")
+         self.data_l = []
+
+     def insert_data(self, feature_dict: dict):
+         """
+         Insert data into the MessagePack database.
+         Parameters
+         ----------
+         feature_dict : dict
+             The dictionary with the feature names and values.
+         """
+         # transform every value to float s.t. msgpack can serialize the data
+         for key, value in feature_dict.items():
+             feature_dict[key] = float(value) if value is not None else 0
+
+         self.data_l.append(feature_dict)
+
+     def save(self):
+         """
+         Save the current data to the MessagePack file.
+         """
+         if len(self.data_l) == 0:
+             return
+         with open(self.out_dir / f"{self.name}-{self.idx}.msgpack", "wb") as f:
+             msgpack.pack(self.data_l, f)
+         self.idx += 1
+         self.data_l = []
+
+     def load_all(self):
+         """
+         Load data from the MessagePack file into memory.
+         """
+         data_l = []
+         for i in range(self.idx):
+             with open(self.out_dir / f"{self.name}-{i}.msgpack", "rb") as f:
+                 data_l.append(msgpack.unpack(f))
+         if len(data_l) == 0:
+             raise ValueError("No data to load")
+         data = pd.DataFrame(list(np.concatenate(data_l)))
+         return data
+
+     def save_as_csv(self, save_all_combined: bool = False):
+         """
+         Save the data as a CSV file.
+         """
+
+         if save_all_combined:
+             try:
+                 data = self.load_all()
+             except ValueError as e:
+                 logger.error(e)
+                 return
+             data.to_csv(self.csv_path, index=False)
+         else:
+             if len(self.data_l) > 0:
+                 self.data_l[-1].to_csv(self.csv_path, index=False)
+             else:
+                 outpath = self.out_dir / f"{self.name}-0.msgpack"
+                 with open(outpath, "rb") as f:
+                     data = msgpack.unpack(f)
+                 data.to_csv(self.csv_path, index=False)
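
A short usage sketch for the writer above (feature names and values are invented; the part-file naming and CSV path follow the code as written):

    from py_neuromodulation.utils.file_writer import MsgPackFileWriter

    writer = MsgPackFileWriter(name="sub", out_dir="./results")
    for batch in range(25):
        writer.insert_data({"ch1_fft_theta": 0.1 * batch, "time": batch * 100.0})
        if (batch + 1) % 10 == 0:
            writer.save()       # writes results/sub/sub-0.msgpack, sub-1.msgpack, ...
    writer.save()               # flushes the remaining 5 rows as sub-2.msgpack
    writer.save_as_csv(save_all_combined=True)  # results/sub/sub_FEATURES.csv
    df = writer.load_all()      # all rows back as a single pandas DataFrame

Note that `save()` is a no-op when nothing is buffered, and `load_all()` raises ValueError if no part file has been written.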
py_neuromodulation/utils/io.py

@@ -1,6 +1,6 @@
  import json
  from pathlib import PurePath, Path
- from typing import TYPE_CHECKING
+ from typing import TYPE_CHECKING, cast

  import numpy as np

@@ -79,7 +79,7 @@ def read_BIDS_data(
  def read_mne_data(
      PATH_RUN: "_PathLike | BIDSPath",
      line_noise: int = 50,
- ):
+ ) -> tuple[np.ndarray, float, list[str], list[str], list[str]]:
      """Read data in the mne.io.read_raw supported format.

      Parameters
@@ -117,7 +117,8 @@ def read_mne_data(
              f"Line noise is not available in the data, using value of {line_noise} Hz."
          )

-     return raw_arr.get_data(), sfreq, ch_names, ch_types, bads
+     data = cast(np.ndarray, raw_arr.get_data())
+     return data, sfreq, ch_names, ch_types, bads


  def get_coord_list(
@@ -190,6 +191,46 @@ def get_annotations(PATH_ANNOTATIONS: str, PATH_RUN: str, raw_arr: "mne_io.RawAr
      return annot, annot_data, raw_arr


+ def read_plot_modules(
+     PATH_PLOT: _PathLike = PYNM_DIR / "plots",
+ ):
+     """Read required .mat files for plotting
+
+     Parameters
+     ----------
+     PATH_PLOT : regexp, optional
+         path to plotting files, by default
+     """
+
+     faces = loadmat(PurePath(PATH_PLOT, "faces.mat"))
+     vertices = loadmat(PurePath(PATH_PLOT, "Vertices.mat"))
+     grid = loadmat(PurePath(PATH_PLOT, "grid.mat"))["grid"]
+     stn_surf = loadmat(PurePath(PATH_PLOT, "STN_surf.mat"))
+     x_ver = stn_surf["vertices"][::2, 0]
+     y_ver = stn_surf["vertices"][::2, 1]
+     x_ecog = vertices["Vertices"][::1, 0]
+     y_ecog = vertices["Vertices"][::1, 1]
+     z_ecog = vertices["Vertices"][::1, 2]
+     x_stn = stn_surf["vertices"][::1, 0]
+     y_stn = stn_surf["vertices"][::1, 1]
+     z_stn = stn_surf["vertices"][::1, 2]
+
+     return (
+         faces,
+         vertices,
+         grid,
+         stn_surf,
+         x_ver,
+         y_ver,
+         x_ecog,
+         y_ecog,
+         z_ecog,
+         x_stn,
+         y_stn,
+         z_stn,
+     )
+
+
  def write_csv(df, path_out):
      """
      Function to save Pandas dataframes to disk as CSV using
@@ -209,7 +250,7 @@ def save_channels(
  ) -> None:
      out_dir = Path.cwd() if not out_dir else Path(out_dir)
      filename = "channels.csv" if not prefix else prefix + "_channels.csv"
-     write_csv(nmchannels, out_dir / filename)
+     write_csv(nmchannels, out_dir / prefix / filename)
      logger.info(f"{filename} saved to {out_dir}")


@@ -241,7 +282,7 @@ def save_general_dict(
      out_dir = Path.cwd() if not out_dir else Path(out_dir)
      filename = f"{prefix}{str_add}"

-     with open(out_dir / filename, "w") as f:
+     with open(out_dir / prefix / filename, "w") as f:
          json.dump(
              dict_,
              f,
py_neuromodulation/utils/perf.py (new file)

@@ -0,0 +1,156 @@
+ from typing import Any
+ import time
+ import threading
+ import logging
+ from dataclasses import dataclass, field
+ from collections import deque
+ import statistics
+
+
+ @dataclass
+ class MetricPoint:
+     timestamp: float
+     value: float
+     metadata: dict[str, Any] = field(default_factory=dict)
+
+
+ class MetricBuffer:
+     """Maintains a rolling buffer of metric values with timestamps."""
+
+     def __init__(self, max_size: int = 1000):
+         self.buffer: deque[MetricPoint] = deque(maxlen=max_size)
+         self.lock = threading.Lock()
+
+     def add(self, value: float, metadata: dict[str, Any] | None = None):
+         with self.lock:
+             self.buffer.append(
+                 MetricPoint(timestamp=time.time(), value=value, metadata=metadata or {})
+             )
+
+     def get_stats(self, window_seconds: float | None = None) -> dict[str, float]:
+         with self.lock:
+             if not self.buffer:
+                 return {"count": 0, "mean": 0.0, "min": 0.0, "max": 0.0, "std_dev": 0.0}
+
+             current_time = time.time()
+             values = [
+                 point.value
+                 for point in self.buffer
+                 if window_seconds is None
+                 or (current_time - point.timestamp) <= window_seconds
+             ]
+
+             if not values:
+                 return {"count": 0, "mean": 0.0, "min": 0.0, "max": 0.0, "std_dev": 0.0}
+
+             return {
+                 "count": len(values),
+                 "mean": statistics.mean(values),
+                 "min": min(values),
+                 "max": max(values),
+                 "std_dev": statistics.stdev(values) if len(values) > 1 else 0.0,
+             }
+
+
+ class PerformanceMonitor:
+     """Centralized system for tracking performance metrics across the application."""
+
+     _instance = None
+     _lock = threading.Lock()
+
+     def __new__(cls):
+         with cls._lock:
+             if cls._instance is None:
+                 cls._instance = super().__new__(cls)
+             return cls._instance
+
+     def __init__(self):
+         if not hasattr(self, "initialized"):
+             self.logger = logging.getLogger("PyNM.Performance")
+             self.metrics: dict[str, MetricBuffer] = {}
+             self.timers: dict[str, float] = {}
+             self.counters: dict[str, int] = {}
+             self.metrics_lock = threading.Lock()
+             self.initialized = True
+
+     def record_metric(
+         self, name: str, value: float, metadata: dict[str, Any] | None = None
+     ):
+         """Record a metric value with optional metadata."""
+         with self.metrics_lock:
+             if name not in self.metrics:
+                 self.metrics[name] = MetricBuffer()
+             self.metrics[name].add(value, metadata)
+
+     def start_timer(self, name: str):
+         """Start a timer for measuring operation duration."""
+         self.timers[name] = time.time()
+
+     def stop_timer(self, name: str, record: bool = True) -> float:
+         """Stop a timer and optionally record its duration as a metric."""
+         if name not in self.timers:
+             raise KeyError(f"Timer '{name}' was never started")
+
+         duration = time.time() - self.timers[name]
+         if record:
+             self.record_metric(f"{name}_duration", duration)
+
+         del self.timers[name]
+         return duration
+
+     def increment_counter(self, name: str, amount: int = 1):
+         """Increment a counter by the specified amount."""
+         with self.metrics_lock:
+             self.counters[name] = self.counters.get(name, 0) + amount
+
+     def get_counter(self, name: str) -> int:
+         """Get the current value of a counter."""
+         return self.counters.get(name, 0)
+
+     def get_metric_stats(
+         self, name: str, window_seconds: float | None = None
+     ) -> dict[str, float]:
+         """Get statistics for a metric over the specified time window."""
+         if name not in self.metrics:
+             return {"count": 0, "mean": 0.0, "min": 0.0, "max": 0.0, "std_dev": 0.0}
+         return self.metrics[name].get_stats(window_seconds)
+
+     def get_all_metrics(
+         self, window_seconds: float | None = None
+     ) -> dict[str, dict[str, float]]:
+         """Get statistics for all metrics."""
+         return {
+             name: self.get_metric_stats(name, window_seconds) for name in self.metrics
+         }
+
+     def log_summary(self, window_seconds: float | None = None):
+         """Log a summary of all metrics and counters."""
+         stats = self.get_all_metrics(window_seconds)
+         self.logger.info("Performance Summary:")
+
+         for name, metric_stats in stats.items():
+             self.logger.info(f"{name}:")
+             for stat_name, value in metric_stats.items():
+                 self.logger.info(f"  {stat_name}: {value:.3f}")
+
+         self.logger.info("Counters:")
+         for name, value in self.counters.items():
+             self.logger.info(f"  {name}: {value}")
+
+
+ # Example usage:
+ # monitor = PerformanceMonitor()
+ #
+ # # Record individual metrics
+ # monitor.record_metric("queue_size", queue.qsize())
+ #
+ # # Time operations
+ # monitor.start_timer("websocket_send")
+ # await websocket.send_bytes(data)
+ # duration = monitor.stop_timer("websocket_send")
+ #
+ # # Track message counts
+ # monitor.increment_counter("messages_received")
+ #
+ # # Get stats for the last 5 minutes
+ # stats = monitor.get_metric_stats("websocket_send", window_seconds=300)