spectre-core 0.0.22-py3-none-any.whl → 0.0.23-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. spectre_core/_file_io/__init__.py +4 -4
  2. spectre_core/_file_io/file_handlers.py +60 -106
  3. spectre_core/batches/__init__.py +20 -3
  4. spectre_core/batches/_base.py +85 -134
  5. spectre_core/batches/_batches.py +55 -99
  6. spectre_core/batches/_factory.py +21 -20
  7. spectre_core/batches/_register.py +8 -8
  8. spectre_core/batches/plugins/_batch_keys.py +7 -6
  9. spectre_core/batches/plugins/_callisto.py +65 -97
  10. spectre_core/batches/plugins/_iq_stream.py +105 -169
  11. spectre_core/capture_configs/__init__.py +46 -17
  12. spectre_core/capture_configs/_capture_config.py +25 -52
  13. spectre_core/capture_configs/_capture_modes.py +8 -6
  14. spectre_core/capture_configs/_capture_templates.py +50 -110
  15. spectre_core/capture_configs/_parameters.py +37 -74
  16. spectre_core/capture_configs/_pconstraints.py +40 -40
  17. spectre_core/capture_configs/_pnames.py +36 -34
  18. spectre_core/capture_configs/_ptemplates.py +260 -347
  19. spectre_core/capture_configs/_pvalidators.py +99 -102
  20. spectre_core/config/__init__.py +13 -8
  21. spectre_core/config/_paths.py +18 -35
  22. spectre_core/config/_time_formats.py +6 -5
  23. spectre_core/exceptions.py +38 -0
  24. spectre_core/jobs/__init__.py +3 -6
  25. spectre_core/jobs/_duration.py +12 -0
  26. spectre_core/jobs/_jobs.py +72 -43
  27. spectre_core/jobs/_workers.py +55 -105
  28. spectre_core/logs/__init__.py +7 -2
  29. spectre_core/logs/_configure.py +13 -17
  30. spectre_core/logs/_decorators.py +6 -4
  31. spectre_core/logs/_logs.py +37 -89
  32. spectre_core/logs/_process_types.py +5 -3
  33. spectre_core/plotting/__init__.py +13 -3
  34. spectre_core/plotting/_base.py +64 -138
  35. spectre_core/plotting/_format.py +10 -8
  36. spectre_core/plotting/_panel_names.py +7 -5
  37. spectre_core/plotting/_panel_stack.py +82 -115
  38. spectre_core/plotting/_panels.py +120 -155
  39. spectre_core/post_processing/__init__.py +6 -3
  40. spectre_core/post_processing/_base.py +41 -55
  41. spectre_core/post_processing/_factory.py +14 -11
  42. spectre_core/post_processing/_post_processor.py +16 -12
  43. spectre_core/post_processing/_register.py +10 -7
  44. spectre_core/post_processing/plugins/_event_handler_keys.py +4 -3
  45. spectre_core/post_processing/plugins/_fixed_center_frequency.py +54 -47
  46. spectre_core/post_processing/plugins/_swept_center_frequency.py +199 -174
  47. spectre_core/receivers/__init__.py +9 -2
  48. spectre_core/receivers/_base.py +82 -148
  49. spectre_core/receivers/_factory.py +20 -30
  50. spectre_core/receivers/_register.py +7 -10
  51. spectre_core/receivers/_spec_names.py +17 -15
  52. spectre_core/receivers/plugins/_b200mini.py +47 -60
  53. spectre_core/receivers/plugins/_receiver_names.py +8 -6
  54. spectre_core/receivers/plugins/_rsp1a.py +44 -40
  55. spectre_core/receivers/plugins/_rspduo.py +59 -44
  56. spectre_core/receivers/plugins/_sdrplay_receiver.py +67 -83
  57. spectre_core/receivers/plugins/_test.py +136 -129
  58. spectre_core/receivers/plugins/_usrp.py +93 -85
  59. spectre_core/receivers/plugins/gr/__init__.py +1 -1
  60. spectre_core/receivers/plugins/gr/_base.py +14 -22
  61. spectre_core/receivers/plugins/gr/_rsp1a.py +53 -60
  62. spectre_core/receivers/plugins/gr/_rspduo.py +77 -89
  63. spectre_core/receivers/plugins/gr/_test.py +49 -57
  64. spectre_core/receivers/plugins/gr/_usrp.py +61 -59
  65. spectre_core/spectrograms/__init__.py +21 -13
  66. spectre_core/spectrograms/_analytical.py +108 -99
  67. spectre_core/spectrograms/_array_operations.py +39 -46
  68. spectre_core/spectrograms/_spectrogram.py +289 -322
  69. spectre_core/spectrograms/_transform.py +106 -73
  70. spectre_core/wgetting/__init__.py +1 -3
  71. spectre_core/wgetting/_callisto.py +87 -93
  72. {spectre_core-0.0.22.dist-info → spectre_core-0.0.23.dist-info}/METADATA +9 -23
  73. spectre_core-0.0.23.dist-info/RECORD +79 -0
  74. {spectre_core-0.0.22.dist-info → spectre_core-0.0.23.dist-info}/WHEEL +1 -1
  75. spectre_core-0.0.22.dist-info/RECORD +0 -78
  76. {spectre_core-0.0.22.dist-info → spectre_core-0.0.23.dist-info}/licenses/LICENSE +0 -0
  77. {spectre_core-0.0.22.dist-info → spectre_core-0.0.23.dist-info}/top_level.txt +0 -0

spectre_core/jobs/_jobs.py
@@ -3,109 +3,138 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 from logging import getLogger
+
 _LOGGER = getLogger(__name__)
 
 import time
 
 from ._workers import Worker
+from ._duration import Duration
+
 
 class Job:
-    """Represents a collection of workers that run long-running tasks as
+    """Represents a collection of workers that run long-running tasks as
     multiprocessing processes.
 
-    A `Job` manages the lifecycle of its workers, including starting,
-    monitoring, and terminating them.
+    A `Job` manages the lifecycle of its workers, including starting,
+    monitoring, and killing them.
     """
-    def __init__(
-        self,
-        workers: list[Worker]
-    ) -> None:
+
+    def __init__(self, workers: list[Worker]) -> None:
        """Initialise a `Job` with a list of workers.
 
         :param workers: A list of `Worker` instances to manage as part of the job.
         """
         self._workers = workers
 
+    @property
+    def workers_are_alive(self) -> bool:
+        """Returns True if all managed workers are alive, and False otherwise."""
+        return all([worker.is_alive for worker in self._workers])
 
     def start(
         self,
     ) -> None:
         """Tell each worker to call their functions in the background as multiprocessing processes."""
+        if self.workers_are_alive:
+            raise RuntimeError("A job cannot be started twice.")
         for worker in self._workers:
             worker.start()
-
-
-    def terminate(
+
+    def kill(
         self,
     ) -> None:
-        """Tell each worker to terminate their processes, if the processes are still running."""
-        _LOGGER.info("Terminating workers...")
+        """Tell each worker to kill their processes, if the processes are still running."""
+        _LOGGER.info("Killing workers...")
         for worker in self._workers:
-            if worker.process.is_alive():
-                worker.process.terminate()
-                worker.process.join()
-        _LOGGER.info("All workers successfully terminated")
-
-
-    def monitor(
+            if worker.is_alive:
+                worker.kill()
+
+        _LOGGER.info("All workers successfully killed")
+
+    def restart(
         self,
-        total_runtime: float,
-        force_restart: bool = False
+    ) -> None:
+        """Tell each worker to restart it's process."""
+        for worker in self._workers:
+            worker.restart()
+
+    def monitor(
+        self, total_runtime: float, force_restart: bool = False, max_restarts: int = 5
     ) -> None:
         """
         Monitor the workers during execution and handle unexpected exits.
 
-        Periodically checks worker processes within the specified runtime duration.
+        Periodically checks worker processes within the specified runtime duration.
         If a worker exits unexpectedly:
         - Restarts all workers if `force_restart` is True.
-        - Terminates all workers and raises an exception if `force_restart` is False.
+        - Kills all workers and raises an exception if `force_restart` is False.
 
         :param total_runtime: Total time to monitor the workers, in seconds.
-        :param force_restart: Whether to restart all workers if one exits unexpectedly.
+        :param force_restart: Whether to restart all workers if one dies unexpectedly.
+        :param max_restarts: Maximum number of times workers can be restarted before giving up and killing all workers.
+        Only applies when force_restart is True. Defaults to 5.
         :raises RuntimeError: If a worker exits and `force_restart` is False.
         """
         _LOGGER.info("Monitoring workers...")
         start_time = time.time()
 
+        restarts_remaining = max_restarts
         try:
+            # Check that the elapsed time since the job started is within the total runtime configured by the user.
             while time.time() - start_time < total_runtime:
                 for worker in self._workers:
-                    if not worker.process.is_alive():
-                        error_message = f"Worker with name `{worker.name}` unexpectedly exited."
+                    if not worker.is_alive:
+                        error_message = (
+                            f"Worker with name `{worker.name}` unexpectedly exited."
+                        )
                         _LOGGER.error(error_message)
                         if force_restart:
-                            # Restart all workers
-                            for worker in self._workers:
-                                worker.restart()
+                            if restarts_remaining > 0:
+                                _LOGGER.info(
+                                    f"Attempting restart ({restarts_remaining} restarts remaining)..."
+                                )
+                                restarts_remaining -= 1
+                                self.restart()
+
+                            else:
+                                error_message = (
+                                    f"Maximum number of restarts has been reached: {max_restarts}. "
+                                    f"Killing all workers."
+                                )
+                                self.kill()
+                                raise RuntimeError(error_message)
                         else:
-                            self.terminate()
+                            self.kill()
                             raise RuntimeError(error_message)
-                time.sleep(1) # Poll every second
+                time.sleep(Duration.ONE_DECISECOND)  # Poll every 0.1 seconds
 
-            _LOGGER.info("Session duration reached. Terminating workers...")
-            self.terminate()
+            # If the jobs total runtime has elapsed, kill all the workers
+            _LOGGER.info("Job complete. Killing workers... ")
+            self.kill()
 
         except KeyboardInterrupt:
-            _LOGGER.info("Keyboard interrupt detected. Terminating workers...")
-            self.terminate()
+            _LOGGER.info("Keyboard interrupt detected. Killing workers...")
+            self.kill()
+
 
-
 def start_job(
     workers: list[Worker],
     total_runtime: float,
-    force_restart: bool = False
+    force_restart: bool = False,
+    max_restarts: int = 5,
 ) -> None:
     """Create and run a job with the specified workers.
 
-    Starts the workers, monitors them for the specified runtime, and handles
+    Starts the workers, monitors them for the specified runtime, and handles
     unexpected exits according to the `force_restart` policy.
 
     :param workers: A list of `Worker` instances to include in the job.
-    :param total_runtime: Total time to monitor the job, in seconds.
-    :param force_restart: Whether to restart all workers if one exits unexpectedly.
-        Defaults to False.
+    :param total_runtime: Total time to monitor the workers, in seconds.
+    :param force_restart: Whether to restart all workers if one dies unexpectedly.
+    :param max_restarts: Maximum number of times workers can be restarted before giving up and killing all workers.
+    Only applies when force_restart is True. Defaults to 5.
     """
     job = Job(workers)
     job.start()
-    job.monitor(total_runtime, force_restart)
-
+    job.monitor(total_runtime, force_restart, max_restarts)
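
Taken together, this hunk renames `Job.terminate` to `Job.kill`, adds the `workers_are_alive` and `restart` helpers, polls every decisecond instead of every second, and caps crash recovery with `max_restarts`. A minimal sketch of the new call site, assuming `make_worker` and `start_job` are re-exported from `spectre_core.jobs`, and using a hypothetical `stream_samples` target:

from spectre_core.jobs import make_worker, start_job


def stream_samples(tag: str) -> None:
    """Hypothetical long-running task; stands in for a real capture target."""
    ...


# Build (but do not start) a background daemon worker for the target.
worker = make_worker("capture", stream_samples, args=("my-tag",))

# Run for an hour. With force_restart=True, crashed workers are revived
# at most three times; after that the job kills everything and raises.
start_job([worker], total_runtime=3600.0, force_restart=True, max_restarts=3)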

spectre_core/jobs/_workers.py
@@ -3,22 +3,19 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 from logging import getLogger
+
 _LOGGER = getLogger(__name__)
 
-from functools import wraps
 import time
-from typing import Callable, TypeVar, ParamSpec
+from typing import Callable
 import multiprocessing
 
-from spectre_core.logs import configure_root_logger, log_call, ProcessType
-from spectre_core.capture_configs import CaptureConfig
-from spectre_core.receivers import get_receiver, ReceiverName
-from spectre_core.post_processing import start_post_processor
+from spectre_core.logs import configure_root_logger, ProcessType
+from ._duration import Duration
 
 
 def _make_daemon_process(
-    name: str,
-    target: Callable[[], None]
+    name: str, target: Callable[[], None]
 ) -> multiprocessing.Process:
     """
     Creates and returns a daemon `multiprocessing.Process` instance.
@@ -27,21 +24,16 @@ def _make_daemon_process(
     :param target: The function to execute in the process.
     :return: A `multiprocessing.Process` instance configured as a daemon.
     """
-    return multiprocessing.Process(target=target,
-                                   name=name,
-                                   daemon=True)
+    return multiprocessing.Process(target=target, name=name, daemon=True)
 
 
 class Worker:
     """A lightweight wrapper for a `multiprocessing.Process` daemon.
-
+
     Provides a very simple API to start, and restart a multiprocessing process.
     """
-    def __init__(
-        self,
-        name: str,
-        target: Callable[[], None]
-    ) -> None:
+
+    def __init__(self, name: str, target: Callable[[], None]) -> None:
         """Initialise a `Worker` instance.
 
         :param name: The name assigned to the process.
@@ -51,121 +43,79 @@ class Worker:
         self._target = target
         self._process = _make_daemon_process(name, target)
 
-
     @property
-    def name(
-        self
-    ) -> str:
+    def name(self) -> str:
         """Get the name of the worker process.
 
         :return: The name of the multiprocessing process.
         """
         return self._process.name
-
-
-    @property
-    def process(
-        self
-    ) -> multiprocessing.Process:
-        """Access the underlying multiprocessing process.
 
-        :return: The wrapped `multiprocessing.Process` instance.
-        """
-        return self._process
+    @property
+    def is_alive(self) -> bool:
+        """Return whether the managed process is alive."""
+        return self._process.is_alive()
 
-
-    def start(
-        self
-    ) -> None:
+    def start(self) -> None:
         """Start the worker process.
 
         This method runs the `target` in the background as a daemon.
         """
+        if self.is_alive:
+            raise RuntimeError("A worker cannot be started twice.")
+
         self._process.start()
 
+    def kill(self) -> None:
+        """Kill the managed process."""
+        if not self.is_alive:
+            raise RuntimeError("Cannot kill a process which is not alive.")
+
+        self._process.kill()
 
-    def restart(
-        self
-    ) -> None:
+    def restart(self) -> None:
         """Restart the worker process.
 
-        Terminates the existing process if it is alive and then starts a new process
-        after a brief pause.
+        Kills the existing process if it is alive and then starts a new process
+        after a brief pause.
         """
         _LOGGER.info(f"Restarting {self.name} worker")
-        if self._process.is_alive():
+        if self.is_alive:
             # forcibly stop if it is still alive
-            self._process.terminate()
-            self._process.join()
+            self.kill()
+
         # a moment of respite
-        time.sleep(1)
+        time.sleep(0.5 * Duration.ONE_SECOND)
+
         # make a new process, as we can't start the same process again.
         self._process = _make_daemon_process(self._name, self._target)
         self.start()
 
 
-P = ParamSpec("P")
-T = TypeVar("T", bound=Callable[..., None])
+# TODO: Somehow statically type check that `args` match the arguments to `target`
 def make_worker(
-    name: str
-) -> Callable[[Callable[P, None]], Callable[P, Worker]]:
+    name: str,
+    target: Callable[..., None],
+    args: tuple = (),
+    configure_logging: bool = True,
+) -> Worker:
+    """Create a `Worker` instance to manage a target function in a multiprocessing background daemon process.
+
+    This function returns a `Worker` that is configured to run the given target function with the provided arguments
+    in a separate process. The worker is not started automatically; you must call `start()` to call the target. The target should not return anything,
+    as its return value will be discarded.
+
+    :param name: Human-readable name for the worker process.
+    :param target: The function to be executed by the worker process.
+    :param args: Arguments to pass to the target function.
+    :param configure_root_logger: If True, configure the root logger to write log events to file. Defaults to True.
+    :return: A `Worker` instance managing the background process (not started).
     """
-    Turns a function into a worker.
 
-    This decorator wraps a function, allowing it to run in a separate process
-    managed by a `Worker` object. Use it to easily create long-running or
-    isolated tasks without directly handling multiprocessing.
+    def _worker_target() -> None:
+        if configure_logging:
+            configure_root_logger(ProcessType.WORKER)
+        _LOGGER.info(args)
+        target(*args)
 
-    :param name: A human-readable name for the worker process.
-    :return: A decorator that creates a `Worker` to run the function in its own process.
-    """
-
-    def decorator(
-        func: Callable[P, None]
-    ) -> Callable[P, Worker]:
-        @wraps(func)
-        def wrapper(*args: P.args, **kwargs: P.kwargs) -> Worker:
-            # Worker target funcs must have no arguments
-            def target():
-                configure_root_logger(ProcessType.WORKER)
-                func(*args, **kwargs)
-            return Worker(name, target)
-        return wrapper
-    return decorator
-
-
-@make_worker("capture")
-@log_call
-def do_capture(
-    tag: str,
-) -> None:
-    """Start capturing data from an SDR in real time.
-
-    :param tag: The capture config tag.
-    """
-    _LOGGER.info((f"Reading capture config with tag '{tag}'"))
-
-    # load the receiver and mode from the capture config file
-    capture_config = CaptureConfig(tag)
-
-    _LOGGER.info((f"Starting capture with the receiver '{capture_config.receiver_name}' "
-                  f"operating in mode '{capture_config.receiver_mode}' "
-                  f"with tag '{tag}'"))
-
-    name = ReceiverName( capture_config.receiver_name )
-    receiver = get_receiver(name,
-                            capture_config.receiver_mode)
-    receiver.start_capture(tag)
-
-
-@make_worker("post_processing")
-@log_call
-def do_post_processing(
-    tag: str,
-) -> None:
-    """Start post processing SDR data into spectrograms in real time.
-
-    :param tag: The capture config tag.
-    """
-    _LOGGER.info(f"Starting post processor with tag '{tag}'")
-    start_post_processor(tag)
+    return Worker(name, _worker_target)
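
The `make_worker` rewrite is the breaking change in this file: the 0.0.22 decorator factory, along with the bundled `do_capture` and `do_post_processing` workers, is gone in favour of a plain factory function. A before/after sketch, using an illustrative `do_work` target and assuming `make_worker` is re-exported from `spectre_core.jobs`:

from spectre_core.jobs import make_worker


def do_work(tag: str) -> None:
    """Illustrative target; any callable whose return value can be discarded."""
    ...


# 0.0.22: decorate the function, then call it to obtain a (not yet started) Worker:
#
#     @make_worker("my_worker")
#     def do_work(tag: str) -> None: ...
#
#     worker = do_work("my-tag")
#
# 0.0.23: pass the target and its arguments explicitly instead.
worker = make_worker("my_worker", do_work, args=("my-tag",))
worker.start()  # runs do_work("my-tag") in a background daemon process

# `Worker.process` is gone; use the new property and method instead.
if worker.is_alive:
    worker.kill()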

spectre_core/logs/__init__.py
@@ -12,6 +12,11 @@ from ._logs import Log, Logs, parse_log_base_file_name
 
 
 __all__ = [
-    "log_call", "configure_root_logger", "Log", "Logs", "ProcessType",
-    "get_root_logger_state", "parse_log_base_file_name"
+    "log_call",
+    "configure_root_logger",
+    "Log",
+    "Logs",
+    "ProcessType",
+    "get_root_logger_state",
+    "parse_log_base_file_name",
 ]

spectre_core/logs/_configure.py
@@ -12,11 +12,8 @@ from ._logs import Log
 from ._process_types import ProcessType
 
 
-def configure_root_logger(
-    process_type: ProcessType,
-    level: int = logging.INFO
-) -> str:
-    """Configures the root logger to write logs to a file named based on
+def configure_root_logger(process_type: ProcessType, level: int = logging.INFO) -> str:
+    """Configures the root logger to write logs to a file named based on
     the process type, process ID, and the current system time.
 
     :param process_type: Indicates the type of process, as defined by `ProcessType`.
@@ -27,36 +24,35 @@ def configure_root_logger(
     # get the star time of the log
     system_datetime = datetime.now()
     start_time = system_datetime.strftime(TimeFormat.DATETIME)
-
+
     # extract the process identifier, and cast as a string
-    pid = str( os.getpid() )
-
+    pid = str(os.getpid())
+
     # create a file handler representing the log file
-    log = Log(start_time,
-              pid,
-              process_type)
+    log = Log(start_time, pid, process_type)
     log.make_parent_dir_path()
 
     # get the root logger and set its level.
     logger = logging.getLogger()
     logger.setLevel(level)
-
+
     # remove existing handlers
     for handler in logger.handlers:
         logger.removeHandler(handler)
-
+
     # Set up a file handler and add it to the root logger
     file_handler = logging.FileHandler(log.file_path)
     file_handler.setLevel(level)
-    formatter = logging.Formatter("[%(asctime)s] [%(levelname)8s] --- %(message)s (%(name)s:%(lineno)s)")
+    formatter = logging.Formatter(
+        "[%(asctime)s] [%(levelname)8s] --- %(message)s (%(name)s:%(lineno)s)"
+    )
     file_handler.setFormatter(formatter)
     logger.addHandler(file_handler)
 
     return log.file_path
 
 
-def get_root_logger_state(
-) -> Tuple[bool, int]:
+def get_root_logger_state() -> Tuple[bool, int]:
     """Get the state of the root logger.
 
     :return: Whether the root logger has any handlers, and the level of the root logger.
@@ -64,4 +60,4 @@ def get_root_logger_state(
     root_logger = logging.getLogger()
     if root_logger.handlers:
         return True, root_logger.level
-    return False, logging.NOTSET
+    return False, logging.NOTSET
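
Behaviourally this file is unchanged; the hunks are formatting only. For orientation, `configure_root_logger` writes subsequent root-logger records to a file named from the process type, PID, and start time, and returns that file's path, along the lines of:

from logging import getLogger

from spectre_core.logs import ProcessType, configure_root_logger

# ProcessType.WORKER is the member used elsewhere in this diff;
# pick the member matching your process.
log_file = configure_root_logger(ProcessType.WORKER)
getLogger(__name__).info(f"Now logging to {log_file}")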

spectre_core/logs/_decorators.py
@@ -10,9 +10,9 @@ from functools import wraps
 P = ParamSpec("P")
 # TypeVar for capturing the return type of the function
 RT = TypeVar("RT")
-def log_call(
-    func: Callable[P, RT]
-) -> Callable[P, RT]:
+
+
+def log_call(func: Callable[P, RT]) -> Callable[P, RT]:
     """Decorator to log the execution of a function.
 
     Logs an informational message when the decorated function is called,
@@ -21,6 +21,7 @@ def log_call(
     :param func: The function to be decorated.
     :return: The decorated function with added logging behaviour.
     """
+
     @wraps(func)
     def wrapper(*args: P.args, **kwargs: P.kwargs) -> RT:
         logger = logging.getLogger(func.__module__)
@@ -30,4 +31,5 @@ def log_call(
         except Exception as e:
             logger.error(f"Error in function: {func.__name__}", exc_info=True)
             raise
-    return wrapper
+
+    return wrapper
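
These hunks are likewise formatting only; `log_call` still logs the call and logs (then re-raises) any exception with a traceback. For reference, it is applied like any standard decorator (the `process_batch` function is illustrative):

from spectre_core.logs import log_call


@log_call
def process_batch(tag: str) -> None:
    """Illustrative function; @wraps preserves its name and docstring."""
    ...


process_batch("my-tag")  # the call is logged before execution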