citrascope 0.1.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. citrascope/__main__.py +13 -13
  2. citrascope/api/abstract_api_client.py +7 -0
  3. citrascope/api/citra_api_client.py +43 -2
  4. citrascope/citra_scope_daemon.py +205 -61
  5. citrascope/constants.py +23 -0
  6. citrascope/hardware/abstract_astro_hardware_adapter.py +70 -2
  7. citrascope/hardware/adapter_registry.py +94 -0
  8. citrascope/hardware/indi_adapter.py +456 -16
  9. citrascope/hardware/kstars_dbus_adapter.py +179 -0
  10. citrascope/hardware/nina_adv_http_adapter.py +593 -0
  11. citrascope/hardware/nina_adv_http_survey_template.json +328 -0
  12. citrascope/logging/__init__.py +2 -1
  13. citrascope/logging/_citrascope_logger.py +80 -1
  14. citrascope/logging/web_log_handler.py +75 -0
  15. citrascope/settings/citrascope_settings.py +140 -0
  16. citrascope/settings/settings_file_manager.py +126 -0
  17. citrascope/tasks/runner.py +129 -29
  18. citrascope/tasks/scope/base_telescope_task.py +25 -10
  19. citrascope/tasks/scope/static_telescope_task.py +11 -3
  20. citrascope/web/__init__.py +1 -0
  21. citrascope/web/app.py +479 -0
  22. citrascope/web/server.py +132 -0
  23. citrascope/web/static/api.js +82 -0
  24. citrascope/web/static/app.js +502 -0
  25. citrascope/web/static/config.js +438 -0
  26. citrascope/web/static/img/citra.png +0 -0
  27. citrascope/web/static/img/favicon.png +0 -0
  28. citrascope/web/static/style.css +152 -0
  29. citrascope/web/static/websocket.js +127 -0
  30. citrascope/web/templates/dashboard.html +407 -0
  31. {citrascope-0.1.0.dist-info → citrascope-0.4.0.dist-info}/METADATA +87 -47
  32. citrascope-0.4.0.dist-info/RECORD +38 -0
  33. {citrascope-0.1.0.dist-info → citrascope-0.4.0.dist-info}/WHEEL +1 -1
  34. citrascope/settings/_citrascope_settings.py +0 -42
  35. citrascope-0.1.0.dist-info/RECORD +0 -21
  36. docs/index.md +0 -47
  37. {citrascope-0.1.0.dist-info → citrascope-0.4.0.dist-info}/entry_points.txt +0 -0
citrascope/settings/settings_file_manager.py
@@ -0,0 +1,126 @@
+"""Settings file manager for CitraScope.
+
+Handles reading and writing JSON settings files using platformdirs
+for cross-platform settings directory management.
+"""
+
+import json
+import os
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, Optional
+
+import platformdirs
+
+from citrascope.settings.citrascope_settings import APP_AUTHOR, APP_NAME
+
+
+class SettingsFileManager:
+    """Manages settings file storage and retrieval."""
+
+    def __init__(self):
+        """Initialize the config file manager with the standard config directory."""
+        self.config_dir = Path(platformdirs.user_config_dir(APP_NAME, appauthor=APP_AUTHOR))
+        self.config_file = self.config_dir / "config.json"
+        self.log_dir = Path(platformdirs.user_log_dir(APP_NAME, appauthor=APP_AUTHOR))
+
+    def ensure_config_directory(self) -> None:
+        """Create config directory with proper permissions if it doesn't exist."""
+        if not self.config_dir.exists():
+            self.config_dir.mkdir(parents=True, mode=0o700)
+        else:
+            # Ensure proper permissions on existing directory
+            os.chmod(self.config_dir, 0o700)
+
+    def load_config(self) -> Dict[str, Any]:
+        """Load configuration from JSON file.
+
+        Returns:
+            Dict containing configuration, or empty dict if file doesn't exist.
+        """
+        if not self.config_file.exists():
+            return {}
+
+        try:
+            with open(self.config_file, "r") as f:
+                return json.load(f)
+        except (json.JSONDecodeError, IOError) as e:
+            # Log error but return empty config to allow recovery
+            print(f"Error loading config file: {e}")
+            return {}
+
+    def save_config(self, config: Dict[str, Any]) -> None:
+        """Save configuration to JSON file with proper permissions.
+
+        Args:
+            config: Dictionary of configuration values to save.
+        """
+        self.ensure_config_directory()
+
+        # Write to temp file first, then atomic rename
+        temp_file = self.config_file.with_suffix(".json.tmp")
+        try:
+            with open(temp_file, "w") as f:
+                json.dump(config, f, indent=2)
+            # Set restrictive permissions before moving into place
+            os.chmod(temp_file, 0o600)
+            temp_file.rename(self.config_file)
+        except Exception as e:
+            # Clean up temp file on error
+            if temp_file.exists():
+                temp_file.unlink()
+            raise IOError(f"Failed to save config: {e}")
+
+    def get_config_path(self) -> Path:
+        """Get the path to the config file.
+
+        Returns:
+            Path object pointing to the config file location.
+        """
+        return self.config_file
+
+    def config_exists(self) -> bool:
+        """Check if a config file exists.
+
+        Returns:
+            True if config file exists, False otherwise.
+        """
+        return self.config_file.exists()
+
+    def validate_config(self, config: Dict[str, Any]) -> tuple[bool, Optional[str]]:
+        """Validate configuration structure.
+
+        Args:
+            config: Configuration dictionary to validate.
+
+        Returns:
+            Tuple of (is_valid, error_message).
+        """
+        # Basic validation - check that it's a dict
+        if not isinstance(config, dict):
+            return False, "Configuration must be a dictionary"
+
+        # Could add more validation here for required fields, types, etc.
+        return True, None
+
+    def ensure_log_directory(self) -> None:
+        """Create log directory if it doesn't exist."""
+        if not self.log_dir.exists():
+            self.log_dir.mkdir(parents=True)
+
+    def get_log_dir(self) -> Path:
+        """Get the path to the log directory.
+
+        Returns:
+            Path object pointing to the log directory.
+        """
+        return self.log_dir
+
+    def get_current_log_path(self) -> Path:
+        """Get the path to the current log file (dated for today).
+
+        Returns:
+            Path object pointing to today's log file.
+        """
+        today = datetime.now().strftime("%Y-%m-%d")
+        return self.log_dir / f"citrascope-{today}.log"
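The new manager writes to a temp file and renames it into place, so a crash mid-save cannot leave a truncated config.json, and it pins 0o700/0o600 permissions on the directory and file. A minimal usage sketch based on the methods in the hunk above; the "web_port" key is illustrative, not a documented setting:

    from citrascope.settings.settings_file_manager import SettingsFileManager

    manager = SettingsFileManager()
    config = manager.load_config()          # {} on first run or if the JSON is unreadable
    config["web_port"] = 8080               # illustrative key, not a documented setting
    manager.save_config(config)             # temp-file write, chmod 0o600, atomic rename
    print(manager.get_config_path())        # <platformdirs config dir>/config.json
    print(manager.get_current_log_path())   # <log dir>/citrascope-YYYY-MM-DD.log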
citrascope/tasks/runner.py
@@ -2,7 +2,7 @@ import heapq
 import os
 import threading
 import time
-from datetime import datetime
+from datetime import datetime, timezone
 
 from dateutil import parser as dtparser
 
@@ -11,6 +11,9 @@ from citrascope.tasks.scope.static_telescope_task import StaticTelescopeTask
 from citrascope.tasks.scope.tracking_telescope_task import TrackingTelescopeTask
 from citrascope.tasks.task import Task
 
+# Task polling interval in seconds
+TASK_POLL_INTERVAL_SECONDS = 15
+
 
 class TaskManager:
     def __init__(
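The poll loop below consumes this interval through self._stop_event.wait(TASK_POLL_INTERVAL_SECONDS) rather than time.sleep, so a shutdown request interrupts the wait immediately instead of stalling for up to a full interval. The pattern in isolation, with illustrative names:

    import threading

    stop_event = threading.Event()

    def poll_loop():
        while not stop_event.is_set():
            ...  # do one poll here
            stop_event.wait(15)  # returns early the moment stop_event is set

    t = threading.Thread(target=poll_loop)
    t.start()
    stop_event.set()  # loop exits promptly instead of sleeping out the interval
    t.join()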
@@ -20,57 +23,110 @@ class TaskManager:
         ground_station_record,
         logger,
         hardware_adapter: AbstractAstroHardwareAdapter,
+        keep_images: bool = False,
+        settings=None,
     ):
         self.api_client = api_client
         self.telescope_record = telescope_record
         self.ground_station_record = ground_station_record
         self.logger = logger
+        self.settings = settings
         self.task_heap = []  # min-heap by start time
         self.task_ids = set()
         self.hardware_adapter = hardware_adapter
         self.heap_lock = threading.RLock()
         self._stop_event = threading.Event()
         self.current_task_id = None  # Track currently executing task
+        self.keep_images = keep_images
+        self.task_retry_counts = {}  # Track retry attempts per task ID
+        self.task_last_failure = {}  # Track last failure timestamp per task ID
 
     def poll_tasks(self):
         while not self._stop_event.is_set():
             try:
+                self._report_online()
                 tasks = self.api_client.get_telescope_tasks(self.telescope_record["id"])
+
+                # If API call failed (timeout, network error, etc.), wait before retrying
+                if tasks is None:
+                    self._stop_event.wait(TASK_POLL_INTERVAL_SECONDS)
+                    continue
+
                 added = 0
+                removed = 0
                 now = int(time.time())
                 with self.heap_lock:
+                    # Build a map of current valid tasks from the API
+                    api_task_map = {}
                     for task_dict in tasks:
                         try:
                             task = Task.from_dict(task_dict)
                             tid = task.id
+                            if tid and task.status in ["Pending", "Scheduled"]:
+                                api_task_map[tid] = task
+                        except Exception as e:
+                            self.logger.error(f"Error parsing task from API: {e}", exc_info=True)
+
+                    # Remove tasks from heap that are no longer valid (cancelled, completed, or not in API response)
+                    new_heap = []
+                    for start_epoch, stop_epoch, tid, task in self.task_heap:
+                        # Keep task if it's still in the API response with a valid status
+                        # Don't remove currently executing task
+                        if tid == self.current_task_id or tid in api_task_map:
+                            new_heap.append((start_epoch, stop_epoch, tid, task))
+                        else:
+                            self.logger.info(f"Removing task {tid} from queue (cancelled or status changed)")
+                            self.task_ids.discard(tid)
+                            # Clean up retry tracking
+                            self.task_retry_counts.pop(tid, None)
+                            self.task_last_failure.pop(tid, None)
+                            removed += 1
+
+                    # Rebuild heap if we removed anything
+                    if removed > 0:
+                        self.task_heap = new_heap
+                        heapq.heapify(self.task_heap)
+
+                    # Add new tasks that aren't already in the heap
+                    for tid, task in api_task_map.items():
+                        # Skip if task is in heap or is currently being executed
+                        if tid not in self.task_ids and tid != self.current_task_id:
                             task_start = task.taskStart
                             task_stop = task.taskStop
-                            # Skip if task is in heap or is currently being executed
-                            if tid and task_start and tid not in self.task_ids and tid != self.current_task_id:
-                                try:
-                                    start_epoch = int(dtparser.isoparse(task_start).timestamp())
-                                    stop_epoch = int(dtparser.isoparse(task_stop).timestamp()) if task_stop else 0
-                                except Exception:
-                                    self.logger.error(f"Could not parse taskStart/taskStop for task {tid}")
-                                    continue
-                                if stop_epoch and stop_epoch < now:
-                                    self.logger.debug(f"Skipping past task {tid} that ended at {task_stop}")
-                                    continue  # Skip tasks whose end date has passed
-                                if task.status not in ["Pending", "Scheduled"]:
-                                    self.logger.debug(f"Skipping task {tid} with status {task.status}")
-                                    continue  # Only schedule pending/scheduled tasks
-                                heapq.heappush(self.task_heap, (start_epoch, stop_epoch, tid, task))
-                                self.task_ids.add(tid)
-                                added += 1
-                        except Exception as e:
-                            self.logger.error(f"Error adding task {tid} to heap: {e}", exc_info=True)
-                if added > 0:
-                    self.logger.info(self._heap_summary("Added tasks"))
-                self.logger.info(self._heap_summary("Polled tasks"))
+                            try:
+                                start_epoch = int(dtparser.isoparse(task_start).timestamp())
+                                stop_epoch = int(dtparser.isoparse(task_stop).timestamp()) if task_stop else 0
+                            except Exception:
+                                self.logger.error(f"Could not parse taskStart/taskStop for task {tid}")
+                                continue
+                            if stop_epoch and stop_epoch < now:
+                                self.logger.debug(f"Skipping past task {tid} that ended at {task_stop}")
+                                continue  # Skip tasks whose end date has passed
+                            heapq.heappush(self.task_heap, (start_epoch, stop_epoch, tid, task))
+                            self.task_ids.add(tid)
+                            added += 1
+
+                if added > 0 or removed > 0:
+                    action = []
+                    if added > 0:
+                        action.append(f"Added {added}")
+                    if removed > 0:
+                        action.append(f"Removed {removed}")
+                    self.logger.info(self._heap_summary(f"{', '.join(action)} tasks"))
+                # self.logger.info(self._heap_summary("Polled tasks"))
             except Exception as e:
                 self.logger.error(f"Exception in poll_tasks loop: {e}", exc_info=True)
                 time.sleep(5)  # avoid tight error loop
-            self._stop_event.wait(15)
+            self._stop_event.wait(TASK_POLL_INTERVAL_SECONDS)
+
+    def _report_online(self):
+        """
+        PUT to /telescopes to report this telescope as online.
+        """
+        telescope_id = self.telescope_record["id"]
+        iso_timestamp = datetime.now(timezone.utc).isoformat()
+        self.api_client.put_telescope_status([{"id": telescope_id, "last_connection_epoch": iso_timestamp}])
+        self.logger.debug(f"Reported online status for telescope {telescope_id} at {iso_timestamp}")
 
     def task_runner(self):
         while not self._stop_event.is_set():
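The rewritten poll loop reconciles the local heap against the API on every pass: ids that disappeared from the response, or that left the Pending/Scheduled states, are filtered out, and the heap invariant is restored with a single heapify call instead of re-pushing the survivors. The same filter-then-heapify pattern in isolation, with illustrative data rather than CitraScope types:

    import heapq

    # Heap entries ordered by start time: (start_epoch, stop_epoch, task_id, payload)
    task_heap = [(100, 200, "a", None), (150, 250, "b", None), (120, 220, "c", None)]
    heapq.heapify(task_heap)

    valid_ids = {"a", "c"}  # ids the API still reports as Pending/Scheduled
    kept = [entry for entry in task_heap if entry[2] in valid_ids]
    if len(kept) != len(task_heap):
        task_heap = kept
        heapq.heapify(task_heap)  # O(n), cheaper than re-pushing entries one by one

    print(task_heap[0])  # (100, 200, 'a', None): the earliest start stays at the root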
@@ -98,9 +154,49 @@
                     self.logger.info(f"Completed observation task {tid} successfully.")
                     heapq.heappop(self.task_heap)
                     self.task_ids.discard(tid)
+                    # Clean up retry tracking for successful task
+                    self.task_retry_counts.pop(tid, None)
+                    self.task_last_failure.pop(tid, None)
                     completed += 1
                 else:
-                    self.logger.error(f"Observation task {tid} failed.")
+                    # Task failed - implement retry logic with exponential backoff
+                    retry_count = self.task_retry_counts.get(tid, 0)
+                    max_retries = self.settings.max_task_retries if self.settings else 3
+
+                    if retry_count >= max_retries:
+                        # Max retries exceeded - permanently fail the task
+                        self.logger.error(
+                            f"Observation task {tid} failed after {retry_count} retries. Permanently failing."
+                        )
+                        heapq.heappop(self.task_heap)
+                        self.task_ids.discard(tid)
+                        # Clean up retry tracking
+                        self.task_retry_counts.pop(tid, None)
+                        self.task_last_failure.pop(tid, None)
+                        # Mark task as failed in API
+                        try:
+                            self.api_client.mark_task_failed(tid)
+                        except Exception as e:
+                            self.logger.error(f"Failed to mark task {tid} as failed in API: {e}")
+                    else:
+                        # Retry with exponential backoff
+                        self.task_retry_counts[tid] = retry_count + 1
+                        self.task_last_failure[tid] = now
+
+                        # Calculate backoff delay: initial_delay * 2^retry_count, capped at max_delay
+                        initial_delay = self.settings.initial_retry_delay_seconds if self.settings else 30
+                        max_delay = self.settings.max_retry_delay_seconds if self.settings else 300
+                        backoff_delay = min(initial_delay * (2**retry_count), max_delay)
+
+                        # Update task start time in heap to retry after backoff delay
+                        _, stop_epoch, _, task = heapq.heappop(self.task_heap)
+                        new_start_time = now + backoff_delay
+                        heapq.heappush(self.task_heap, (new_start_time, stop_epoch, tid, task))
+
+                        self.logger.warning(
+                            f"Observation task {tid} failed (attempt {retry_count + 1}/{max_retries}). "
+                            f"Retrying in {backoff_delay} seconds at {datetime.fromtimestamp(new_start_time).isoformat()}"
+                        )
 
             if completed > 0:
                 self.logger.info(self._heap_summary("Completed tasks"))
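With the fallback defaults above (30 s initial delay, 300 s cap, 3 retries), min(initial_delay * (2**retry_count), max_delay) schedules retries 30 s, 60 s, and 120 s after each failure before the task is permanently failed; the 300 s cap only engages if max_task_retries is raised in settings. A quick check of the arithmetic:

    # Backoff schedule using the fallback defaults from the hunk above
    initial_delay, max_delay = 30, 300
    for retry_count in range(5):
        print(retry_count, min(initial_delay * (2**retry_count), max_delay))
    # -> 30, 60, 120, 240, 300 (the cap engages at retry_count 4)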
@@ -113,7 +209,13 @@
 
         # take a still
         static_task = StaticTelescopeTask(
-            self.api_client, self.hardware_adapter, self.logger, self.telescope_record, self.ground_station_record, task
+            self.api_client,
+            self.hardware_adapter,
+            self.logger,
+            self.telescope_record,
+            self.ground_station_record,
+            task,
+            self.keep_images,
         )
         return static_task.execute()
 
@@ -137,9 +239,7 @@
         if self.current_task_id is not None:
             # Show the current in-flight task at the front
             summary += f"Current: {self.current_task_id}. "
-        if next_tasks and len(next_tasks) > 1 and self.current_task_id != next_tasks[0].split()[0]:
-            summary += "Next: " + ", ".join(next_tasks)
-        else:
+        if not next_tasks:
             summary += "No tasks scheduled."
         return summary
 
citrascope/tasks/scope/base_telescope_task.py
@@ -17,6 +17,7 @@ class AbstractBaseTelescopeTask(ABC):
         telescope_record,
         ground_station_record,
         task,
+        keep_images: bool = False,
     ):
         self.api_client = api_client
         self.hardware_adapter: AbstractAstroHardwareAdapter = hardware_adapter
@@ -24,6 +25,7 @@ class AbstractBaseTelescopeTask(ABC):
         self.telescope_record = telescope_record
         self.ground_station_record = ground_station_record
         self.task = task
+        self.keep_images = keep_images
 
     def fetch_satellite(self) -> dict | None:
         satellite_data = self.api_client.get_satellite(self.task.satelliteId)
@@ -55,17 +57,30 @@
         )
         return most_recent_elset
 
-    def upload_image_and_mark_complete(self, filepath):
-        upload_result = self.api_client.upload_image(self.task.id, self.telescope_record["id"], filepath)
-        if upload_result:
-            self.logger.info(f"Successfully uploaded image for task {self.task.id}")
+    def upload_image_and_mark_complete(self, filepath: str | list[str]) -> bool:
+
+        if isinstance(filepath, str):
+            filepaths = [filepath]
         else:
-            self.logger.error(f"Failed to upload image for task {self.task.id}")
-        try:
-            os.remove(filepath)
-            self.logger.info(f"Deleted local image file {filepath} after upload.")
-        except Exception as e:
-            self.logger.error(f"Failed to delete local image file {filepath}: {e}")
+            filepaths = filepath
+
+        for filepath in filepaths:
+            upload_result = self.api_client.upload_image(self.task.id, self.telescope_record["id"], filepath)
+            if upload_result:
+                self.logger.info(f"Successfully uploaded image {filepath}")
+            else:
+                self.logger.error(f"Failed to upload image {filepath}")
+                return False
+
+            if not self.keep_images:
+                try:
+                    os.remove(filepath)
+                    self.logger.debug(f"Deleted local image file {filepath} after upload.")
+                except Exception as e:
+                    self.logger.error(f"Failed to delete local image file {filepath}: {e}")
+            else:
+                self.logger.info(f"Keeping local image file {filepath} (--keep-images flag set).")
+
         marked_complete = self.api_client.mark_task_complete(self.task.id)
         if not marked_complete:
             task_check = self.api_client.get_telescope_tasks(self.telescope_record["id"])
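upload_image_and_mark_complete now accepts either one path or a list and returns False on the first failed upload, so mark_task_complete is only reached once every frame has made it to the API. The str | list[str] normalization in isolation; as_list is a hypothetical helper named here only for illustration:

    def as_list(filepath: str | list[str]) -> list[str]:
        # Wrap a bare path in a list; pass lists through unchanged
        return [filepath] if isinstance(filepath, str) else filepath

    assert as_list("frame.fits") == ["frame.fits"]
    assert as_list(["a.fits", "b.fits"]) == ["a.fits", "b.fits"]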
citrascope/tasks/scope/static_telescope_task.py
@@ -1,5 +1,6 @@
 import time
 
+from citrascope.hardware.abstract_astro_hardware_adapter import ObservationStrategy
 from citrascope.tasks.scope.base_telescope_task import AbstractBaseTelescopeTask
 
 
@@ -9,8 +10,15 @@ class StaticTelescopeTask(AbstractBaseTelescopeTask):
         satellite_data = self.fetch_satellite()
         if not satellite_data or satellite_data.get("most_recent_elset") is None:
             raise ValueError("Could not fetch valid satellite data or TLE.")
-        self.point_to_lead_position(satellite_data)
+
+        filepath = None
+        if self.hardware_adapter.get_observation_strategy() == ObservationStrategy.MANUAL:
+            self.point_to_lead_position(satellite_data)
+            filepaths = self.hardware_adapter.take_image(self.task.id, 2.0)  # 2 second exposure
+
+        if self.hardware_adapter.get_observation_strategy() == ObservationStrategy.SEQUENCE_TO_CONTROLLER:
+            # Assume the hardware adapter has already pointed the telescope and started tracking
+            filepaths = self.hardware_adapter.perform_observation_sequence(self.task.id, satellite_data)
 
         # Take the image
-        filepath = self.hardware_adapter.take_image(self.task.id, 2.0)  # 2 second exposure
-        return self.upload_image_and_mark_complete(filepath)
+        return self.upload_image_and_mark_complete(filepaths)
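The task now dispatches on the adapter's declared ObservationStrategy: under MANUAL the daemon points the scope and triggers the exposure itself, while under SEQUENCE_TO_CONTROLLER the adapter (presumably controller-driven backends such as the new N.I.N.A. HTTP adapter) is assumed to have pointed and tracked already and hands back the captured file paths. A sketch of the adapter side, assuming only the two enum members visible in this diff; the class and method bodies are illustrative, not the package's actual adapters:

    from citrascope.hardware.abstract_astro_hardware_adapter import (
        AbstractAstroHardwareAdapter,
        ObservationStrategy,
    )

    class HypotheticalSequenceAdapter(AbstractAstroHardwareAdapter):
        def get_observation_strategy(self) -> ObservationStrategy:
            # Tell the task layer this controller does its own pointing,
            # tracking, and capture
            return ObservationStrategy.SEQUENCE_TO_CONTROLLER

        def perform_observation_sequence(self, task_id, satellite_data) -> list[str]:
            # Run the controller-side sequence and return captured file paths
            ...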
citrascope/web/__init__.py
@@ -0,0 +1 @@
+# Web interface module for CitraScope