citrascope 0.1.0__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- citrascope/__main__.py +8 -5
- citrascope/api/abstract_api_client.py +7 -0
- citrascope/api/citra_api_client.py +30 -1
- citrascope/citra_scope_daemon.py +214 -61
- citrascope/hardware/abstract_astro_hardware_adapter.py +70 -2
- citrascope/hardware/adapter_registry.py +94 -0
- citrascope/hardware/indi_adapter.py +456 -16
- citrascope/hardware/kstars_dbus_adapter.py +179 -0
- citrascope/hardware/nina_adv_http_adapter.py +593 -0
- citrascope/hardware/nina_adv_http_survey_template.json +328 -0
- citrascope/logging/__init__.py +2 -1
- citrascope/logging/_citrascope_logger.py +80 -1
- citrascope/logging/web_log_handler.py +74 -0
- citrascope/settings/citrascope_settings.py +145 -0
- citrascope/settings/settings_file_manager.py +126 -0
- citrascope/tasks/runner.py +124 -28
- citrascope/tasks/scope/base_telescope_task.py +25 -10
- citrascope/tasks/scope/static_telescope_task.py +11 -3
- citrascope/web/__init__.py +1 -0
- citrascope/web/app.py +470 -0
- citrascope/web/server.py +123 -0
- citrascope/web/static/api.js +82 -0
- citrascope/web/static/app.js +500 -0
- citrascope/web/static/config.js +362 -0
- citrascope/web/static/img/citra.png +0 -0
- citrascope/web/static/img/favicon.png +0 -0
- citrascope/web/static/style.css +120 -0
- citrascope/web/static/websocket.js +127 -0
- citrascope/web/templates/dashboard.html +354 -0
- {citrascope-0.1.0.dist-info → citrascope-0.3.0.dist-info}/METADATA +68 -36
- citrascope-0.3.0.dist-info/RECORD +38 -0
- {citrascope-0.1.0.dist-info → citrascope-0.3.0.dist-info}/WHEEL +1 -1
- citrascope/settings/_citrascope_settings.py +0 -42
- citrascope-0.1.0.dist-info/RECORD +0 -21
- {citrascope-0.1.0.dist-info → citrascope-0.3.0.dist-info}/entry_points.txt +0 -0

citrascope/settings/settings_file_manager.py
ADDED
@@ -0,0 +1,126 @@
+"""Settings file manager for CitraScope.
+
+Handles reading and writing JSON settings files using platformdirs
+for cross-platform settings directory management.
+"""
+
+import json
+import os
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, Optional
+
+import platformdirs
+
+from citrascope.settings.citrascope_settings import APP_AUTHOR, APP_NAME
+
+
+class SettingsFileManager:
+    """Manages settings file storage and retrieval."""
+
+    def __init__(self):
+        """Initialize the config file manager with the standard config directory."""
+        self.config_dir = Path(platformdirs.user_config_dir(APP_NAME, appauthor=APP_AUTHOR))
+        self.config_file = self.config_dir / "config.json"
+        self.log_dir = Path(platformdirs.user_log_dir(APP_NAME, appauthor=APP_AUTHOR))
+
+    def ensure_config_directory(self) -> None:
+        """Create config directory with proper permissions if it doesn't exist."""
+        if not self.config_dir.exists():
+            self.config_dir.mkdir(parents=True, mode=0o700)
+        else:
+            # Ensure proper permissions on existing directory
+            os.chmod(self.config_dir, 0o700)
+
+    def load_config(self) -> Dict[str, Any]:
+        """Load configuration from JSON file.
+
+        Returns:
+            Dict containing configuration, or empty dict if file doesn't exist.
+        """
+        if not self.config_file.exists():
+            return {}
+
+        try:
+            with open(self.config_file, "r") as f:
+                return json.load(f)
+        except (json.JSONDecodeError, IOError) as e:
+            # Log error but return empty config to allow recovery
+            print(f"Error loading config file: {e}")
+            return {}
+
+    def save_config(self, config: Dict[str, Any]) -> None:
+        """Save configuration to JSON file with proper permissions.
+
+        Args:
+            config: Dictionary of configuration values to save.
+        """
+        self.ensure_config_directory()
+
+        # Write to temp file first, then atomic rename
+        temp_file = self.config_file.with_suffix(".json.tmp")
+        try:
+            with open(temp_file, "w") as f:
+                json.dump(config, f, indent=2)
+            # Set restrictive permissions before moving into place
+            os.chmod(temp_file, 0o600)
+            temp_file.rename(self.config_file)
+        except Exception as e:
+            # Clean up temp file on error
+            if temp_file.exists():
+                temp_file.unlink()
+            raise IOError(f"Failed to save config: {e}")
+
+    def get_config_path(self) -> Path:
+        """Get the path to the config file.
+
+        Returns:
+            Path object pointing to the config file location.
+        """
+        return self.config_file
+
+    def config_exists(self) -> bool:
+        """Check if a config file exists.
+
+        Returns:
+            True if config file exists, False otherwise.
+        """
+        return self.config_file.exists()
+
+    def validate_config(self, config: Dict[str, Any]) -> tuple[bool, Optional[str]]:
+        """Validate configuration structure.
+
+        Args:
+            config: Configuration dictionary to validate.
+
+        Returns:
+            Tuple of (is_valid, error_message).
+        """
+        # Basic validation - check that it's a dict
+        if not isinstance(config, dict):
+            return False, "Configuration must be a dictionary"
+
+        # Could add more validation here for required fields, types, etc.
+        return True, None
+
+    def ensure_log_directory(self) -> None:
+        """Create log directory if it doesn't exist."""
+        if not self.log_dir.exists():
+            self.log_dir.mkdir(parents=True)
+
+    def get_log_dir(self) -> Path:
+        """Get the path to the log directory.
+
+        Returns:
+            Path object pointing to the log directory.
+        """
+        return self.log_dir
+
+    def get_current_log_path(self) -> Path:
+        """Get the path to the current log file (dated for today).
+
+        Returns:
+            Path object pointing to today's log file.
+        """
+        today = datetime.now().strftime("%Y-%m-%d")
+        return self.log_dir / f"citrascope-{today}.log"
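A minimal usage sketch of the new SettingsFileManager, based only on the methods shown above; the "example_key" setting is made up for illustration and the actual paths are platform-dependent via platformdirs:

```python
from citrascope.settings.settings_file_manager import SettingsFileManager

manager = SettingsFileManager()

# Returns {} if config.json does not exist yet or cannot be parsed
config = manager.load_config()

# "example_key" is a hypothetical setting used only for this illustration
config["example_key"] = "example_value"

# Written to a .json.tmp file, chmod 0o600, then renamed into place
manager.save_config(config)

print(manager.get_config_path())       # e.g. <user config dir>/config.json
print(manager.get_current_log_path())  # <user log dir>/citrascope-YYYY-MM-DD.log
```
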
citrascope/tasks/runner.py
CHANGED
@@ -2,7 +2,7 @@ import heapq
 import os
 import threading
 import time
-from datetime import datetime
+from datetime import datetime, timezone
 
 from dateutil import parser as dtparser
 
@@ -20,58 +20,110 @@ class TaskManager:
         ground_station_record,
         logger,
         hardware_adapter: AbstractAstroHardwareAdapter,
+        keep_images: bool = False,
+        settings=None,
     ):
         self.api_client = api_client
         self.telescope_record = telescope_record
         self.ground_station_record = ground_station_record
         self.logger = logger
+        self.settings = settings
         self.task_heap = []  # min-heap by start time
         self.task_ids = set()
         self.hardware_adapter = hardware_adapter
         self.heap_lock = threading.RLock()
         self._stop_event = threading.Event()
         self.current_task_id = None  # Track currently executing task
+        self.keep_images = keep_images
+        self.task_retry_counts = {}  # Track retry attempts per task ID
+        self.task_last_failure = {}  # Track last failure timestamp per task ID
 
     def poll_tasks(self):
         while not self._stop_event.is_set():
             try:
+                self._report_online()
                 tasks = self.api_client.get_telescope_tasks(self.telescope_record["id"])
+
+                # If API call failed (timeout, network error, etc.), skip this poll iteration
+                if tasks is None:
+                    continue
+
                 added = 0
+                removed = 0
                 now = int(time.time())
                 with self.heap_lock:
+                    # Build a map of current valid tasks from the API
+                    api_task_map = {}
                     for task_dict in tasks:
                         try:
                             task = Task.from_dict(task_dict)
                             tid = task.id
+                            if tid and task.status in ["Pending", "Scheduled"]:
+                                api_task_map[tid] = task
+                        except Exception as e:
+                            self.logger.error(f"Error parsing task from API: {e}", exc_info=True)
+
+                    # Remove tasks from heap that are no longer valid (cancelled, completed, or not in API response)
+                    new_heap = []
+                    for start_epoch, stop_epoch, tid, task in self.task_heap:
+                        # Keep task if it's still in the API response with a valid status
+                        # Don't remove currently executing task
+                        if tid == self.current_task_id or tid in api_task_map:
+                            new_heap.append((start_epoch, stop_epoch, tid, task))
+                        else:
+                            self.logger.info(f"Removing task {tid} from queue (cancelled or status changed)")
+                            self.task_ids.discard(tid)
+                            # Clean up retry tracking
+                            self.task_retry_counts.pop(tid, None)
+                            self.task_last_failure.pop(tid, None)
+                            removed += 1
+
+                    # Rebuild heap if we removed anything
+                    if removed > 0:
+                        self.task_heap = new_heap
+                        heapq.heapify(self.task_heap)
+
+                    # Add new tasks that aren't already in the heap
+                    for tid, task in api_task_map.items():
+                        # Skip if task is in heap or is currently being executed
+                        if tid not in self.task_ids and tid != self.current_task_id:
                             task_start = task.taskStart
                             task_stop = task.taskStop
-                self.logger.info(self._heap_summary("Polled tasks"))
+                            try:
+                                start_epoch = int(dtparser.isoparse(task_start).timestamp())
+                                stop_epoch = int(dtparser.isoparse(task_stop).timestamp()) if task_stop else 0
+                            except Exception:
+                                self.logger.error(f"Could not parse taskStart/taskStop for task {tid}")
+                                continue
+                            if stop_epoch and stop_epoch < now:
+                                self.logger.debug(f"Skipping past task {tid} that ended at {task_stop}")
+                                continue  # Skip tasks whose end date has passed
+                            heapq.heappush(self.task_heap, (start_epoch, stop_epoch, tid, task))
+                            self.task_ids.add(tid)
+                            added += 1
+
+                if added > 0 or removed > 0:
+                    action = []
+                    if added > 0:
+                        action.append(f"Added {added}")
+                    if removed > 0:
+                        action.append(f"Removed {removed}")
+                    self.logger.info(self._heap_summary(f"{', '.join(action)} tasks"))
+                # self.logger.info(self._heap_summary("Polled tasks"))
             except Exception as e:
                 self.logger.error(f"Exception in poll_tasks loop: {e}", exc_info=True)
                 time.sleep(5)  # avoid tight error loop
             self._stop_event.wait(15)
 
+    def _report_online(self):
+        """
+        PUT to /telescopes to report this telescope as online.
+        """
+        telescope_id = self.telescope_record["id"]
+        iso_timestamp = datetime.now(timezone.utc).isoformat()
+        self.api_client.put_telescope_status([{"id": telescope_id, "last_connection_epoch": iso_timestamp}])
+        self.logger.debug(f"Reported online status for telescope {telescope_id} at {iso_timestamp}")
+
     def task_runner(self):
         while not self._stop_event.is_set():
             try:
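The reworked poll loop above reconciles the local min-heap against each API snapshot: stale entries are filtered out, the heap invariant is restored with heapify only when something was dropped, and newly seen tasks are pushed keyed by start time. A standalone sketch of that pattern using plain tuples rather than CitraScope's Task objects:

```python
import heapq

# Heap entries mirror the runner's shape: (start_epoch, stop_epoch, task_id, payload),
# so the task with the earliest start time is always at heap[0].
heap = [(1700000300, 1700000600, "b", {}), (1700000100, 1700000400, "a", {})]
heapq.heapify(heap)

api_ids = {"a", "c"}      # tasks the API still reports as Pending/Scheduled
current_task_id = None    # an in-flight task is never dropped

# Filter out entries the API no longer reports
kept = [entry for entry in heap if entry[2] == current_task_id or entry[2] in api_ids]
if len(kept) != len(heap):
    heap = kept
    heapq.heapify(heap)   # restore the heap invariant after filtering

# Newly seen tasks are pushed; heappush keeps the invariant incrementally
heapq.heappush(heap, (1700000200, 1700000500, "c", {}))
print(heap[0][2])         # -> "a", still the earliest start time
```
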
@@ -98,9 +150,49 @@ class TaskManager:
                         self.logger.info(f"Completed observation task {tid} successfully.")
                         heapq.heappop(self.task_heap)
                         self.task_ids.discard(tid)
+                        # Clean up retry tracking for successful task
+                        self.task_retry_counts.pop(tid, None)
+                        self.task_last_failure.pop(tid, None)
                         completed += 1
                     else:
+                        # Task failed - implement retry logic with exponential backoff
+                        retry_count = self.task_retry_counts.get(tid, 0)
+                        max_retries = self.settings.max_task_retries if self.settings else 3
+
+                        if retry_count >= max_retries:
+                            # Max retries exceeded - permanently fail the task
+                            self.logger.error(
+                                f"Observation task {tid} failed after {retry_count} retries. Permanently failing."
+                            )
+                            heapq.heappop(self.task_heap)
+                            self.task_ids.discard(tid)
+                            # Clean up retry tracking
+                            self.task_retry_counts.pop(tid, None)
+                            self.task_last_failure.pop(tid, None)
+                            # Mark task as failed in API
+                            try:
+                                self.api_client.mark_task_failed(tid)
+                            except Exception as e:
+                                self.logger.error(f"Failed to mark task {tid} as failed in API: {e}")
+                        else:
+                            # Retry with exponential backoff
+                            self.task_retry_counts[tid] = retry_count + 1
+                            self.task_last_failure[tid] = now
+
+                            # Calculate backoff delay: initial_delay * 2^retry_count, capped at max_delay
+                            initial_delay = self.settings.initial_retry_delay_seconds if self.settings else 30
+                            max_delay = self.settings.max_retry_delay_seconds if self.settings else 300
+                            backoff_delay = min(initial_delay * (2**retry_count), max_delay)
+
+                            # Update task start time in heap to retry after backoff delay
+                            _, stop_epoch, _, task = heapq.heappop(self.task_heap)
+                            new_start_time = now + backoff_delay
+                            heapq.heappush(self.task_heap, (new_start_time, stop_epoch, tid, task))
+
+                            self.logger.warning(
+                                f"Observation task {tid} failed (attempt {retry_count + 1}/{max_retries}). "
+                                f"Retrying in {backoff_delay} seconds at {datetime.fromtimestamp(new_start_time).isoformat()}"
+                            )
 
             if completed > 0:
                 self.logger.info(self._heap_summary("Completed tasks"))
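With the fallback values used when no settings object is supplied (initial delay 30 s, cap 300 s, max_task_retries 3), the backoff formula min(initial_delay * 2**retry_count, max_delay) reschedules a failing task like this:

```python
initial_delay, max_delay, max_retries = 30, 300, 3

for retry_count in range(max_retries):
    delay = min(initial_delay * (2 ** retry_count), max_delay)
    print(f"attempt {retry_count + 1}/{max_retries}: retry in {delay} s")
# attempt 1/3: retry in 30 s
# attempt 2/3: retry in 60 s
# attempt 3/3: retry in 120 s
# a fourth failure reaches retry_count >= max_retries and the task is marked failed
```
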
@@ -113,7 +205,13 @@ class TaskManager:
 
         # stake a still
         static_task = StaticTelescopeTask(
-            self.api_client,
+            self.api_client,
+            self.hardware_adapter,
+            self.logger,
+            self.telescope_record,
+            self.ground_station_record,
+            task,
+            self.keep_images,
         )
         return static_task.execute()
 
@@ -137,9 +235,7 @@ class TaskManager:
         if self.current_task_id is not None:
             # Show the current in-flight task at the front
             summary += f"Current: {self.current_task_id}. "
-        if
-            summary += "Next: " + ", ".join(next_tasks)
-        else:
+        if not next_tasks:
             summary += "No tasks scheduled."
         return summary
 

citrascope/tasks/scope/base_telescope_task.py
CHANGED
@@ -17,6 +17,7 @@ class AbstractBaseTelescopeTask(ABC):
         telescope_record,
         ground_station_record,
         task,
+        keep_images: bool = False,
     ):
         self.api_client = api_client
         self.hardware_adapter: AbstractAstroHardwareAdapter = hardware_adapter
@@ -24,6 +25,7 @@ class AbstractBaseTelescopeTask(ABC):
         self.telescope_record = telescope_record
         self.ground_station_record = ground_station_record
         self.task = task
+        self.keep_images = keep_images
 
     def fetch_satellite(self) -> dict | None:
         satellite_data = self.api_client.get_satellite(self.task.satelliteId)
@@ -55,17 +57,30 @@ class AbstractBaseTelescopeTask(ABC):
         )
         return most_recent_elset
 
-    def upload_image_and_mark_complete(self, filepath):
-        if
+    def upload_image_and_mark_complete(self, filepath: str | list[str]) -> bool:
+
+        if isinstance(filepath, str):
+            filepaths = [filepath]
         else:
-            self.
+            filepaths = filepath
+
+        for filepath in filepaths:
+            upload_result = self.api_client.upload_image(self.task.id, self.telescope_record["id"], filepath)
+            if upload_result:
+                self.logger.info(f"Successfully uploaded image {filepath}")
+            else:
+                self.logger.error(f"Failed to upload image {filepath}")
+                return False
+
+            if not self.keep_images:
+                try:
+                    os.remove(filepath)
+                    self.logger.debug(f"Deleted local image file {filepath} after upload.")
+                except Exception as e:
+                    self.logger.error(f"Failed to delete local image file {filepath}: {e}")
+            else:
+                self.logger.info(f"Keeping local image file {filepath} (--keep-images flag set).")
+
         marked_complete = self.api_client.mark_task_complete(self.task.id)
         if not marked_complete:
             task_check = self.api_client.get_telescope_tasks(self.telescope_record["id"])
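The first few added lines of upload_image_and_mark_complete are the usual accept-one-or-many normalization; the same idea in isolation:

```python
def normalize_paths(filepath: str | list[str]) -> list[str]:
    """Accept a single path or a list of paths and always return a list."""
    return [filepath] if isinstance(filepath, str) else filepath

assert normalize_paths("obs_0001.fits") == ["obs_0001.fits"]
assert normalize_paths(["obs_0001.fits", "obs_0002.fits"]) == ["obs_0001.fits", "obs_0002.fits"]
```
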

citrascope/tasks/scope/static_telescope_task.py
CHANGED
@@ -1,5 +1,6 @@
 import time
 
+from citrascope.hardware.abstract_astro_hardware_adapter import ObservationStrategy
 from citrascope.tasks.scope.base_telescope_task import AbstractBaseTelescopeTask
 
 
@@ -9,8 +10,15 @@ class StaticTelescopeTask(AbstractBaseTelescopeTask):
         satellite_data = self.fetch_satellite()
         if not satellite_data or satellite_data.get("most_recent_elset") is None:
             raise ValueError("Could not fetch valid satellite data or TLE.")
+
+        filepath = None
+        if self.hardware_adapter.get_observation_strategy() == ObservationStrategy.MANUAL:
+            self.point_to_lead_position(satellite_data)
+            filepaths = self.hardware_adapter.take_image(self.task.id, 2.0)  # 2 second exposure
+
+        if self.hardware_adapter.get_observation_strategy() == ObservationStrategy.SEQUENCE_TO_CONTROLLER:
+            # Assume the hardware adapter has already pointed the telescope and started tracking
+            filepaths = self.hardware_adapter.perform_observation_sequence(self.task.id, satellite_data)
 
         # Take the image
-        return self.upload_image_and_mark_complete(filepath)
+        return self.upload_image_and_mark_complete(filepaths)
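StaticTelescopeTask now branches on the adapter's declared strategy: MANUAL adapters are pointed by the task and asked for a single 2-second exposure, while SEQUENCE_TO_CONTROLLER adapters run their own pointing and imaging sequence and return the resulting file paths. A hypothetical adapter illustrating that contract, assuming only the method names and enum members visible in this diff (the real ObservationStrategy lives in citrascope.hardware.abstract_astro_hardware_adapter):

```python
from enum import Enum, auto


class ObservationStrategy(Enum):
    # Stand-in for the enum defined in abstract_astro_hardware_adapter;
    # only these two members appear in this diff.
    MANUAL = auto()
    SEQUENCE_TO_CONTROLLER = auto()


class ExampleSequencingAdapter:
    """Hypothetical adapter sketch, not one of the adapters shipped in 0.3.0."""

    def get_observation_strategy(self) -> ObservationStrategy:
        # Tells StaticTelescopeTask to delegate pointing, tracking, and imaging to the controller
        return ObservationStrategy.SEQUENCE_TO_CONTROLLER

    def perform_observation_sequence(self, task_id: str, satellite_data: dict) -> list[str]:
        # A real adapter would drive its controller here and return the files it captured
        return [f"/tmp/{task_id}_0001.fits", f"/tmp/{task_id}_0002.fits"]
```
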

citrascope/web/__init__.py
ADDED
@@ -0,0 +1 @@
+# Web interface module for CitraScope