sdss-almanac 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,185 @@
1
+ import numpy as np
2
+ from astropy.table import Table, hstack
3
+ from scipy.spatial.distance import cdist
4
+ from subprocess import check_output
5
+ from typing import List, Tuple
6
+
7
+ from almanac import utils
8
+ from almanac.config import config
9
+ from almanac.logger import logger
10
+
11
def sanitise_twomass_designation(v):
    """
    Normalise a 2MASS-style target identifier.

    Strips any leading ``2MASS`` prefix and leading separator characters
    (``-``, ``J``, ``d``, ``b``, ``_``), and maps placeholder values
    (``na``/``NA``/``None``) to an empty string.

    :param v:
        The raw target identifier (any type; coerced to ``str``).

    :returns:
        The cleaned designation, or ``""`` for placeholder values.
    """
    designation = str(v).strip()
    # The target_ids seem to be styled '2MASS-J...'
    if designation.startswith("2MASS"):
        designation = designation[len("2MASS"):]
    designation = designation.lstrip("-Jdb_")
    # Placeholder/missing values become the empty string.
    if designation.lower() == "na" or designation == "None":
        return ""
    return designation
19
+
20
def match_planned_to_plugged(planned, plugged, tol=1e-5):
    """
    Cross-match planned APOGEE plate holes to plugged-map entries by position.

    Matching uses per-axis tolerances on RA and Dec separately (not an
    angular separation), so ``tol`` bounds |Δra| and |Δdec| independently.

    :param planned:
        Table of planned holes with ``holetype``, ``target_ra``, ``target_dec``.
    :param plugged:
        Table of plugged entries with ``spectrographId``, ``ra``, ``dec``.
    :param tol:
        Maximum per-axis coordinate difference for a match (presumably in
        degrees — TODO confirm against the plate files).

    :returns:
        An ``hstack``-ed table of matched plugged rows joined with their
        planned rows (planned columns prefixed ``planned_``), or an empty
        list when there are no APOGEE holes to match.

    :raises RuntimeError:
        If any plugged hole matches more than one planned hole within ``tol``.
    """

    is_apogee = (
        (planned["holetype"] == "APOGEE")
        | (planned["holetype"] == "APOGEE_SHARED")
        | (planned["holetype"] == "APOGEE_SOUTH")
    )
    if not any(is_apogee):
        # NOTE(review): returns a list here but a Table below — callers must
        # handle both; confirm this asymmetry is intentional.
        return []

    planned = planned[is_apogee]
    # Spectrograph 2 is the APOGEE side of the plugged map.
    plugged = plugged[plugged["spectrographId"] == 2]

    # Pairwise |Δra| and |Δdec| between every planned and every plugged hole.
    ra_dist = cdist(
        planned["target_ra"].reshape((-1, 1)),
        plugged["ra"].reshape((-1, 1)),
    )
    dec_dist = cdist(
        planned["target_dec"].reshape((-1, 1)),
        plugged["dec"].reshape((-1, 1)),
    )

    meets_tolerance = (ra_dist < tol) & (dec_dist < tol)
    # Per plugged hole: how many planned holes fall within tolerance.
    n_matches_to_plugged_holes = np.sum(meets_tolerance, axis=0)

    N = np.sum(n_matches_to_plugged_holes > 1)
    if N > 0:
        raise RuntimeError("Cannot uniquely match plugged holes to planned holes!")

    # For each uniquely-matched plugged hole, pick the nearest planned hole.
    dist = np.sqrt(ra_dist**2 + dec_dist**2)
    has_match = (n_matches_to_plugged_holes == 1)
    planned_hole_indices = np.argmin(dist[:, has_match], axis=0)

    return hstack(
        [
            plugged[has_match],
            planned[planned_hole_indices]
        ],
        metadata_conflicts="silent",
        uniq_col_name="{table_name}{col_name}",
        table_names=("", "planned_")
    )
62
+
63
+
64
def get_headers(path, head=20_000):
    """
    Extract selected header keywords from the start of a raw exposure file.

    Instead of parsing the file with a FITS library, this dumps the first
    ``head`` bytes as printable characters via ``hexdump`` and greps for
    the keywords of interest, then parses the surviving ``KEY = value``
    lines.

    :param path:
        Path to the exposure file to inspect.
    :param head:
        Number of bytes to read from the start of the file.

    :returns:
        Dict mapping lower-cased keyword names to their string values
        (``None`` for keywords not found).
    """
    keys = (
        "FIELDID", "DESIGNID", "CONFIGID", "SEEING", "EXPTYPE",
        "NREAD", "IMAGETYP", "LAMPQRTZ", "LAMPTHAR", "LAMPUNE", "FOCUS",
        "NAME", "PLATEID", "CARTID", "MAPID", "PLATETYP", "OBSCMNT",
        "COLLPIST", "COLPITCH", "DITHPIX", "TCAMMID", "TLSDETB",
    )
    keys_str = "|".join(keys)

    # NOTE(review): `path` and `head` are interpolated directly into a
    # shell=True command string — fine for trusted local paths, but a
    # shell-injection vector if `path` ever comes from untrusted input
    # (consider shlex.quote).
    commands = " | ".join(
        ['hexdump -n {head} -e \'80/1 "%_p" "\\n"\' {path}', 'egrep "{keys_str}"']
    ).format(head=head, path=path, keys_str=keys_str)
    outputs = check_output(commands, shell=True, text=True)
    outputs = outputs.strip().split("\n")

    values = _parse_headers(outputs, keys)
    return dict(zip(map(str.lower, keys), values))
81
+
82
+
83
+
84
+ def _parse_headers(output: List[str], keys: Tuple[str, ...], default=None) -> List[str]:
85
+ """
86
+ Parse hexdump output to extract header key-value pairs.
87
+
88
+ :param output:
89
+ List of strings from hexdump output containing header information.
90
+ :param keys:
91
+ Tuple of header keys to look for in the output.
92
+ :param default:
93
+ Default value to use when a key is not found.
94
+
95
+ :returns:
96
+ List of header values corresponding to the input keys, with defaults for missing keys.
97
+ """
98
+ meta = [default] * len(keys)
99
+ for line in output:
100
+ try:
101
+ key, value = line.split("=", 2)
102
+ except ValueError: # grep'd something in the data
103
+ continue
104
+
105
+ key = key.strip()
106
+ if key in keys:
107
+ index = keys.index(key)
108
+ if "/" in value:
109
+ # could be comment
110
+ *parts, comment = value.split("/")
111
+ value = "/".join(parts)
112
+
113
+ value = value.strip("' ")
114
+ meta[index] = value.strip()
115
+ return meta
116
+
117
+
118
def get_exposure_path(observatory, mjd, prefix, exposure, chip):
    """
    Build the path to a raw APOGEE exposure file (``.apz``).

    :param observatory:
        Observatory name (e.g. ``apo`` or ``lco``).
    :param mjd:
        Modified Julian Date of the observation.
    :param prefix:
        Filename prefix for this kind of exposure.
    :param exposure:
        Exposure number or pre-formatted exposure string.
    :param chip:
        Chip identifier to include in the filename.

    :returns:
        The full path string under ``config.apogee_dir``.
    """
    exposure_str = get_exposure_string(mjd, exposure)
    filename = f"{prefix}-{chip}-{exposure_str}.apz"
    return f"{config.apogee_dir}/{observatory}/{mjd}/{filename}"
125
+
126
def mjd_to_exposure_prefix(mjd: int) -> int:
    """Convert MJD to exposure prefix.

    The exposure prefix is calculated as (MJD - 55562) * 10000, with a minimum of 0.

    :param mjd:
        Modified Julian Date (MJD) as an integer.

    :returns:
        Exposure prefix as an integer.
    """
    prefix = (int(mjd) - 55_562) * 10_000
    # Dates before the reference MJD clamp to zero.
    if prefix < 0:
        return 0
    return prefix
138
+
139
+
140
def get_exposure_string(mjd, exposure):
    """
    Return the exposure identifier as an eight-digit string.

    String inputs are assumed to be fully-formed identifiers and are passed
    through unchanged; integer inputs are offset by the MJD-derived exposure
    prefix and zero-padded to eight digits.
    """
    if not isinstance(exposure, str):
        exposure = f"{mjd_to_exposure_prefix(mjd) + exposure:08d}"
    return exposure
145
+
146
+
147
+
148
def input_id_to_designation(input_id: str) -> Tuple[str, str]:
    """
    Convert an input ID to a standardized designation format.

    The input identifier might be a 2MASS-style designation (in many different
    formats), or a Gaia DR2-style designation, or an input catalog identifier.

    :param input_id:
        The input ID string.

    :returns:
        A two-length tuple containing the designation type, and the cleaned
        designation identifier.
    """
    cleaned = str(input_id).strip().lower()
    if cleaned == "na":
        return ("", "")

    if cleaned.startswith("gaia"):
        # Expected form is e.g. "Gaia_DR2 1234567890".
        dr, source_id = cleaned.split(" ")
        dr = dr.split("_")[1].lstrip("dr")
        return (f"Gaia_DR{dr}", source_id)

    # Bug fix: compare against the lower-cased `cleaned` (was `input_id`),
    # so capital-J designations like "J0501..." are recognised as 2MASS.
    if cleaned.startswith("2m") or cleaned.startswith("j"):
        if cleaned.startswith("2mass"):
            cleaned = cleaned[5:]
        if cleaned.startswith("2m"):
            cleaned = cleaned[2:]
        # Bug fix: also strip whitespace so "2MASS J..." does not leave a
        # leading space on the designation.
        designation = cleaned.lstrip(" -jdb_")
        return ("2MASS", designation)

    try:
        # Validation only: np.int64 rejects non-numeric and >64-bit inputs.
        np.int64(cleaned)
    except (ValueError, OverflowError):
        return ("Unknown", input_id)
    else:
        return ("catalog", cleaned)
almanac/database.py ADDED
@@ -0,0 +1,22 @@
1
+ import sdssdb
2
+ from sdssdb.peewee.sdss5db import database
3
+
4
+ from time import time
5
+ from almanac import config, logger
6
+ from dataclasses import asdict
7
+
8
# Create a temporary sdssdb profile from the almanac settings and time how
# long the connection attempt takes (t starts negative so it ends up as the
# elapsed wall-clock seconds).
t = -time()
sdssdb.config.update(almanac=asdict(config.sdssdb))
# set_profile returns True when a usable connection was established.
is_database_available = database.set_profile("almanac", reuse_if_open=True)
t += time()

if not is_database_available:
    logger.warning(f"Unable to connect to SDSS database after {t:.1f} seconds")
elif t > float(config.database_connect_time_warning):
    # Connected, but slowly enough to be worth telling the user about.
    logger.warning(
        f"Took {t:.1f} s to connect to SDSS database.\n"
        f"You can suppress this warning with the `database_connect_time_warning` configuration."
    )
21
+
22
+ from sdssdb.peewee.sdss5db import catalogdb, opsdb
almanac/display.py ADDED
@@ -0,0 +1,422 @@
1
+ import logging
2
+ import numpy as np
3
+ from itertools import cycle
4
+ from datetime import datetime, timedelta
5
+ from rich.console import Console
6
+ from rich.table import Table
7
+ from rich.live import Live
8
+ from rich.text import Text
9
+ from rich.align import Align
10
+ from rich.table import Table as RichTable
11
+
12
+ from almanac import config
13
+ from almanac.data_models import Exposure
14
+ from typing import Optional, List, Tuple, Dict, Any
15
+
16
def mjd_to_datetime(mjd):
    """
    Return the naive datetime corresponding to a Modified Julian Date.

    The MJD zero-point is 1858-11-17 00:00; fractional MJDs map to times
    within the day.
    """
    return datetime(1858, 11, 17) + timedelta(days=mjd)
21
+
22
class BufferedHandler(logging.Handler):
    """Logging handler that stores records in memory until explicitly flushed."""

    def __init__(self):
        super().__init__()
        self.buffer = []

    def emit(self, record):
        """Buffer the record instead of emitting it immediately."""
        self.buffer.append(record)

    def flush_to_console(self, console=None):
        """Flush buffered records to console"""
        if console is None:
            console = Console()

        for record in self.buffer:
            message = self.format(record)

            # Pick a style from the record's severity (most severe first).
            level = record.levelno
            if level >= logging.ERROR:
                style = "red"
            elif level >= logging.WARNING:
                style = "yellow"
            else:
                style = "blue" if level >= logging.INFO else "dim"

            console.print(message, style=style)

        self.buffer.clear()
52
+
53
class ObservationsDisplay:
    """
    GitHub-contributions-style calendar view of APOGEE observation coverage.

    Each day in [start, end] is drawn as a colored square, one grid per
    calendar year, with colors indicating which observatories have completed
    data for that day.
    """

    # Rich color names for each day state.
    color_outside_range = "black"
    color_unknown = "white"
    color_no_data = "bright_black"
    color_apo = "dodger_blue3"
    color_lco = "green4"
    color_both = "purple4"
    color_missing = "red"

    def __init__(self, mjd_min, mjd_max, observatories=("apo", "lco")):
        """
        :param mjd_min:
            Start of the range; an MJD number or a datetime.
        :param mjd_max:
            End of the range; an MJD number or a datetime.
        :param observatories:
            Observatory keys to show in the legend.
        """
        self.console = Console()
        # Accept either MJD numbers or already-converted datetimes.
        self.start_date = mjd_to_datetime(mjd_min) if isinstance(mjd_min, (int, float)) else mjd_min
        self.end_date = mjd_to_datetime(mjd_max) if isinstance(mjd_max, (int, float)) else mjd_max
        self.days_per_week = 7

        # Track completion status for each day
        self.completed = dict(apo=set(), lco=set())
        self.no_data = dict(apo=set(), lco=set())
        self.missing = set()
        self.offset = 0
        # Setup logging buffer
        self.log_buffer = BufferedHandler()
        self.log_buffer.setFormatter(logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            datefmt='%H:%M:%S'
        ))

        # Calculate the grid dimensions based on the date range
        self._setup_grid()
        self.observatories = observatories

    def _setup_grid(self):
        """Setup the grid based on start and end dates, organized by years"""
        # Calculate which years we need to display
        self.start_year = self.start_date.year
        self.end_year = self.end_date.year
        self.year_rows = list(range(self.start_year, self.end_year + 1))

        # For each year, we'll create a full year grid (Jan 1 to Dec 31)
        # but pad the beginning if our data doesn't start on Jan 1
        self.year_grids = {}
        self.dates = []

        self.offset = 0
        for i, year in enumerate(self.year_rows):
            year_start = datetime(year, 1, 1)
            year_end = datetime(year, 12, 31)

            # Find Monday of the week containing Jan 1
            jan1_weekday = year_start.weekday()  # 0=Monday, 6=Sunday
            grid_start = year_start - timedelta(days=jan1_weekday)
            if i == 0:
                # Days between the padded grid origin and our first real day.
                self.offset = (self.start_date - grid_start).days

            # Find Sunday of the week containing Dec 31
            dec31_weekday = year_end.weekday()  # 0=Monday, 6=Sunday
            days_to_sunday = 6 - dec31_weekday
            grid_end = year_end + timedelta(days=days_to_sunday)

            # Generate dates for this year's grid
            year_dates = []
            current_date = grid_start
            while current_date <= grid_end:
                year_dates.append(current_date)
                current_date += timedelta(days=1)

            # Calculate weeks for this year
            total_days = len(year_dates)
            weeks = total_days // 7

            self.year_grids[year] = {
                'dates': year_dates,
                'weeks': weeks,
                'grid_start': grid_start,
                'grid_end': grid_end,
                'year_start': year_start,
                'year_end': year_end
            }

            # Add to master dates list with offset
            # NOTE(review): this inner loop shadows the outer `i` and simply
            # appends every date — padding days can appear in `self.dates`
            # for consecutive years; confirm that is intended.
            for i, date in enumerate(year_dates):
                self.dates.append(date)


    def get_day_color(self, day_index):
        """Return the color for a given day based on completion status"""
        if day_index >= len(self.dates):
            return self.color_outside_range

        date = self.dates[day_index]

        # Only show colored squares for dates within our actual range
        if date < self.start_date or date > self.end_date:
            return self.color_outside_range

        # Precedence: missing > both observatories > single observatory >
        # confirmed no-data > unknown.
        if day_index in self.missing:
            return self.color_missing
        elif day_index in self.completed["apo"] and day_index in self.completed["lco"]:
            return self.color_both
        elif day_index in self.completed["apo"]:
            return self.color_apo
        elif day_index in self.completed["lco"]:
            return self.color_lco
        elif day_index in self.no_data["apo"] and day_index in self.no_data["lco"]:
            return self.color_no_data
        else:
            return self.color_unknown

    def _get_month_headers_for_year(self, year):
        """Generate month headers for a specific year"""
        year_data = self.year_grids[year]
        weeks = year_data['weeks']
        year_dates = year_data['dates']

        headers = [" "]  # Space for day labels
        current_month = None
        text_to_add = ""

        for week in range(weeks):
            week_start_index = week * 7

            if week_start_index < len(year_dates):
                week_date = year_dates[week_start_index]

                # Only show month headers for dates within the actual year
                if week_date.year == year:
                    month_abbr = week_date.strftime("%b")

                    # Only show month if it's different from previous week
                    if current_month != month_abbr:
                        headers.append(Text(f"{month_abbr[:1]}", style="dim"))
                        text_to_add = month_abbr[1:]
                        current_month = month_abbr
                    else:
                        # Continue spelling out the month name one character
                        # per week column.
                        if len(text_to_add) > 0:
                            headers.append(Text(text_to_add[:1], style="dim"))
                            text_to_add = text_to_add[1:]
                        else:
                            headers.append(Text(" "))
                else:
                    # This is padding (before Jan 1 or after Dec 31)
                    headers.append(Text(" "))
            else:
                headers.append(Text(" "))

        return headers

    def create_contributions_grid_for_year(self, year):
        """Create the contributions grid for a specific year"""
        year_data = self.year_grids[year]
        weeks = year_data['weeks']
        year_dates = year_data['dates']

        table = Table.grid(padding=0)

        # Add columns for day labels and each week in this year
        table.add_column()  # For day labels
        for _ in range(weeks):
            table.add_column()

        # Add month headers
        month_headers = self._get_month_headers_for_year(year)
        table.add_row(*month_headers)

        # Create rows for each day of the week
        day_names = ["S", "M", "T", "W", "T", "F", "S"]

        for day_of_week in range(self.days_per_week):
            row = [Text(day_names[day_of_week].ljust(3), style="dim")]

            for week in range(weeks):
                day_index_in_year = week * 7 + day_of_week

                if day_index_in_year < len(year_dates):
                    date = year_dates[day_index_in_year]

                    # Find this date in our master dates list to get the right index
                    # NOTE(review): linear scan per cell — O(days^2) overall,
                    # acceptable for a few years of dates.
                    master_day_index = None
                    for i, master_date in enumerate(self.dates):
                        if master_date == date:
                            master_day_index = i
                            break

                    if master_day_index is not None:
                        color = self.get_day_color(master_day_index)

                        # Show square only for dates within the actual year and our date range
                        if (date.year == year and
                            self.start_date <= date <= self.end_date):
                            square = Text("■", style=color)
                        elif date.year == year:
                            # Within the year but outside our date range
                            square = Text("■", style=self.color_no_data)
                        else:
                            # Padding dates (before Jan 1 or after Dec 31)
                            square = Text(" ")
                    else:
                        square = Text(" ")
                else:
                    square = Text(" ")

                row.append(square)

            table.add_row(*row)

        return table

    def create_display(self):
        """Create the complete display with title and yearly grids"""
        date_range = f"{self.start_date.strftime('%b %d, %Y')} - {self.end_date.strftime('%b %d, %Y')}"
        title = Text("SDSS/APOGEE Observations", style="bold white")
        subtitle = Text(date_range, style="dim")

        # Create legend
        legend = Table.grid(padding=(0, 1))
        legend.add_column()
        legend.add_column()
        legend.add_column()
        legend.add_column()
        legend.add_column()

        items = [
            Text("■", style=self.color_no_data),
            Text("None", style="dim"),
        ]
        if "apo" in self.observatories:
            items.extend([
                Text("■", style=self.color_apo),
                Text("APO", style="dim"),
            ])
        if "lco" in self.observatories:
            items.extend([
                Text("■", style=self.color_lco),
                Text("LCO", style="dim"),
            ])

        if "apo" in self.observatories and "lco" in self.observatories:
            items.extend([
                Text("■", style=self.color_both),
                Text("Both", style="dim"),
            ])
        legend.add_row(*items)

        # Combine everything
        main_table = Table.grid()
        main_table.add_column()
        main_table.add_row(Align.center(title))
        main_table.add_row(Align.center(subtitle))
        main_table.add_row("")

        # Add each year's grid with year header
        for i, year in enumerate(self.year_rows):
            # Add year header
            year_header = Text(str(year), style="bold cyan")
            main_table.add_row(Align.left(year_header))

            # Add the grid for this year
            year_grid = self.create_contributions_grid_for_year(year)
            main_table.add_row(Align.center(year_grid))

            # Add spacing between years except after the last one
            if i < len(self.year_rows) - 1:
                main_table.add_row("")

        main_table.add_row("")
        main_table.add_row(Align.center(legend))

        return main_table

    def add_observation(self, date, observatory):
        """Add an observation for a specific date and observatory"""
        # Find the day index for this date
        for i, grid_date in enumerate(self.dates):
            if grid_date.date() == date.date():
                self.completed[observatory].add(i)
                break
329
+
330
+
331
def display_exposures(
    exposures: List[Exposure],
    sequences: Optional[Dict[str, List[Tuple[int, int]]]] = None,
    console: Optional[Console] = None,
    header_style: str = "bold cyan",
    column_names: Optional[List[str]] = None,
    sequence_styles: Tuple[str, ...] = ("green", "yellow"),
    missing_style: str = "blink bold red",
    title_style: str = "bold blue",
) -> None:
    """Display exposure information using Rich table formatting.

    Args:
        exposures: List of Exposure objects containing exposure data
        sequences: Dictionary mapping sequence names to lists of (start, end) tuples (default: None)
        console: Rich Console instance (default: None, creates new one)
        header_style: Style for table headers (default: "bold cyan")
        column_names: Exposure field names to show as columns; falls back to
            ``config.display_field_names`` when None (default: None)
        sequence_styles: Tuple of styles to cycle through for sequences (default: ("green", "yellow"))
        missing_style: Style for missing/error entries (default: "blink bold red")
        title_style: Style for the table title (default: "bold blue")
    """
    if console is None:
        console = Console()

    if len(exposures) == 0:
        return

    # Create the title from the first exposure's observatory and MJD.
    observatory, mjd = (exposures[0].observatory, exposures[0].mjd)
    title = f"{len(exposures)} exposures from {observatory.upper()} on MJD {mjd}"

    # Create Rich table
    rich_table = RichTable(title=title, title_style=title_style, show_header=True, header_style=header_style)

    # Bug fix: `column_names` was previously accepted but ignored; honor it
    # when provided, otherwise use the configured defaults.
    field_names = column_names if column_names is not None else config.display_field_names

    for field_name in field_names:
        rich_table.add_column(field_name, justify="center")

    # Flatten all sequence (start, end) pairs into one (n, 2) array.
    flattened_sequences = []
    for k, v in (sequences or dict()).items():
        flattened_sequences.extend(v)
    flattened_sequences = np.array(flattened_sequences)

    sequence_styles_cycle = cycle(sequence_styles)
    in_sequence, current_sequence_style = (False, next(sequence_styles_cycle))

    # Add rows to the table; rows are 1-indexed to match sequence bounds.
    for i, exposure in enumerate(exposures, start=1):
        row_style = None
        end_of_sequence = None
        if len(flattened_sequences) > 0:
            # Column 0 holds sequence starts, column 1 holds sequence ends.
            # (Previously wrapped in a bare `except:` — np.where cannot fail
            # here, so the guard was removed.)
            _, k = np.where(flattened_sequences == i)
            # Could be start or end of sequence, and could be out of order
            start_of_sequence = 0 in k
            end_of_sequence = 1 in k

            if start_of_sequence:
                in_sequence = True
                current_sequence_style = next(sequence_styles_cycle)
            elif end_of_sequence:  # only end of sequence
                in_sequence = True

        # Determine row style
        if in_sequence:
            row_style = current_sequence_style
        elif exposure.image_type == "missing":
            # Check if it's missing or has issues
            row_style = missing_style

        # Convert row data to strings and apply styling if needed
        row_data = []
        for field_name in field_names:
            value = getattr(exposure, field_name)
            if row_style:
                row_data.append(Text(f"{value}", style=row_style))
            else:
                row_data.append(f"{value}")

        rich_table.add_row(*row_data)
        if end_of_sequence:
            in_sequence = False

    console.print(rich_table)
    console.print()  # Add a blank line after the table
File without changes