compass-lib 0.0.2__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,842 @@
1
+ # -*- coding: utf-8 -*-
2
+ """Parser for Compass .DAT survey data files.
3
+
4
+ This module implements the parser for reading Compass survey data files,
5
+ which contain cave survey measurements organized into trips.
6
+
7
+ Architecture: The parser produces dictionaries (like loading JSON) which are
8
+ then fed to Pydantic models via a single `model_validate()` call. This keeps
9
+ parsing logic separate from model construction.
10
+ """
11
+
12
+ import re
13
+ from datetime import date
14
+ from pathlib import Path
15
+ from typing import Any
16
+
17
+ from compass_lib.constants import ASCII_ENCODING
18
+ from compass_lib.constants import MISSING_ANGLE_THRESHOLD
19
+ from compass_lib.constants import MISSING_VALUE_THRESHOLD
20
+ from compass_lib.enums import AzimuthUnit
21
+ from compass_lib.enums import InclinationUnit
22
+ from compass_lib.enums import LengthUnit
23
+ from compass_lib.enums import LrudAssociation
24
+ from compass_lib.enums import LrudItem
25
+ from compass_lib.enums import Severity
26
+ from compass_lib.enums import ShotItem
27
+ from compass_lib.errors import CompassParseError
28
+ from compass_lib.errors import SourceLocation
29
+ from compass_lib.survey.models import CompassDatFile
30
+ from compass_lib.validation import days_in_month
31
+
32
+
33
+ class CompassSurveyParser:
34
+ """Parser for Compass .DAT survey data files.
35
+
36
+ This parser reads Compass survey data files and produces dictionaries
37
+ (like loading JSON from disk). The dictionaries can then be fed to
38
+ Pydantic models via a single `model_validate()` call.
39
+
40
+ Errors are collected rather than raised, allowing partial parsing
41
+ of malformed files.
42
+
43
+ Attributes:
44
+ errors: List of parsing errors and warnings encountered
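+
+ Example (a minimal sketch; ``cave.dat`` is a placeholder path):
+
+ parser = CompassSurveyParser()
+ data = parser.parse_file_to_dict(Path("cave.dat"))
+ dat_file = CompassDatFile.model_validate(data)
+ problems = parser.errors  # errors/warnings collected during parsing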
45
+ """
46
+
47
+ # Regex patterns
48
+ EOL = re.compile(r"\r\n|\r|\n")
49
+ COLUMN_HEADER = re.compile(
50
+ r"^\s*FROM\s+TO[^\r\n]+(\r\n|\r|\n){2}",
51
+ re.MULTILINE | re.IGNORECASE,
52
+ )
53
+ NON_WHITESPACE = re.compile(r"\S+")
54
+ HEADER_FIELDS = re.compile(
55
+ r"SURVEY (NAME|DATE|TEAM):|COMMENT:|DECLINATION:|FORMAT:|CORRECTIONS2?:|FROM",
56
+ re.IGNORECASE,
57
+ )
58
+
59
+ def __init__(self) -> None:
60
+ """Initialize a new parser with empty error list."""
61
+ self.errors: list[CompassParseError] = []
62
+ self._source: str = "<string>"
63
+
64
+ def _add_error(
65
+ self,
66
+ message: str,
67
+ text: str = "",
68
+ line: int = 0,
69
+ column: int = 0,
70
+ ) -> None:
71
+ """Add an error to the error list."""
72
+ self.errors.append(
73
+ CompassParseError(
74
+ severity=Severity.ERROR,
75
+ message=message,
76
+ location=SourceLocation(
77
+ source=self._source,
78
+ line=line,
79
+ column=column,
80
+ text=text,
81
+ ),
82
+ )
83
+ )
84
+
85
+ def _add_warning(
86
+ self,
87
+ message: str,
88
+ text: str = "",
89
+ line: int = 0,
90
+ column: int = 0,
91
+ ) -> None:
92
+ """Add a warning to the error list."""
93
+ self.errors.append(
94
+ CompassParseError(
95
+ severity=Severity.WARNING,
96
+ message=message,
97
+ location=SourceLocation(
98
+ source=self._source,
99
+ line=line,
100
+ column=column,
101
+ text=text,
102
+ ),
103
+ )
104
+ )
105
+
106
+ # -------------------------------------------------------------------------
107
+ # Dictionary-returning methods (primary API)
108
+ # -------------------------------------------------------------------------
109
+
110
+ def parse_file_to_dict(self, path: Path) -> dict[str, Any]:
111
+ """Parse a Compass survey data file to dictionary.
112
+
113
+ This is the primary parsing method. It returns a dictionary that
114
+ can be directly fed to `CompassDatFile.model_validate()`.
115
+
116
+ Args:
117
+ path: Path to the .DAT file
118
+
119
+ Returns:
120
+ Dictionary with "trips" key containing list of trip dicts
121
+ """
122
+ self._source = str(path)
123
+ with path.open(mode="r", encoding=ASCII_ENCODING, errors="replace") as f:
124
+ data = f.read()
125
+ return self.parse_string_to_dict(data, str(path))
126
+
127
+ def parse_string_to_dict(
128
+ self,
129
+ data: str,
130
+ source: str = "<string>",
131
+ ) -> dict[str, Any]:
132
+ """Parse survey data from a string to dictionary.
133
+
134
+ Args:
135
+ data: Survey data as string
136
+ source: Source identifier for error messages
137
+
138
+ Returns:
139
+ Dictionary with "trips" key containing list of trip dicts
140
+ """
141
+ self._source = source
142
+ trips: list[dict[str, Any]] = []
143
+
144
+ # Split on form feed character
145
+ sections = data.split("\f")
146
+ for section in sections:
147
+ _section = section.strip()
148
+ if _section:
149
+ trip = self._parse_trip_to_dict(_section)
150
+ if trip:
151
+ trips.append(trip)
152
+
153
+ return {"trips": trips}
154
+
155
+ # -------------------------------------------------------------------------
156
+ # Legacy model-returning methods (thin wrappers for backwards compat)
157
+ # -------------------------------------------------------------------------
158
+
159
+ def parse_file(self, path: Path) -> list["CompassTrip"]: # noqa: F821
160
+ """Parse a Compass survey data file.
161
+
162
+ DEPRECATED: Use parse_file_to_dict() for new code.
163
+
164
+ Args:
165
+ path: Path to the .DAT file
166
+
167
+ Returns:
168
+ List of parsed trips
169
+ """
170
+
171
+ data = self.parse_file_to_dict(path)
172
+ dat_file = CompassDatFile.model_validate(data)
173
+ return dat_file.trips
174
+
175
+ def parse_string(
176
+ self,
177
+ data: str,
178
+ source: str = "<string>",
179
+ ) -> list["CompassTrip"]: # noqa: F821
180
+ """Parse survey data from a string.
181
+
182
+ DEPRECATED: Use parse_string_to_dict() for new code.
183
+
184
+ Args:
185
+ data: Survey data as string
186
+ source: Source identifier for error messages
187
+
188
+ Returns:
189
+ List of parsed trips
190
+ """
191
+
192
+ parsed = self.parse_string_to_dict(data, source)
193
+ dat_file = CompassDatFile.model_validate(parsed)
194
+ return dat_file.trips
195
+
196
+ def _split_header_and_data(self, text: str) -> tuple[str, str]:
197
+ """Split trip text into header and shot data sections.
198
+
199
+ Args:
200
+ text: Complete trip text
201
+
202
+ Returns:
203
+ Tuple of (header_text, data_text)
204
+ """
205
+ match = self.COLUMN_HEADER.search(text)
206
+ if match:
207
+ header_end = match.end()
208
+ return text[:header_end].strip(), text[header_end:].strip()
209
+ return text.strip(), ""
210
+
211
+ def _parse_trip_to_dict(self, text: str) -> dict[str, Any] | None:
212
+ """Parse a single trip from text to dictionary.
213
+
214
+ Args:
215
+ text: Trip text (header + shots)
216
+
217
+ Returns:
218
+ Dictionary with "header" and "shots" keys, or None if parsing fails
219
+ """
220
+ header_text, data_text = self._split_header_and_data(text)
221
+
222
+ header = self._parse_trip_header_to_dict(header_text)
223
+ if header is None:
224
+ return None
225
+
226
+ shots: list[dict[str, Any]] = []
227
+ if data_text:
228
+ for line in self.EOL.split(data_text):
229
+ _line = line.strip()
230
+ if _line:
231
+ if shot := self._parse_shot_to_dict(_line, header):
232
+ shots.append(shot)
233
+
234
+ return {"header": header, "shots": shots}
235
+
236
+ def _parse_trip_header_to_dict(self, text: str) -> dict[str, Any] | None:
237
+ """Parse trip header from text to dictionary.
238
+
239
+ Args:
240
+ text: Header text
241
+
242
+ Returns:
243
+ Dictionary with header fields, or None if parsing fails
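+
+ Illustrative header text (cave, survey, and team names are made up):
+ MY CAVE
+ SURVEY NAME: A
+ SURVEY DATE: 7 14 87  COMMENT: Entrance Passage
+ SURVEY TEAM:
+ Dave, Sue
+ DECLINATION: 1.00  FORMAT: DMMDLRUDLADBF  CORRECTIONS: 2.00 3.00 4.00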
244
+ """
245
+ lines = self.EOL.split(text, maxsplit=1)
246
+ if len(lines) < 2:
247
+ return None
248
+
249
+ # Build header dictionary
250
+ header: dict[str, Any] = {
251
+ "cave_name": lines[0].strip(),
252
+ "survey_name": None,
253
+ "date": None,
254
+ "comment": None,
255
+ "team": None,
256
+ "declination": 0.0,
257
+ "length_unit": LengthUnit.DECIMAL_FEET.value,
258
+ "lrud_unit": LengthUnit.DECIMAL_FEET.value,
259
+ "azimuth_unit": AzimuthUnit.DEGREES.value,
260
+ "inclination_unit": InclinationUnit.DEGREES.value,
261
+ "lrud_order": [
262
+ LrudItem.LEFT.value,
263
+ LrudItem.RIGHT.value,
264
+ LrudItem.UP.value,
265
+ LrudItem.DOWN.value,
266
+ ],
267
+ "shot_measurement_order": [
268
+ ShotItem.LENGTH.value,
269
+ ShotItem.FRONTSIGHT_AZIMUTH.value,
270
+ ShotItem.FRONTSIGHT_INCLINATION.value,
271
+ ],
272
+ "has_backsights": True,
273
+ "lrud_association": LrudAssociation.FROM.value,
274
+ "length_correction": 0.0,
275
+ "frontsight_azimuth_correction": 0.0,
276
+ "frontsight_inclination_correction": 0.0,
277
+ "backsight_azimuth_correction": 0.0,
278
+ "backsight_inclination_correction": 0.0,
279
+ }
280
+
281
+ # Extract fields using regex
282
+ rest = lines[1]
283
+ fields = self._extract_fields(rest)
284
+
285
+ for field_name, field_value in fields:
286
+ field_upper = field_name.upper()
287
+ value = field_value.strip()
288
+
289
+ if field_upper == "SURVEY NAME:":
290
+ match = self.NON_WHITESPACE.search(value)
291
+ if match:
292
+ header["survey_name"] = match.group()
293
+
294
+ elif field_upper == "SURVEY DATE:":
295
+ parsed_date = self._parse_date(value)
296
+ if parsed_date:
297
+ header["date"] = parsed_date.isoformat()
298
+
299
+ elif field_upper == "COMMENT:":
300
+ header["comment"] = value if value else None
301
+
302
+ elif field_upper == "SURVEY TEAM:":
303
+ header["team"] = value if value else None
304
+
305
+ elif field_upper == "DECLINATION:":
306
+ dec = self._parse_measurement(value)
307
+ if dec is not None:
308
+ header["declination"] = dec
309
+
310
+ elif field_upper == "FORMAT:":
311
+ self._parse_shot_format_to_dict(header, value)
312
+
313
+ elif field_upper == "CORRECTIONS:":
314
+ parts = self.NON_WHITESPACE.findall(value)
315
+ if len(parts) >= 1:
316
+ val = self._parse_measurement(parts[0])
317
+ if val is not None:
318
+ header["length_correction"] = val
319
+ if len(parts) >= 2:
320
+ val = self._parse_measurement(parts[1])
321
+ if val is not None:
322
+ header["frontsight_azimuth_correction"] = val
323
+ if len(parts) >= 3:
324
+ val = self._parse_measurement(parts[2])
325
+ if val is not None:
326
+ header["frontsight_inclination_correction"] = val
327
+
328
+ elif field_upper == "CORRECTIONS2:":
329
+ parts = self.NON_WHITESPACE.findall(value)
330
+ if len(parts) >= 1:
331
+ val = self._parse_measurement(parts[0])
332
+ if val is not None:
333
+ header["backsight_azimuth_correction"] = val
334
+ if len(parts) >= 2:
335
+ val = self._parse_measurement(parts[1])
336
+ if val is not None:
337
+ header["backsight_inclination_correction"] = val
338
+
339
+ return header
340
+
341
+ def _extract_fields(self, text: str) -> list[tuple[str, str]]:
342
+ """Extract field name-value pairs from header text.
343
+
344
+ Args:
345
+ text: Header text after cave name
346
+
347
+ Returns:
348
+ List of (field_name, field_value) tuples
349
+ """
350
+ fields: list[tuple[str, str]] = []
351
+ matches = list(self.HEADER_FIELDS.finditer(text))
352
+
353
+ for i, match in enumerate(matches):
354
+ field_name = match.group()
355
+ start = match.end()
356
+ end = matches[i + 1].start() if i + 1 < len(matches) else len(text)
357
+ field_value = text[start:end]
358
+ fields.append((field_name, field_value))
359
+
360
+ return fields
361
+
362
+ def _parse_date(self, text: str) -> date | None: # noqa: PLR0911
363
+ """Parse date from text (month day year format).
364
+
365
+ Args:
366
+ text: Date text
367
+
368
+ Returns:
369
+ Parsed date or None if invalid
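+
+ Examples (illustrative):
+ "7 14 87"   -> date(1987, 7, 14)   (2-digit years are offset from 1900)
+ "13 1 1999" -> None                (month out of range, error recorded)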
370
+ """
371
+ parts = self.NON_WHITESPACE.findall(text)
372
+ if len(parts) < 3:
373
+ self._add_error(f"incomplete date: {text}", text)
374
+ return None
375
+
376
+ try:
377
+ month = int(parts[0])
378
+ day = int(parts[1])
379
+ year = int(parts[2])
380
+ except ValueError:
381
+ self._add_error(f"invalid date: {text}", text)
382
+ return None
383
+
384
+ # Handle 2-digit years (years < 100 are treated as offset from 1900)
385
+ if year < 100:
386
+ year += 1900
387
+
388
+ # Validate ranges with leap year aware day validation
389
+ if not 1 <= month <= 12:
390
+ self._add_error(f"month must be between 1 and 12: {month}", parts[0])
391
+ return None
392
+ if year < 0:
393
+ self._add_error(f"year must be >= 0: {year}", parts[2])
394
+ return None
395
+
396
+ max_day = days_in_month(month, year)
397
+ if not 1 <= day <= max_day:
398
+ self._add_error(
399
+ f"day must be between 1 and {max_day} for month {month}: {day}",
400
+ parts[1],
401
+ )
402
+ return None
403
+
404
+ try:
405
+ return date(year, month, day)
406
+ except ValueError as e:
407
+ self._add_error(f"invalid date: {e}", text)
408
+ return None
409
+
410
+ def _parse_measurement(self, text: str) -> float | None:
411
+ """Parse a numeric measurement from text.
412
+
413
+ Values >= 990 indicate missing data.
414
+
415
+ Args:
416
+ text: Measurement text
417
+
418
+ Returns:
419
+ Parsed value or None if missing/invalid
420
+ """
421
+ try:
422
+ value = float(text)
423
+ if value >= MISSING_VALUE_THRESHOLD:
424
+ return None
425
+ except ValueError:
426
+ return None
427
+
428
+ return value
429
+
430
+ def _parse_angle_measurement(self, text: str) -> float | None:
431
+ """Parse an angle measurement from text.
432
+
433
+ Values < -900 or >= 990 indicate missing data.
434
+
435
+ Args:
436
+ text: Measurement text
437
+
438
+ Returns:
439
+ Parsed value or None if missing/invalid
440
+ """
441
+ try:
442
+ value = float(text)
443
+ if value < MISSING_ANGLE_THRESHOLD:
444
+ return None
445
+ if value >= MISSING_VALUE_THRESHOLD:
446
+ return None
447
+
448
+ except ValueError:
449
+ return None
450
+
451
+ return value
452
+
453
+ def _parse_azimuth(self, text: str) -> float | None:
454
+ """Parse azimuth measurement with validation.
455
+
456
+ Args:
457
+ text: Azimuth text
458
+
459
+ Returns:
460
+ Parsed value or None if invalid
461
+ """
462
+ value = self._parse_angle_measurement(text)
463
+ if value is None:
464
+ return None
465
+ if value < 0 or value >= 360:
466
+ self._add_error(
467
+ f"azimuth must be >= 0 and < 360, got {value}",
468
+ text,
469
+ )
470
+ # Still return the value for continued parsing
471
+ return value
472
+
473
+ def _parse_inclination(self, text: str) -> float | None:
474
+ """Parse inclination measurement with validation.
475
+
476
+ Args:
477
+ text: Inclination text
478
+
479
+ Returns:
480
+ Parsed value or None if invalid
481
+ """
482
+ value = self._parse_angle_measurement(text)
483
+ if value is None:
484
+ return None
485
+ if value < -90 or value > 90:
486
+ self._add_error(
487
+ f"inclination must be between -90 and 90, got {value}",
488
+ text,
489
+ )
490
+ # Still return the value for continued parsing
491
+ return value
492
+
493
+ def _parse_distance(self, text: str) -> float | None:
494
+ """Parse distance measurement with validation (must be >= 0).
495
+
496
+ Args:
497
+ text: Distance text
498
+
499
+ Returns:
500
+ Parsed value or None if invalid
501
+ """
502
+ value = self._parse_measurement(text)
503
+ if value is None:
504
+ return None
505
+ if value < 0:
506
+ self._add_error(f"distance must be >= 0, got {value}", text)
507
+ return value
508
+
509
+ def _parse_lrud(self, text: str) -> float | None:
510
+ """Parse LRUD measurement with validation.
511
+
512
+ Values < -1 or > 990 indicate missing data.
513
+ Values between -1 and 0 generate an error.
514
+
515
+ Args:
516
+ text: LRUD text
517
+
518
+ Returns:
519
+ Parsed value or None if missing
520
+ """
521
+ try:
522
+ value = float(text)
523
+ except ValueError:
524
+ return None
525
+
526
+ # Missing data indicators
527
+ if value < -1 or value > MISSING_VALUE_THRESHOLD:
528
+ return None
529
+
530
+ # Values between -1 and 0 are errors but still parse
531
+ if value < 0:
532
+ self._add_error(f"LRUD must be >= 0, got {value}", text)
533
+
534
+ return value
535
+
536
+ def _parse_shot_format_to_dict(
537
+ self,
538
+ header: dict[str, Any],
539
+ format_str: str,
540
+ ) -> None:
541
+ """Parse the FORMAT string and update header dictionary.
542
+
543
+ Format string structure:
544
+ - Position 0: Azimuth unit (D/Q/R)
545
+ - Position 1: Length unit (D/I/M)
546
+ - Position 2: LRUD unit (D/I/M)
547
+ - Position 3: Inclination unit (D/G/M/R/W)
548
+ - Positions 4-7: LRUD order (L/R/U/D)
549
+ - Positions 8-10 or 8-12: Shot order (L/A/D/a/d)
550
+ - Optional B/N: Has backsights (B = yes, N = no)
551
+ - Optional F/T: LRUD association
552
+
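+ Example (hypothetical format string ``DMMDLRUDLADBF``):
+ D = azimuth in degrees, M = lengths in meters, M = LRUDs in meters,
+ D = inclination in degrees, then LRUD order L,R,U,D, shot order L,A,D,
+ B = has backsights, F = LRUDs associated with the FROM station.
+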
553
+ Args:
554
+ header: Header dictionary to update
555
+ format_str: Format string
556
+ """
557
+ format_str = format_str.strip()
558
+ if len(format_str) < 11:
559
+ self._add_error(
560
+ f"format must be at least 11 characters, got {len(format_str)}",
561
+ format_str,
562
+ )
563
+ return
564
+
565
+ i = 0
566
+
567
+ # Azimuth unit
568
+ header["azimuth_unit"] = self._parse_azimuth_unit(format_str[i]).value
569
+ i += 1
570
+
571
+ # Length unit
572
+ header["length_unit"] = self._parse_length_unit_char(format_str[i]).value
573
+ i += 1
574
+
575
+ # LRUD unit
576
+ header["lrud_unit"] = self._parse_length_unit_char(format_str[i]).value
577
+ i += 1
578
+
579
+ # Inclination unit
580
+ header["inclination_unit"] = self._parse_inclination_unit(format_str[i]).value
581
+ i += 1
582
+
583
+ # LRUD order (4 characters)
584
+ lrud_order: list[str] = []
585
+ for j in range(4):
586
+ if i + j < len(format_str):
587
+ item = self._parse_lrud_item(format_str[i + j])
588
+ if item:
589
+ lrud_order.append(item.value)
590
+ if len(lrud_order) == 4:
591
+ header["lrud_order"] = lrud_order
592
+ i += 4
593
+
594
+ # Shot measurement order (3 or 5 characters)
595
+ shot_order_len = 5 if len(format_str) >= 15 else 3
596
+ shot_order: list[str] = []
597
+ for j in range(shot_order_len):
598
+ if i + j < len(format_str):
599
+ item = self._parse_shot_item(format_str[i + j])
600
+ if item:
601
+ shot_order.append(item.value)
602
+ if shot_order:
603
+ header["shot_measurement_order"] = shot_order
604
+ i += shot_order_len
605
+
606
+ # Has backsights flag (B = yes, N = no; any other character means no
+ # backsights and is not consumed, so it is re-checked as the LRUD association)
607
+ if i < len(format_str):
608
+ char = format_str[i].upper()
609
+ if char == "B":
610
+ header["has_backsights"] = True
611
+ i += 1
612
+ elif char == "N":
613
+ header["has_backsights"] = False
614
+ i += 1
615
+ else:
616
+ # Assume no backsights if char is not B or N
617
+ header["has_backsights"] = False
618
+
619
+ # LRUD association
620
+ if i < len(format_str):
621
+ assoc = self._parse_lrud_association(format_str[i])
622
+ if assoc:
623
+ header["lrud_association"] = assoc.value
624
+
625
+ def _parse_azimuth_unit(self, char: str) -> AzimuthUnit:
626
+ """Parse azimuth unit character."""
627
+ char = char.upper()
628
+ if char == "D":
629
+ return AzimuthUnit.DEGREES
630
+ if char == "Q":
631
+ return AzimuthUnit.QUADS
632
+ if char == "R":
633
+ return AzimuthUnit.GRADS
634
+ self._add_error(f"unrecognized azimuth unit: {char}", char)
635
+ return AzimuthUnit.DEGREES
636
+
637
+ def _parse_length_unit_char(self, char: str) -> LengthUnit:
638
+ """Parse length unit character."""
639
+ char = char.upper()
640
+ if char == "D":
641
+ return LengthUnit.DECIMAL_FEET
642
+ if char == "I":
643
+ return LengthUnit.FEET_AND_INCHES
644
+ if char == "M":
645
+ return LengthUnit.METERS
646
+ self._add_error(f"unrecognized length unit: {char}", char)
647
+ return LengthUnit.DECIMAL_FEET
648
+
649
+ def _parse_inclination_unit(self, char: str) -> InclinationUnit:
650
+ """Parse inclination unit character."""
651
+ char = char.upper()
652
+ if char == "D":
653
+ return InclinationUnit.DEGREES
654
+ if char == "G":
655
+ return InclinationUnit.PERCENT_GRADE
656
+ if char == "M":
657
+ return InclinationUnit.DEGREES_AND_MINUTES
658
+ if char == "R":
659
+ return InclinationUnit.GRADS
660
+ if char == "W":
661
+ return InclinationUnit.DEPTH_GAUGE
662
+ self._add_error(f"unrecognized inclination unit: {char}", char)
663
+ return InclinationUnit.DEGREES
664
+
665
+ def _parse_lrud_item(self, char: str) -> LrudItem | None:
666
+ """Parse LRUD item character."""
667
+ char = char.upper()
668
+ if char == "L":
669
+ return LrudItem.LEFT
670
+ if char == "R":
671
+ return LrudItem.RIGHT
672
+ if char == "U":
673
+ return LrudItem.UP
674
+ if char == "D":
675
+ return LrudItem.DOWN
676
+ self._add_error(f"unrecognized LRUD item: {char}", char)
677
+ return None
678
+
679
+ def _parse_shot_item(self, char: str) -> ShotItem | None:
680
+ """Parse shot item character (case-sensitive for backsights)."""
681
+ if char == "L":
682
+ return ShotItem.LENGTH
683
+ if char == "A":
684
+ return ShotItem.FRONTSIGHT_AZIMUTH
685
+ if char == "D":
686
+ return ShotItem.FRONTSIGHT_INCLINATION
687
+ if char == "a":
688
+ return ShotItem.BACKSIGHT_AZIMUTH
689
+ if char == "d":
690
+ return ShotItem.BACKSIGHT_INCLINATION
691
+ self._add_error(f"unrecognized shot item: {char}", char)
692
+ return None
693
+
694
+ def _parse_lrud_association(self, char: str) -> LrudAssociation | None:
695
+ """Parse LRUD association character."""
696
+ char = char.upper()
697
+ if char == "F":
698
+ return LrudAssociation.FROM
699
+ if char == "T":
700
+ return LrudAssociation.TO
701
+ self._add_error(f"unrecognized LRUD association: {char}", char)
702
+ return None
703
+
704
+ def _parse_shot_to_dict(
705
+ self,
706
+ line: str,
707
+ header: dict[str, Any],
708
+ ) -> dict[str, Any] | None:
709
+ """Parse a single shot line to dictionary.
710
+
711
+ Args:
712
+ line: Shot line text
713
+ header: Trip header dictionary with format information
714
+
715
+ Returns:
716
+ Shot dictionary or None if line is invalid
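+
+ Example (illustrative line, with backsights enabled in the header):
+ "A1 A2 25.00 45.00 -10.00 2.00 3.50 1.00 4.00 225.00 10.00 #|P# low crawl"
+ -> from "A1" to "A2", distance 25.0, frontsight azimuth 45.0,
+ frontsight inclination -10.0, left 2.0, up 3.5, down 1.0, right 4.0,
+ backsight azimuth 225.0, backsight inclination 10.0,
+ excluded from plotting, comment "low crawl".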
717
+ """
718
+ parts = self.NON_WHITESPACE.findall(line)
719
+ if len(parts) < 2:
720
+ return None
721
+
722
+ idx = 0
723
+
724
+ # Station names
725
+ from_station = parts[idx]
726
+ idx += 1
727
+ to_station = parts[idx]
728
+ idx += 1
729
+
730
+ # Length
731
+ if idx >= len(parts):
732
+ self._add_error("missing length", line)
733
+ return None
734
+
735
+ length = self._parse_distance(parts[idx])
736
+ idx += 1
737
+
738
+ # If length is None and we've exhausted parts, skip this shot
739
+ if length is None and idx >= len(parts):
740
+ return None
741
+
742
+ # Frontsight azimuth
743
+ if idx >= len(parts):
744
+ self._add_error("missing frontsight azimuth", line)
745
+ return None
746
+ fs_azimuth = self._parse_azimuth(parts[idx])
747
+ idx += 1
748
+
749
+ # Frontsight inclination
750
+ if idx >= len(parts):
751
+ self._add_error("missing frontsight inclination", line)
752
+ return None
753
+ fs_inclination = self._parse_inclination(parts[idx])
754
+ idx += 1
755
+
756
+ # LRUD values (left, up, down, right in file order)
757
+ left = None
758
+ up = None
759
+ down = None
760
+ right = None
761
+
762
+ if idx < len(parts):
763
+ left = self._parse_lrud(parts[idx])
764
+ idx += 1
765
+ if idx < len(parts):
766
+ up = self._parse_lrud(parts[idx])
767
+ idx += 1
768
+ if idx < len(parts):
769
+ down = self._parse_lrud(parts[idx])
770
+ idx += 1
771
+ if idx < len(parts):
772
+ right = self._parse_lrud(parts[idx])
773
+ idx += 1
774
+
775
+ # Backsight values (if present)
776
+ bs_azimuth = None
777
+ bs_inclination = None
778
+ has_backsights = header.get("has_backsights", True)
779
+ if has_backsights:
780
+ if idx < len(parts):
781
+ bs_azimuth = self._parse_azimuth(parts[idx])
782
+ idx += 1
783
+ if idx < len(parts):
784
+ bs_inclination = self._parse_inclination(parts[idx])
785
+ idx += 1
786
+
787
+ # Flags and comment
788
+ excluded_from_length = False
789
+ excluded_from_plotting = False
790
+ excluded_from_all_processing = False
791
+ do_not_adjust = False
792
+ comment = None
793
+
794
+ # Look for remaining parts (flags and comments)
795
+ remaining = " ".join(parts[idx:]) if idx < len(parts) else ""
796
+
797
+ # Check for flags pattern #|..#
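+ # e.g. "#|PC# wet crawl" (illustrative) sets excluded_from_plotting and
+ # do_not_adjust and leaves the comment "wet crawl"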
798
+ flag_match = re.search(r"#\|([^#]*?)#", remaining)
799
+ if flag_match:
800
+ flags_str = flag_match.group(1)
801
+ for flag in flags_str:
802
+ flag_upper = flag.upper()
803
+ if flag_upper == "L":
804
+ excluded_from_length = True
805
+ elif flag_upper == "P":
806
+ excluded_from_plotting = True
807
+ elif flag_upper == "X":
808
+ excluded_from_all_processing = True
809
+ elif flag_upper == "C":
810
+ do_not_adjust = True
811
+ elif flag_upper == " ":
812
+ pass # Spaces are allowed
813
+ else:
814
+ self._add_warning(f"unrecognized flag: {flag}", flag)
815
+
816
+ # Comment is after the flags
817
+ comment_start = flag_match.end()
818
+ if comment_start < len(remaining):
819
+ comment = remaining[comment_start:].strip() or None
820
+ # No flags, remaining is comment
821
+ elif remaining.strip():
822
+ comment = remaining.strip()
823
+
824
+ # Return dictionary with alias names for Pydantic
825
+ return {
826
+ "from_station": from_station,
827
+ "to_station": to_station,
828
+ "distance": length,
829
+ "frontsight_azimuth": fs_azimuth,
830
+ "frontsight_inclination": fs_inclination,
831
+ "backsight_azimuth": bs_azimuth,
832
+ "backsight_inclination": bs_inclination,
833
+ "left": left,
834
+ "right": right,
835
+ "up": up,
836
+ "down": down,
837
+ "comment": comment,
838
+ "exclude_distance": excluded_from_length,
839
+ "excluded_from_plotting": excluded_from_plotting,
840
+ "excluded_from_all_processing": excluded_from_all_processing,
841
+ "do_not_adjust": do_not_adjust,
842
+ }