compass-lib 0.0.1__py3-none-any.whl → 0.0.2__py3-none-any.whl

This diff compares the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
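For orientation before reading the diff: `parser.py` reads and writes Compass cave-survey DAT files. Pieced together from the `export_to_dat()` writer shown below, one survey section looks roughly like this (the cave name, station IDs, and values are hypothetical, and real files align the fields into fixed-width columns, collapsed here):

    MyCave
    SURVEY NAME: A1-SERIES
    SURVEY DATE: 07 14 2023 COMMENT:Entrance series
    SURVEY TEAM:
    Alice, Bob
    DECLINATION:    2.50 FORMAT: DDDDUDLRLADN CORRECTIONS: 0.00 0.00 0.00 CORRECTIONS2: 0.00 0.00 DISCOVERY: 07 14 2023

    FROM TO LENGTH BEARING INC LEFT UP DOWN RIGHT AZM2 INC2 FLAGS COMMENTS

    A1 A2 10.50 123.00 -2.000 1.00 2.00 0.50 1.50 303.00 2.000

Each section ends with a form-feed character (ASCII 12, `COMPASS_SECTION_SEPARATOR`) and the file ends with a substitute character (ASCII 26, `COMPASS_END_OF_FILE`), which is why the new parser splits the file on the former and truncates it at the latter.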
compass_lib/parser.py CHANGED
@@ -1,282 +1,435 @@
- #!/usr/bin/env python
+ from __future__ import annotations
 
- import re
+ import contextlib
  import datetime
- import hashlib
- import json
- from typing import Optional, Union
-
+ import re
+ from dataclasses import dataclass
  from pathlib import Path
+ from typing import TYPE_CHECKING
+
+ from compass_lib.constants import COMPASS_DATE_COMMENT_RE
+ from compass_lib.constants import COMPASS_END_OF_FILE
+ from compass_lib.constants import COMPASS_SECTION_NAME_RE
+ from compass_lib.constants import COMPASS_SECTION_SEPARATOR
+ from compass_lib.constants import COMPASS_SECTION_SPLIT_RE
+ from compass_lib.constants import COMPASS_SHOT_FLAGS_RE
+ from compass_lib.enums import CompassFileType
  from compass_lib.enums import ShotFlag
+ from compass_lib.models import Survey
+ from compass_lib.models import SurveySection
+ from compass_lib.models import SurveyShot
 
- from compass_lib.section import SurveySection
- from compass_lib.shot import SurveyShot
+ if TYPE_CHECKING:
+     from typing_extensions import Self
 
- from functools import cached_property
 
+ @dataclass
+ class CompassDataRow:
+     """Basic Dataclass that represent one row of 'data' from the DAT file.
+     This contains no validation logic, the validation is being performed by
+     the PyDantic class: `ShotData`.
+     The sole purpose of this class is to aggregate the parsing logic."""
+
+     from_id: str
+     to_id: str
+     length: float
+     azimuth: float
+     inclination: float
+     left: float
+     up: float
+     down: float
+     right: float
+
+     # optional attributes
+     azimuth2: float = 0.0
+     inclination2: float = 0.0
+     flags: str | None = None
+     comment: str | None = None
 
- import dataclasses
- from dataclasses import dataclass
+     @classmethod
+     def from_str_data(cls, str_data: str, header_row: str) -> Self:
+         shot_data = str_data.split(maxsplit=9)
 
- # ============================== CompassFileFormat ============================== #
-
- # _formatFormat(): string {
- #   const {
- #     displayAzimuthUnit,
- #     displayLengthUnit,
- #     displayLrudUnit,
- #     displayInclinationUnit,
- #     lrudOrder,
- #     shotMeasurementOrder,
- #     hasBacksights,
- #     lrudAssociation,
- #   } = this
- #   return `${inverseAzimuthUnits[displayAzimuthUnit]}${
- #     inverseLengthUnits[displayLengthUnit]
- #   }${inverseLengthUnits[displayLrudUnit]}${
- #     inverseInclinationUnits[displayInclinationUnit]
- #   }${lrudOrder
- #     .map(i => inverseLrudItems[i])
- #     .join('')}${shotMeasurementOrder
- #     .map(i => inverseShotMeasurementItems[i])
- #     .join('')}${hasBacksights ? 'B' : 'N'}${
- #     lrudAssociation != null ? inverseStationSides[lrudAssociation] : ''
- #   }`
- # }
+         instance = cls(*shot_data[:9])
 
- @dataclass
- class CompassFileFormat:
-     displayAzimuthUnit: str
-     displayLengthUnit: str
-     displayLrudUnit: str
-     displayInclinationUnit: str
-     lrudOrder: str
-     shotMeasurementOrder: str
-     hasBacksights: str
-     lrudAssociation: str
+         def split1_str(val: str) -> tuple[str]:
+             """
+             Splits the input string into at most two parts.
 
-     @classmethod
-     def from_str(cls, input):
-         return cls(
-             displayAzimuthUnit="",
-             displayLengthUnit="",
-             displayLrudUnit="",
-             displayInclinationUnit="",
-             lrudOrder="",
-             shotMeasurementOrder="",
-             hasBacksights="",
-             lrudAssociation="",
-         )
+             Args:
+                 val (str): The string to be split.
 
+             Returns:
+                 tuple[str]: A tuple containing the first part of the split string and
+                     the second part if it exists, otherwise None.
 
+             Raises:
+                 ValueError: If the input string is None.
+             """
+             if val is None:
+                 raise ValueError("Received a NoneValue.")
 
- class EnhancedJSONEncoder(json.JSONEncoder):
-     def default(self, obj):
+             rslt = val.split(maxsplit=1)
+             if len(rslt) == 1:
+                 return rslt[0], None
+             return rslt
 
-         if dataclasses.is_dataclass(obj):
-             return dataclasses.asdict(obj)
+         with contextlib.suppress(IndexError):
+             optional_data = shot_data[9]
 
-         if isinstance(obj, datetime.date):
-             return obj.isoformat()
+             if "AZM2" in header_row:
+                 instance.azimuth2, optional_data = split1_str(optional_data)
 
-         if isinstance(obj, ShotFlag):
-             return obj.value
+             if "INC2" in header_row:
+                 instance.inclination2, optional_data = split1_str(optional_data)
 
-         return super().default(obj)
+             if (
+                 all(x in header_row for x in ["FLAGS", "COMMENTS"])
+                 and optional_data is not None
+             ):
+                 flags_comment = optional_data
 
+                 _, flag_str, comment = re.search(
+                     COMPASS_SHOT_FLAGS_RE, flags_comment
+                 ).groups()
 
- class CompassParser:
-     SEPARATOR = "\f" # Form_feed: https://www.ascii-code.com/12
-     END_OF_FILE = "\x1A" # Substitute: https://www.ascii-code.com/26
+                 instance.comment = comment.strip() if comment != "" else None
 
-     def __init__(self, filepath: str) -> None:
+                 instance.flags = (
+                     [ShotFlag._value2member_map_[f] for f in flag_str]
+                     if flag_str
+                     else None
+                 )
+                 if instance.flags is not None:
+                     instance.flags = sorted(set(instance.flags), key=lambda f: f.value)
 
-         self._filepath = Path(filepath)
+         # Input Normalization
+         instance.azimuth = float(instance.azimuth)
+         instance.azimuth = instance.azimuth % 360.0 if instance.azimuth >= 0 else 0.0
 
-         if not self.filepath.is_file():
-             raise FileNotFoundError(f"File not found: {filepath}")
+         instance.azimuth2 = float(instance.azimuth2)
+         instance.azimuth2 = instance.azimuth2 % 360.0 if instance.azimuth2 >= 0 else 0.0
 
-         # Ensure at least that the file type is valid
-         _ = self._data
+         return instance
 
-     # =================== Data Loading =================== #
 
-     @cached_property
-     def _data(self):
+ class CompassParser:
+     def __init__(self, *args, **kwargs) -> None:
+         raise NotImplementedError(
+             "This class is not meant to be instantiated directly."
+         )
 
-         with self.filepath.open(mode="r") as f:
-             data = f.read()
+     @classmethod
+     def load_dat_file(cls, filepath: str) -> Survey:
+         filepath = Path(filepath)
 
-         return [
-             activity.strip()
-             for activity in data.split(CompassParser.SEPARATOR)
-             if CompassParser.END_OF_FILE not in activity
+         if not filepath.is_file():
+             raise FileNotFoundError(f"File not found: {filepath}")
+
+         # Ensure at least that the file type is valid
+         with Path(filepath).open(mode="r", encoding="windows-1252") as f:
+             # Skip all the comments
+             file_content = "".join(
+                 [line for line in f.readlines() if not line.startswith("/")]
+             )
+
+         file_content = file_content.split(COMPASS_END_OF_FILE, maxsplit=1)[0]
+         raw_sections = [
+             section.rstrip()
+             for section in re.split(COMPASS_SECTION_SPLIT_RE, file_content)
+             if section.rstrip() != ""
          ]
 
-     # =================== File Properties =================== #
+         try:
+             return cls._parse_dat_file(raw_sections)
+         except (UnicodeDecodeError, ValueError, IndexError, TypeError) as e:
+             raise ValueError(f"Failed to parse file: `{filepath}`") from e
 
-     def __repr__(self) -> str:
-         repr = f"[CompassSurveyFile {self.filetype.upper()}] `{self.filepath}`:"
-         # for key in self._KEY_MAP.keys():
-         #     if key.startswith("_"):
-         #         continue
-         #     repr += f"\n\t- {key}: {getattr(self, key)}"
-         # repr += f"\n\t- shots: Total Shots: {len(self.shots)}"
-         # repr += f"\n\t- hash: {self.hash}"
-         return repr
+     @classmethod
+     def _parse_date(cls, date_str: str) -> datetime.date:
+         for date_format in ["%m %d %Y", "%m %d %y", "%d %m %Y", "%d %m %y"]:
+             try:
+                 return datetime.datetime.strptime(date_str, date_format).date()
+             except ValueError: # noqa: PERF203
+                 continue
+         raise ValueError("Unknown date format: `%s`", date_str)
 
-     @cached_property
-     def __hash__(self):
-         # return hashlib.sha256(self._as_binary()).hexdigest()
-         return hashlib.sha256("0".encode()).hexdigest()
-
-     @property
-     def hash(self):
-         return self.__hash__
-
-     # =============== Descriptive Properties =============== #
-
-     @property
-     def filepath(self):
-         return self._filepath
-
-     @property
-     def filetype(self):
-         return self.filepath.suffix[1:]
-         # try:
-         #     return ArianeFileType.from_str(self.filepath.suffix[1:])
-         # except ValueError as e:
-         #     raise TypeError(e) from e
-
-     @property
-     def lstat(self):
-         return self.filepath.lstat()
-
-     @property
-     def date_created(self):
-         return self.lstat.st_ctime
-
-     @property
-     def date_last_modified(self):
-         return self.lstat.st_mtime
-
-     @property
-     def date_last_opened(self):
-         return self.lstat.st_atime
-
-     # =================== Data Processing =================== #
-
-     @cached_property
-     def data(self):
-         sections = []
-         for activity in self._data:
-             entries = activity.splitlines()
-
-             cave_name = entries[0].strip()
-
-             if "SURVEY NAME: " not in entries[1]:
-                 raise RuntimeError
-             survey_name = entries[1].split(":")[-1].strip()
-
-             date_str, comment_str = entries[2].split(" ", maxsplit=1)
-
-             if "SURVEY DATE: " not in date_str:
-                 raise RuntimeError
-             date = date_str.split(":")[-1].strip()
-
-             if "COMMENT:" not in comment_str:
-                 raise RuntimeError
-             survey_comment = comment_str.split(":")[-1].strip()
-
-             if "SURVEY TEAM:" != entries[3].strip():
-                 raise RuntimeError
-
-             surveyors = [suveyor.strip() for suveyor in entries[4].split(",") if suveyor.strip() != ""]
-
-             if "DECLINATION:" not in entries[5]:
-                 raise RuntimeError
-             if "FORMAT:" not in entries[5]:
-                 raise RuntimeError
-             if "CORRECTIONS:" not in entries[5]:
-                 raise RuntimeError
-
-             _, declination_str, _, format_str, _, correct_A, correct_B, correct_C = entries[5].split()
-
-             shots = list()
-             for shot in entries[9:]:
-                 shot_data = shot.split(maxsplit=9)
-                 from_id, to_id, length, bearing, incl, left, up, down, right = shot_data[:9]
-
-                 try:
-                     flags_comment = shot_data[9]
-
-                     flag_regex = rf"({ShotFlag.__start_token__}([{''.join(ShotFlag._value2member_map_.keys())}]*){ShotFlag.__end_token__})*(.*)"
-                     _, flag_str, comment = re.search(flag_regex, flags_comment).groups()
-
-                     flags = [ShotFlag._value2member_map_[f] for f in flag_str] if flag_str else None
-
-                 except IndexError:
-                     flags = None
-                     comment = None
-
-                 shots.append(SurveyShot(
-                     from_id=from_id,
-                     to_id=to_id,
-                     length=float(length),
-                     bearing=float(bearing),
-                     inclination=float(incl),
-                     left=float(left),
-                     up=float(up),
-                     down=float(down),
-                     right=float(right),
-                     flags=sorted(set(flags), key=lambda f: f.value) if flags else None,
-                     comment=comment.strip() if comment else None
-                 ))
-
-             section = SurveySection(
-                 cave_name=cave_name,
-                 survey_name=survey_name,
-                 date=datetime.datetime.strptime(date, "%m %d %Y").date(),
-                 comment=survey_comment,
-                 surveyors=surveyors,
-                 declination=float(declination_str),
-                 format=format_str,
-                 correction=(float(correct_A), float(correct_B), float(correct_C)),
-                 shots=shots
+     @classmethod
+     def _parse_dat_file(cls, raw_sections: list[str]) -> Survey:
+         survey = Survey(cave_name=raw_sections[0].split("\n", maxsplit=1)[0].strip())
+
+         for raw_section in raw_sections:
+             section_data_iter = iter(raw_section.splitlines())
+
+             # Note: not used
+             # cave_name = next(section_data_iter)
+             _ = next(section_data_iter)
+
+             # -------------- Survey Name -------------- #
+             input_str = next(section_data_iter)
+             if (match := COMPASS_SECTION_NAME_RE.match(input_str)) is None:
+                 raise ValueError("Compass section name not found: `%s`", input_str)
+
+             survey_name = match.group("section_name").strip()
+
+             # -------------- Survey Date & Comment -------------- #
+             input_str = next(section_data_iter).replace("\t", " ")
+             if (match := COMPASS_DATE_COMMENT_RE.match(input_str)) is None:
+                 raise ValueError(
+                     "Compass date and comment name not found: `%s`", input_str
+                 )
+
+             survey_date = (
+                 cls._parse_date(match.group("date"))
+                 if match.group("date") != "None"
+                 else None
+             )
+             section_comment = (
+                 match.group("comment").strip() if match.group("comment") else ""
              )
-             sections.append(section)
 
-         return sections
+             # -------------- Surveyors -------------- #
+             if (surveyor_header := next(section_data_iter).strip()) != "SURVEY TEAM:":
+                 raise ValueError("Unknown surveyor string: `%s`", surveyor_header)
+             surveyors = next(section_data_iter).rstrip(";, ").rstrip()
+
+             # -------------- Optional Data -------------- #
+
+             optional_data = next(section_data_iter).split()
+             declination_str = format_str = None
+
+             correct_A = correct_B = correct_C = correct2_A = correct2_B = 0.0
+             discovery_date = survey_date
+
+             with contextlib.suppress(IndexError, ValueError):
+                 _header, declination_str = optional_data[0:2]
+                 _header, format_str = optional_data[2:4]
+                 _header, correct_A, correct_B, correct_C = optional_data[4:8]
+                 _header, correct2_A, correct2_B = optional_data[8:11]
+                 _header, d_month, d_day, d_year = optional_data[11:15]
+                 discovery_date = cls._parse_date(f"{d_month} {d_day} {d_year}")
+
+             # -------------- Skip Rows -------------- #
+             _ = next(section_data_iter) # empty row
+             header_row = next(section_data_iter)
+             _ = next(section_data_iter) # empty row
+
+             # -------------- Section Shots -------------- #
+
+             shots = []
+
+             with contextlib.suppress(StopIteration):
+                 while shot_str := next(section_data_iter):
+                     shot_data = CompassDataRow.from_str_data(
+                         str_data=shot_str, header_row=header_row
+                     )
+
+                     shots.append(
+                         SurveyShot(
+                             from_id=shot_data.from_id,
+                             to_id=shot_data.to_id,
+                             azimuth=float(shot_data.azimuth),
+                             inclination=float(shot_data.inclination),
+                             length=float(shot_data.length),
+                             # Optional Values
+                             comment=shot_data.comment,
+                             flags=shot_data.flags,
+                             azimuth2=float(shot_data.azimuth2),
+                             inclination2=float(shot_data.inclination2),
+                             # LRUD
+                             left=float(shot_data.left),
+                             right=float(shot_data.right),
+                             up=float(shot_data.up),
+                             down=float(shot_data.down),
+                         )
+                     )
+
+             survey.sections.append(
+                 SurveySection(
+                     name=survey_name,
+                     comment=section_comment,
+                     correction=(float(correct_A), float(correct_B), float(correct_C)),
+                     correction2=(float(correct2_A), float(correct2_B)),
+                     survey_date=survey_date,
+                     discovery_date=discovery_date,
+                     declination=float(declination_str),
+                     format=format_str if format_str is not None else "DDDDUDLRLADN",
+                     shots=shots,
+                     surveyors=surveyors,
+                 )
+             )
 
+         return survey
 
      # =================== Export Formats =================== #
 
-     def to_json(self, filepath: Optional[Union[str, Path]] = None) -> str:
-         json_str = json.dumps(self.data, indent=4, sort_keys=True, cls=EnhancedJSONEncoder)
-
-         if filepath is not None:
-             with open(filepath, mode="w") as file:
-                 file.write(json_str)
-
-         return json_str
-
-     # ==================== Public APIs ====================== #
-
-     @cached_property
-     def shots(self):
-         return []
-         # return [
-         #     SurveyShot(data=survey_shot)
-         #     for survey_shot in self._KEY_MAP.fetch(self._shots_list, "_shots")
-         # ]
-
-     @cached_property
-     def sections(self):
-         return []
-         # section_map = dict()
-         # for shot in self.shots:
-         #     try:
-         #         section_map[shot.section].add_shot(shot)
-         #     except KeyError:
-         #         section_map[shot.section] = SurveySection(shot=shot)
-         # return list(section_map.values())
+     # @classmethod
+     # def calculate_depth(
+     #     self, filepath: str | Path | None = None, include_depth: bool = False
+     # ) -> str:
+     #     data = self.data.model_dump()
+
+     #     all_shots = [
+     #         shot for section in data["sections"] for shot in section["shots"]
+     #     ]
+
+     #     if not include_depth:
+     #         for shot in all_shots:
+     #             del shot["depth"]
+
+     #     else:
+     #         # create an index of all the shots by "ID"
+     #         # use a copy to avoid data corruption.
+     #         shot_by_origins = defaultdict(list)
+     #         shot_by_destinations = defaultdict(list)
+     #         for shot in all_shots:
+     #             shot_by_origins[shot["from_id"]].append(shot)
+     #             shot_by_destinations[shot["to_id"]].append(shot)
+
+     #         origin_keys = set(shot_by_origins.keys())
+     #         destination_keys = set(shot_by_destinations.keys())
+
+     #         # Finding the "origin stations" - aka. stations with no incoming
+     #         # shots. They are assumed at depth 0.0
+     #         origin_stations = set()
+     #         for shot_key in origin_keys:
+     #             if shot_key in destination_keys:
+     #                 continue
+     #             origin_stations.add(shot_key)
+
+     #         processing_queue = OrderedQueue()
+
+     #         def collect_downstream_stations(target: str) -> list[str]:
+     #             if target in processing_queue:
+     #                 return
+
+     #             processing_queue.add(target, value=None, fail_if_present=True)
+     #             direct_shots = shot_by_origins[target]
+
+     #             for shot in direct_shots:
+     #                 processing_queue.add(
+     #                     shot["from_id"], value=None, fail_if_present=False
+     #                 )
+     #                 if (next_shot := shot["to_id"]) not in processing_queue:
+     #                     collect_downstream_stations(next_shot)
+
+     #         for station in sorted(origin_stations):
+     #             collect_downstream_stations(station)
+
+     #         def calculate_depth(
+     #             target: str, fail_if_unknown: bool = False
+     #         ) -> float | None:
+     #             if target in origin_stations:
+     #                 return 0.0
+
+     #             if (depth := processing_queue[target]) is not None:
+     #                 return depth
+
+     #             if fail_if_unknown:
+     #                 return None
+
+     #             for shot in shot_by_destinations[target]:
+     #                 start_depth = calculate_depth(
+     #                     shot["from_id"], fail_if_unknown=True
+     #                 )
+     #                 if start_depth is not None:
+     #                     break
+     #             else:
+     #                 raise RuntimeError("None of the previous shot has a known depth")
+
+     #             vertical_delta = math.cos(
+     #                 math.radians(90 + float(shot["inclination"]))
+     #             ) * float(shot["length"])
+
+     #             return round(start_depth + vertical_delta, ndigits=4)
+
+     #         for shot in processing_queue:
+     #             processing_queue[shot] = calculate_depth(shot)
+
+     #         for shot in all_shots:
+     #             shot["depth"] = round(processing_queue[shot["to_id"]], ndigits=1)
+
+     @classmethod
+     def export_to_dat(cls, survey: Survey, filepath: Path | str) -> None:
+         filepath = Path(filepath)
+
+         filetype = CompassFileType.from_path(filepath)
+
+         if filetype != CompassFileType.DAT:
+             raise TypeError(
+                 f"Unsupported fileformat: `{filetype.name}`. "
+                 f"Expected: `{CompassFileType.DAT.name}`"
+             )
+
+         with filepath.open(mode="w", encoding="windows-1252") as f:
+             for section in survey.sections:
+                 # Section Header
+                 f.write(f"{survey.cave_name}\n")
+                 f.write(f"SURVEY NAME: {section.name}\n")
+                 f.write(
+                     "".join(
+                         (
+                             "SURVEY DATE: ",
+                             section.survey_date.strftime("%m %-d %Y")
+                             if section.survey_date
+                             else "None",
+                             " ",
+                         )
+                     )
+                 )
+                 f.write(f"COMMENT:{section.comment}\n")
+                 f.write(f"SURVEY TEAM:\n{section.surveyors}\n")
+                 f.write(f"DECLINATION: {section.declination:>7.02f} ")
+                 f.write(f"FORMAT: {section.format} ")
+                 f.write(
+                     f"CORRECTIONS: {' '.join(f'{nbr:.02f}' for nbr in section.correction)} " # noqa: E501
+                 )
+                 f.write(
+                     f"CORRECTIONS2: {' '.join(f'{nbr:.02f}' for nbr in section.correction2)} " # noqa: E501
+                 )
+                 f.write(
+                     "".join(
+                         (
+                             "DISCOVERY: ",
+                             section.discovery_date.strftime("%m %-d %Y")
+                             if section.discovery_date
+                             else "None",
+                             "\n\n",
+                         )
+                     )
+                 )
+
+                 # Shots - Header
+                 f.write(" FROM TO LENGTH BEARING INC")
+                 f.write(" LEFT UP DOWN RIGHT")
+                 f.write(" AZM2 INC2 FLAGS COMMENTS\n\n")
+
+                 # Shots - Data
+                 for shot in section.shots:
+                     f.write(f"{shot.from_id: >12} ")
+                     f.write(f"{shot.to_id: >12} ")
+                     f.write(f"{shot.length:8.2f} ")
+                     f.write(f"{shot.azimuth:8.2f} ")
+                     f.write(f"{shot.inclination:8.3f} ")
+                     f.write(f"{shot.left:8.2f} ")
+                     f.write(f"{shot.up:8.2f} ")
+                     f.write(f"{shot.down:8.2f} ")
+                     f.write(f"{shot.right:8.2f} ")
+                     f.write(f"{shot.azimuth2:8.2f} ")
+                     f.write(f"{shot.inclination2:8.3f}")
+                     if shot.flags is not None:
+                         escaped_start_token = str(ShotFlag.__start_token__).replace(
+                             "\\", ""
+                         )
+                         f.write(f" {escaped_start_token}")
+                         f.write("".join([flag.value for flag in shot.flags]))
+                         f.write(ShotFlag.__end_token__)
+                     if shot.comment is not None:
+                         f.write(f" {shot.comment}")
+                     f.write("\n")
+
+                 # End of Section - Form_feed: https://www.ascii-code.com/12
+                 f.write(f"{COMPASS_SECTION_SEPARATOR}\n")
+
+         # End of File - Substitute: https://www.ascii-code.com/26
+         f.write(f"{COMPASS_END_OF_FILE}\n")