seabirdfilehandler 0.5.3__tar.gz → 0.5.4__tar.gz

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

This version of seabirdfilehandler might be problematic.

@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: seabirdfilehandler
- Version: 0.5.3
+ Version: 0.5.4
  Summary: Library of parsers to interact with SeaBird CTD files.
  Keywords: CTD,parser,seabird,data
  Author: Emil Michels
@@ -16,7 +16,8 @@ Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: 3.13
  Requires-Dist: pandas (>=2.2.1)
  Requires-Dist: xmltodict (>=0.13.0)
- Project-URL: Homepage, https://git.io-warnemuende.de/CTD-Software/SeabirdFileHandler
+ Project-URL: Documentation, https://ctd-software.pages.io-warnemuende.de/seabirdfilehandler
+ Project-URL: Homepage, https://ctd-software.pages.io-warnemuende.de/seabirdfilehandler
  Project-URL: Repository, https://git.io-warnemuende.de/CTD-Software/SeabirdFileHandler
  Description-Content-Type: text/markdown

@@ -16,10 +16,11 @@ classifiers = [
      "Programming Language :: Python :: 3.12",
      "Programming Language :: Python :: 3.13",
  ]
- urls.homepage = "https://git.io-warnemuende.de/CTD-Software/SeabirdFileHandler"
+ urls.homepage = "https://ctd-software.pages.io-warnemuende.de/seabirdfilehandler"
  urls.repository = "https://git.io-warnemuende.de/CTD-Software/SeabirdFileHandler"
+ urls.documentation = "https://ctd-software.pages.io-warnemuende.de/seabirdfilehandler"
  dynamic = []
- version = "0.5.3"
+ version = "0.5.4"

  [tool.poetry]

@@ -1,3 +1,4 @@
+ from pathlib import Path
  from typing import Union
  from datetime import datetime, time
  import pandas as pd
@@ -22,12 +23,13 @@ class BottleFile(DataFile):

      """

-     def __init__(self, path_to_file):
-         super().__init__(path_to_file)
-         self.original_df = self.create_dataframe()
-         self.df = self.original_df
-         self.setting_dataframe_dtypes()
-         self.adding_timestamp_column()
+     def __init__(self, path_to_file: Path | str, only_header: bool = False):
+         super().__init__(path_to_file, only_header)
+         if not only_header:
+             self.original_df = self.create_dataframe()
+             self.df = self.original_df
+             self.setting_dataframe_dtypes()
+             self.adding_timestamp_column()

      def create_dataframe(self):
          """Creates a dataframe out of the btl file. Manages the double data
@@ -60,9 +60,11 @@ class CnvFile(DataFile):
          super().__init__(path_to_file, only_header)
          self.validation_modules = self.obtaining_validation_modules()
          self.start_time = self.reading_start_time()
-         self.parameters = Parameters(self.data, self.data_table_description)
+         self.parameters = Parameters(
+             self.data, self.data_table_description, only_header
+         )
          if create_dataframe:
-             self.df = self.parameters.get_pandas_dataframe()
+             self.df = self.create_dataframe()
          if absolute_time_calculation:
              self.absolute_time_calculation()
          if event_log_column:
@@ -70,6 +72,13 @@ class CnvFile(DataFile):
          if coordinate_columns:
              self.add_position_columns()

+     def create_dataframe(self) -> pd.DataFrame:
+         """
+         Plain dataframe creator.
+         """
+         self.df = self.parameters.get_pandas_dataframe()
+         return self.df
+
      def reading_start_time(
          self,
          time_source: str = "System UTC",
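With create_dataframe now a public method, dataframe construction on CnvFile can be deferred and re-run on demand. A minimal sketch, assuming a local example.cnv and that create_dataframe is a constructor keyword as the flags above suggest:

    from seabirdfilehandler import CnvFile

    # Skip dataframe creation at construction time ...
    cnv = CnvFile("example.cnv", create_dataframe=False)

    # ... and build it later; the result is returned and also stored on cnv.df.
    df = cnv.create_dataframe()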
@@ -0,0 +1,411 @@
+ from __future__ import annotations
+ from pathlib import Path
+ import logging
+ from collections import UserList
+ from typing import Callable, Type
+ import pandas as pd
+ import numpy as np
+ from seabirdfilehandler import (
+     CnvFile,
+     BottleFile,
+     BottleLogFile,
+ )
+ from seabirdfilehandler import DataFile
+ from seabirdfilehandler.utils import get_unique_sensor_data
+
+ logger = logging.getLogger(__name__)
+
+
+ def get_collection(
+     path_to_files: Path | str,
+     file_suffix: str = "cnv",
+     only_metadata: bool = False,
+     sorting_key: Callable | None = None,
+ ) -> FileCollection:
+     """
+     Factory to create instances of FileCollection, depending on input type.
+
+     Parameters
+     ----------
+     path_to_files : Path | str :
+         The path to the directory to search for files.
+     file_suffix : str :
+         The suffix to search for. (Default value = "cnv")
+     only_metadata : bool :
+         Whether to read only metadata. (Default value = False)
+     sorting_key : Callable | None :
+         A callable that returns the filename-part to use to sort the collection. (Default value = None)
+
+     Returns
+     -------
+     An instance of FileCollection or one of its children.
+
+     """
+     mapping_suffix_to_type = {
+         "cnv": CnvCollection,
+         "btl": FileCollection,
+         "bl": FileCollection,
+     }
+     file_suffix = file_suffix.strip(".")
+     try:
+         collection = mapping_suffix_to_type[file_suffix](
+             path_to_files, file_suffix, only_metadata, sorting_key
+         )
+     except KeyError:
+         raise ValueError(f"Unknown input file type: {file_suffix}, aborting.")
+     else:
+         return collection
+
+
+ class FileCollection(UserList):
+     """
+     A representation of multiple files of the same kind. These files share
+     the same suffix and are otherwise closely connected to each other. A common
+     use case would be the collection of CNVs to allow for easier processing or
+     integration of field calibration measurements.
+
+     Parameters
+     ----------
+     path_to_files : Path | str :
+         The path to the directory to search for files.
+     file_suffix : str :
+         The suffix to search for. (Default value = "cnv")
+     only_metadata : bool :
+         Whether to read only metadata. (Default value = False)
+     sorting_key : Callable | None :
+         A callable that returns the filename-part to use to sort the collection. (Default value = None)
+     """
+
+     def __init__(
+         self,
+         path_to_files: str | Path,
+         file_suffix: str,
+         only_metadata: bool = False,
+         sorting_key: Callable | None = None,
+     ):
+         super().__init__()
+         self.path_to_files = Path(path_to_files)
+         self.file_suffix = file_suffix.strip(".")
+         self.file_type = self.extract_file_type(self.file_suffix)
+         self.individual_file_paths = self.collect_files(
+             sorting_key=sorting_key
+         )
+         self.data = self.load_files(only_metadata)
+         if not only_metadata:
+             self.df_list = self.get_dataframes()
+             self.df = self.get_collection_dataframe(self.df_list)
+
+     def __str__(self):
+         return "\n".join(str(file) for file in self.data)
+
+     def extract_file_type(self, suffix: str) -> Type[DataFile]:
+         """
+         Determines the file type using the input suffix.
+
+         Parameters
+         ----------
+         suffix : str :
+             The file suffix.
+
+         Returns
+         -------
+         The DataFile subclass corresponding to the given suffix.
+         """
+         mapping_suffix_to_type = {
+             "cnv": CnvFile,
+             "btl": BottleFile,
+             "bl": BottleLogFile,
+         }
+         file_type = DataFile
+         for key, value in mapping_suffix_to_type.items():
+             if key == suffix:
+                 file_type = value
+                 break
+         return file_type
+
+     def collect_files(
+         self,
+         sorting_key: Callable | None = lambda file: int(
+             file.stem.split("_")[3]
+         ),
+     ) -> list[Path]:
+         """
+         Creates a list of target files, recursively from the given directory.
+         These can be sorted with the help of the sorting_key parameter, which
+         is a Callable that identifies the part of the filename that shall be
+         used for sorting.
+
+         Parameters
+         ----------
+         sorting_key : Callable | None :
+             The part of the filename to use in sorting. (Default value = lambda file: int(file.stem.split("_")[3]))
+
+         Returns
+         -------
+         A list of all paths found.
+         """
+         return sorted(
+             self.path_to_files.rglob(f"*{self.file_suffix}"),
+             key=sorting_key,
+         )
+
+     def load_files(self, only_metadata: bool = False) -> list[DataFile]:
+         """
+         Creates Python instances of each file.
+
+         Parameters
+         ----------
+         only_metadata : bool :
+             Whether to load only file metadata. (Default value = False)
+
+         Returns
+         -------
+         A list of all instances.
+         """
+         data = []
+         for file in self.individual_file_paths:
+             try:
+                 data.append(self.file_type(file, only_metadata))
+             except TypeError:
+                 logger.error(
+                     f"Could not open file {file} with the type "
+                     f"{self.file_type}."
+                 )
+                 continue
+         return data
+
+     def get_dataframes(
+         self,
+         event_log: bool = False,
+         coordinates: bool = False,
+         time_correction: bool = False,
+         cast_identifier: bool = False,
+     ) -> list[pd.DataFrame]:
+         """
+         Collects all individual dataframes and allows additional column
+         creation.
+
+         Parameters
+         ----------
+         event_log : bool :
+             Whether to add station and event columns. (Default value = False)
+         coordinates : bool :
+             Whether to add position columns. (Default value = False)
+         time_correction : bool :
+             Whether to compute absolute time and add the start time. (Default value = False)
+         cast_identifier : bool :
+             Whether to add a cast number column. (Default value = False)
+
+         Returns
+         -------
+         A list of the individual pandas DataFrames.
+         """
+         for index, file in enumerate(self.data):
+             if event_log:
+                 file.add_station_and_event_column()
+             if coordinates:
+                 file.add_position_columns()
+             if time_correction:
+                 file.absolute_time_calculation()
+                 file.add_start_time()
+             if cast_identifier:
+                 file.add_cast_number(index + 1)
+         return [file.df for file in self.data]
+
+     def get_collection_dataframe(
+         self, list_of_dfs: list[pd.DataFrame] | None = None
+     ) -> pd.DataFrame:
+         """
+         Creates one DataFrame from the individual ones, by concatenation.
+
+         Parameters
+         ----------
+         list_of_dfs : list[pd.DataFrame] | None :
+             A list of the individual DataFrames. (Default value = None)
+
+         Returns
+         -------
+         A pandas DataFrame representing the whole dataset.
+         """
+         if not list_of_dfs:
+             list_of_dfs = self.get_dataframes()
+         if not list_of_dfs:
+             raise ValueError("No dataframes to concatenate.")
+         df = pd.concat(list_of_dfs, ignore_index=True)
+         self.df = df
+         return df
+
+     def tidy_collection_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:
+         """
+         Apply the different dataframe edits to the given dataframe.
+
+         Parameters
+         ----------
+         df : pd.DataFrame :
+             A DataFrame to edit.
+
+         Returns
+         -------
+         The tidied dataframe.
+         """
+         df = self.use_bad_flag_for_nan(df)
+         df = self.set_dtype_to_float(df)
+         return self.select_real_scan_data(df)
+
+     def use_bad_flag_for_nan(self, df: pd.DataFrame) -> pd.DataFrame:
+         """
+         Replace all bad-flag values, as defined inside the files, with NaN.
+
+         Parameters
+         ----------
+         df : pd.DataFrame :
+             The dataframe to edit.
+
+         Returns
+         -------
+         The edited DataFrame.
+         """
+         bad_flags = set()
+         for file in self.data:
+             for line in file.data_table_description:
+                 if line.startswith("bad_flag"):
+                     flag = line.split("=")[1].strip()
+                     bad_flags.add(flag)
+         for flag in bad_flags:
+             df.replace(to_replace=flag, value=np.nan, inplace=True)
+         return df
+
+     def set_dtype_to_float(self, df: pd.DataFrame) -> pd.DataFrame:
+         """
+         Use the float-dtype for all DataFrame columns.
+
+         Parameters
+         ----------
+         df : pd.DataFrame :
+             The dataframe to edit.
+
+         Returns
+         -------
+         The edited DataFrame.
+         """
+         for parameter in df.columns:
+             if parameter in ["datetime"]:
+                 continue
+             try:
+                 df[parameter] = df[parameter].astype("float")
+             except (ValueError, TypeError):
+                 continue
+         return df
+
+     def select_real_scan_data(self, df: pd.DataFrame) -> pd.DataFrame:
+         """
+         Drop data rows that have no 'Scan' value, if that column exists.
+
+         Parameters
+         ----------
+         df : pd.DataFrame :
+             The dataframe to edit.
+
+         Returns
+         -------
+         The edited DataFrame.
+         """
+         try:
+             scan_column = [
+                 c for c in df.columns if c.lower().startswith("scan")
+             ][0]
+         except IndexError:
+             return df
+         else:
+             df = df.loc[df[scan_column].notna()]
+             return df
+
+     def to_csv(self, file_name):
+         """
+         Writes a csv file with the given filename.
+
+         Parameters
+         ----------
+         file_name :
+             The new csv file name.
+         """
+         self.df.to_csv(file_name)
+
+
+ class CnvCollection(FileCollection):
+     """
+     Specific methods to work with collections of .cnv files.
+     """
+
+     def __init__(
+         self,
+         *args,
+         **kwargs,
+     ):
+         super().__init__(*args, **kwargs)
+         self.data_meta_info = self.get_data_table_meta_info()
+         self.sensor_data = get_unique_sensor_data(
+             [file.sensors for file in self.data]
+         )
+         self.array = self.get_array()
+
+     def get_dataframes(
+         self,
+         event_log: bool = False,
+         coordinates: bool = False,
+         time_correction: bool = False,
+         cast_identifier: bool = False,
+     ) -> list[pd.DataFrame]:
+         """
+         Collects all individual dataframes and allows additional column
+         creation.
+
+         Parameters
+         ----------
+         event_log : bool :
+             Whether to add station and event columns. (Default value = False)
+         coordinates : bool :
+             Whether to add position columns. (Default value = False)
+         time_correction : bool :
+             Whether to compute absolute time and add the start time. (Default value = False)
+         cast_identifier : bool :
+             Whether to add a cast number column. (Default value = False)
+
+         Returns
+         -------
+         A list of the individual pandas DataFrames.
+         """
+         for index, file in enumerate(self.data):
+             if event_log:
+                 file.add_station_and_event_column()
+             if coordinates:
+                 file.add_position_columns()
+             if time_correction:
+                 file.absolute_time_calculation()
+                 file.add_start_time()
+             if cast_identifier:
+                 file.add_cast_number(index + 1)
+         return [file.create_dataframe() for file in self.data]
+
+     def get_data_table_meta_info(self) -> list[dict]:
+         """
+         Ensures the same data description in all input cnv files and
+         returns it.
+         Acts as an early alarm when working on different kinds of files,
+         which cannot be concatenated together.
+
+         Returns
+         -------
+         A list of dictionaries that represent the data column information.
+         """
+         all_column_descriptions = [
+             file.parameters.metadata for file in self.data
+         ]
+         for info in all_column_descriptions:
+             if all_column_descriptions[0] != info:
+                 raise AssertionError(
+                     "Acting on differently formed data files, aborting"
+                 )
+         return all_column_descriptions[0]
+
+     def get_array(self) -> np.ndarray:
+         """
+         Creates a collection array of all individual file arrays.
+
+         Returns
+         -------
+         A numpy array, representing the data of all input files.
+         """
+         return np.concatenate(
+             [file.parameters.create_full_ndarray() for file in self.data]
+         )
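The new module is driven through the get_collection factory. A usage sketch, assuming the module is exposed as seabirdfilehandler.filecollection and that a data/ directory of cnv files exists:

    from seabirdfilehandler.filecollection import get_collection

    # Recursively collects all *.cnv files below data/ and returns a CnvCollection.
    collection = get_collection("data/", file_suffix="cnv")

    # Concatenated dataframe over all files, then the tidied variant
    # (bad flags to NaN, float dtypes, scan-less rows dropped).
    df = collection.get_collection_dataframe()
    tidy = collection.tidy_collection_dataframe(df)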
@@ -32,15 +32,17 @@ class Parameters(UserDict):
          self,
          data: list,
          metadata: list,
+         only_header: bool = False,
      ):
          self.raw_input_data = data
          self.raw_metadata = metadata
-         self.full_data_array = self.create_full_ndarray()
          self.differentiate_table_description()
          self.metadata, self.duplicate_columns = self.reading_data_header(
              metadata
          )
-         self.data = self.create_parameter_instances()
+         if not only_header:
+             self.full_data_array = self.create_full_ndarray()
+             self.data = self.create_parameter_instances()

      def get_parameter_names(self) -> list[str]:
          return [parameter["name"] for parameter in self.metadata.values()]
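Parameters now honors the same only_header flag that the file classes forward to it: header metadata is parsed, while the full data array and parameter instances are skipped. A sketch via CnvFile, assuming an example.cnv on disk:

    from seabirdfilehandler import CnvFile

    # only_header=True is forwarded to Parameters; column metadata is
    # available without the data array ever being built.
    header_only = CnvFile("example.cnv", only_header=True)
    print(header_only.parameters.get_parameter_names())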
@@ -1,258 +0,0 @@
- from pathlib import Path
- import logging
- from collections import UserList
- from typing import Callable, Type
- import pandas as pd
- import numpy as np
- from seabirdfilehandler import (
-     CnvFile,
-     BottleFile,
-     BottleLogFile,
- )
- from seabirdfilehandler import DataFile
- from seabirdfilehandler.utils import get_unique_sensor_data
-
- logger = logging.getLogger(__name__)
-
-
- class FileCollection(UserList):
-     """A representation of multiple files of the same kind. These files share
-     the same suffix and are otherwise closely connected to each other. A common
-     use case would be the collection of CNVs to allow for easier processing or
-     integration of field calibration measurements.
-
-     Parameters
-     ----------
-
-     Returns
-     -------
-
-     """
-
-     def __init__(
-         self,
-         path_to_files: str | Path,
-         file_suffix: str,
-         only_metadata: bool = False,
-         sorting_key: Callable | None = None,
-     ):
-         super().__init__()
-         self.path_to_files = Path(path_to_files)
-         self.file_suffix = file_suffix.strip(".")
-         self.file_type: Type[DataFile]
-         self.extract_file_type()
-         self.individual_file_paths = []
-         self.collect_files(sorting_key=sorting_key)
-         self.load_files(only_metadata)
-         if not only_metadata:
-             if self.file_type == DataFile:
-                 self.df_list = self.get_dataframes()
-                 self.df = self.get_collection_dataframe(self.df_list)
-             if self.file_type == CnvFile:
-                 self.data_meta_info = self.get_data_table_meta_info()
-                 self.sensor_data = get_unique_sensor_data(
-                     [file.sensors for file in self.data]
-                 )
-
-     def __str__(self):
-         return "/n".join(self.data)
-
-     def extract_file_type(self):
-         """ """
-         mapping_suffix_to_type = {
-             "cnv": CnvFile,
-             "btl": BottleFile,
-             "bl": BottleLogFile,
-         }
-         for key, value in mapping_suffix_to_type.items():
-             if key == self.file_suffix:
-                 self.file_type = value
-                 break
-         else:
-             self.file_type = DataFile
-
-     def collect_files(
-         self,
-         sorting_key: Callable | None = lambda file: int(
-             file.stem.split("_")[3]
-         ),
-     ):
-         """ """
-         self.individual_file_paths = sorted(
-             self.path_to_files.rglob(f"*{self.file_suffix}"),
-             key=sorting_key,
-         )
-
-     def load_files(self, only_metadata: bool = False):
-         """ """
-         for file in self.individual_file_paths:
-             try:
-                 self.data.append(self.file_type(file))
-             except TypeError:
-                 logger.error(
-                     f"Could not open file {file} with the type "
-                     f"{self.file_type}."
-                 )
-                 continue
-
-     def get_dataframes(
-         self,
-         event_log: bool = False,
-         coordinates: bool = False,
-         time_correction: bool = False,
-         cast_identifier: bool = False,
-         long_header_names: bool = False,
-         full_data_header: bool = True,
-     ) -> list[pd.DataFrame]:
-         """
-
-         Parameters
-         ----------
-         event_log: bool :
-             (Default value = False)
-         coordinates: bool :
-             (Default value = False)
-         time_correction: bool :
-             (Default value = False)
-         cast_identifier: bool :
-             (Default value = False)
-         long_header_names: bool :
-             (Default value = False)
-         full_data_header: bool :
-             (Default value = True)
-
-         Returns
-         -------
-
-         """
-         for index, file in enumerate(self.data):
-             if full_data_header:
-                 file.rename_dataframe_header(header_detail_level="longinfo")
-             elif long_header_names:
-                 file.rename_dataframe_header(header_detail_level="name")
-             if event_log:
-                 file.add_station_and_event_column()
-             if coordinates:
-                 file.add_position_columns()
-             if time_correction:
-                 file.absolute_time_calculation()
-                 file.add_start_time()
-             if cast_identifier:
-                 file.add_cast_number(index + 1)
-         return [file.df for file in self.data]
-
-     def get_collection_dataframe(
-         self, list_of_dfs: list[pd.DataFrame] | None = None
-     ) -> pd.DataFrame:
-         """
-
-         Parameters
-         ----------
-         list_of_dfs: list[pd.DataFrame] | None :
-             (Default value = None)
-
-         Returns
-         -------
-
-         """
-         if not list_of_dfs:
-             list_of_dfs = self.get_dataframes()
-         df = pd.concat(list_of_dfs, ignore_index=True)
-         # df.meta.metadata = list_of_dfs[0].meta.metadata
-         return df
-
-     def tidy_collection_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:
-         """
-
-         Parameters
-         ----------
-         df: pd.DataFrame :
-
-
-         Returns
-         -------
-
-         """
-         df = self.use_bad_flag_for_nan(df)
-         df = self.set_dtype_to_float(df)
-         return self.select_real_scan_data(df)
-
-     def use_bad_flag_for_nan(self, df: pd.DataFrame) -> pd.DataFrame:
-         """
-
-         Parameters
-         ----------
-         df: pd.DataFrame :
-
-
-         Returns
-         -------
-
-         """
-         bad_flags = set()
-         for file in self.data:
-             for line in file.data_table_description:
-                 if line.startswith("bad_flag"):
-                     flag = line.split("=")[1].strip()
-                     bad_flags.add(flag)
-         for flag in bad_flags:
-             df.replace(to_replace=flag, value=np.nan, inplace=True)
-         return df
-
-     def set_dtype_to_float(self, df: pd.DataFrame) -> pd.DataFrame:
-         """
-
-         Parameters
-         ----------
-         df: pd.DataFrame :
-
-
-         Returns
-         -------
-
-         """
-         for parameter in df.columns:
-             if parameter in ["datetime"]:
-                 continue
-             try:
-                 df[parameter] = df[parameter].astype("float")
-             finally:
-                 continue
-         return df
-
-     def select_real_scan_data(self, df: pd.DataFrame) -> pd.DataFrame:
-         """
-
-         Parameters
-         ----------
-         df: pd.DataFrame :
-
-
-         Returns
-         -------
-
-         """
-         # TODO: fix this hardcoded name
-         try:
-             df = df.loc[df["Scan Count"].notna()]
-         finally:
-             pass
-         return df
-
-     def to_csv(self, file_name):
-         """
-
-         Parameters
-         ----------
-         file_name :
-
-
-         Returns
-         -------
-
-         """
-         self.get_collection_dataframe().to_csv(file_name)
-
-     def get_data_table_meta_info(self) -> list[list[dict]]:
-         """ """
-         return [file.parameters.metadata for file in self.data]