xradio 0.0.28__py3-none-any.whl → 0.0.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xradio/__init__.py +5 -4
- xradio/_utils/array.py +90 -0
- xradio/_utils/zarr/common.py +48 -3
- xradio/image/_util/zarr.py +4 -1
- xradio/schema/__init__.py +24 -6
- xradio/schema/bases.py +440 -2
- xradio/schema/check.py +96 -55
- xradio/schema/dataclass.py +123 -27
- xradio/schema/metamodel.py +21 -4
- xradio/schema/typing.py +33 -18
- xradio/vis/__init__.py +5 -2
- xradio/vis/_processing_set.py +28 -20
- xradio/vis/_vis_utils/_ms/_tables/create_field_and_source_xds.py +710 -0
- xradio/vis/_vis_utils/_ms/_tables/load.py +23 -10
- xradio/vis/_vis_utils/_ms/_tables/load_main_table.py +145 -64
- xradio/vis/_vis_utils/_ms/_tables/read.py +747 -172
- xradio/vis/_vis_utils/_ms/_tables/read_main_table.py +173 -44
- xradio/vis/_vis_utils/_ms/_tables/read_subtables.py +79 -28
- xradio/vis/_vis_utils/_ms/_tables/write.py +102 -45
- xradio/vis/_vis_utils/_ms/_tables/write_exp_api.py +127 -65
- xradio/vis/_vis_utils/_ms/chunks.py +58 -21
- xradio/vis/_vis_utils/_ms/conversion.py +536 -67
- xradio/vis/_vis_utils/_ms/descr.py +52 -20
- xradio/vis/_vis_utils/_ms/msv2_to_msv4_meta.py +70 -35
- xradio/vis/_vis_utils/_ms/msv4_infos.py +0 -59
- xradio/vis/_vis_utils/_ms/msv4_sub_xdss.py +76 -9
- xradio/vis/_vis_utils/_ms/optimised_functions.py +0 -46
- xradio/vis/_vis_utils/_ms/partition_queries.py +308 -119
- xradio/vis/_vis_utils/_ms/partitions.py +82 -25
- xradio/vis/_vis_utils/_ms/subtables.py +32 -14
- xradio/vis/_vis_utils/_utils/partition_attrs.py +30 -11
- xradio/vis/_vis_utils/_utils/xds_helper.py +136 -45
- xradio/vis/_vis_utils/_zarr/read.py +60 -22
- xradio/vis/_vis_utils/_zarr/write.py +83 -9
- xradio/vis/_vis_utils/ms.py +48 -29
- xradio/vis/_vis_utils/zarr.py +44 -20
- xradio/vis/convert_msv2_to_processing_set.py +106 -32
- xradio/vis/load_processing_set.py +38 -61
- xradio/vis/read_processing_set.py +62 -96
- xradio/vis/schema.py +687 -0
- xradio/vis/vis_io.py +75 -43
- {xradio-0.0.28.dist-info → xradio-0.0.29.dist-info}/LICENSE.txt +6 -1
- {xradio-0.0.28.dist-info → xradio-0.0.29.dist-info}/METADATA +10 -5
- xradio-0.0.29.dist-info/RECORD +73 -0
- {xradio-0.0.28.dist-info → xradio-0.0.29.dist-info}/WHEEL +1 -1
- xradio/vis/model.py +0 -497
- xradio-0.0.28.dist-info/RECORD +0 -71
- {xradio-0.0.28.dist-info → xradio-0.0.29.dist-info}/top_level.txt +0 -0
--- xradio-0.0.28/xradio/vis/_vis_utils/_ms/_tables/read_main_table.py
+++ xradio-0.0.29/xradio/vis/_vis_utils/_ms/_tables/read_main_table.py

@@ -17,7 +17,7 @@ from .read import (
 )
 
 from .table_query import open_table_ro, open_query
-from xradio.
+from xradio._utils.array import (
     unique_1d,
     pairing_function,
     inverse_pairing_function,
@@ -38,14 +38,22 @@ rename_msv2_cols = {
 
 
 def rename_vars(mvars: Dict[str, xr.DataArray]) -> Dict[str, xr.DataArray]:
-    """
+    """
+    Apply rename rules. Also preserve ordering of data_vars
 
     Note: not using xr.DataArray.rename because we have optional
     column renames and rename complains if some of the names passed
     are not present in the dataset
 
-
-
+    Parameters
+    ----------
+    mvars : Dict[str, xr.DataArray]
+        dictionary of data_vars to be used to create an xr.Dataset
+
+    Returns
+    -------
+    Dict[str, xr.DataArray]
+        similar dictionary after applying MSv2 => MSv3/ngCASA renaming rules
     """
     renamed = {
         rename_msv2_cols[name] if name in rename_msv2_cols else name: var
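The hunk above documents why `rename_vars` avoids `xr.Dataset.rename`: the MSv2 => MSv3 map may mention columns absent from a given dataset, and `rename` raises for unknown names. A minimal sketch of the same dict-comprehension pattern, with a made-up two-entry rename map standing in for the module's `rename_msv2_cols`:

```python
import numpy as np
import xarray as xr

# Hypothetical stand-in for the module's rename_msv2_cols map.
rename_map = {"ANTENNA1": "baseline_ant1_id", "DATA": "VISIBILITY"}

def rename_vars_sketch(mvars: dict) -> dict:
    # Rename only the names present in the map; unknown names pass through,
    # and the dict comprehension preserves the original data_vars ordering.
    return {rename_map.get(name, name): var for name, var in mvars.items()}

mvars = {"DATA": xr.DataArray(np.zeros((2, 3))), "WEIGHT": xr.DataArray(np.ones(2))}
print(list(rename_vars_sketch(mvars)))  # ['VISIBILITY', 'WEIGHT']
```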
@@ -62,8 +70,16 @@ def redim_id_data_vars(mvars: Dict[str, xr.DataArray]) -> Dict[str, xr.DataArray
     The antenna id data vars:
     From MS (antenna1_id(time, baseline), antenna2_id(time,baseline)
     To cds (baseline_ant1_id(baseline), baseline_ant2_id(baseline)
-
-
+
+    Parameters
+    ----------
+    mvars : Dict[str, xr.DataArray]
+        data variables being prepared for a partition xds
+
+    Returns
+    -------
+    Dict[str, xr.DataArray]
+        data variables with the ant id ones modified to cds type
     """
     # Vars to drop baseline dim
     var_names = [
@@ -74,23 +90,34 @@ def redim_id_data_vars(mvars: Dict[str, xr.DataArray]) -> Dict[str, xr.DataArray
         "state_id",
     ]
     for vname in var_names:
-        mvars[vname] = mvars[vname].sel(baseline=0, drop=True)
+        if "baseline" in mvars[vname].coords:
+            mvars[vname] = mvars[vname].sel(baseline=0, drop=True)
 
     for idx in ["1", "2"]:
         new_name = f"baseline_ant{idx}_id"
         mvars[new_name] = mvars.pop(f"antenna{idx}_id")
-        mvars[new_name] = mvars[new_name].sel(time=0, drop=True)
+        if "time" in mvars[new_name].coords:
+            mvars[new_name] = mvars[new_name].sel(time=0, drop=True)
 
     return mvars
 
 
 def get_partition_ids(mtable: tables.table, taql_where: str) -> Dict:
-    """Get some of the partition IDs that we have to retrieve from some
+    """
+    Get some of the partition IDs that we have to retrieve from some
     of the top level ID/sorting cols of the main table of the MS.
 
-
-
-    :
+    Parameters
+    ----------
+    mtable : tables.table
+        MS main table
+    taql_where : str
+        where part that defines the partition in TaQL
+
+    Returns
+    -------
+    Dict
+        ids of array, observation, and processor
     """
 
     taql_ids = f"select DISTINCT ARRAY_ID, OBSERVATION_ID, PROCESSOR_ID from $mtable {taql_where}"
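`get_partition_ids` issues the DISTINCT query shown at the end of the hunk. A sketch of how such a query can be run with python-casacore; `my.ms` and the where clause are placeholders, and `taql` substitutes `$mtable` from the calling scope:

```python
from casacore.tables import table, taql

mtable = table("my.ms", readonly=True, ack=False)
taql_where = "where DATA_DESC_ID = 0"

# DISTINCT over the top-level ID columns yields one row per combination.
query = taql(
    f"select DISTINCT ARRAY_ID, OBSERVATION_ID, PROCESSOR_ID from $mtable {taql_where}"
)
part_ids = {
    "array_id": query.getcol("ARRAY_ID"),
    "observation_id": query.getcol("OBSERVATION_ID"),
    "processor_id": query.getcol("PROCESSOR_ID"),
}
query.close()
mtable.close()
```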
@@ -130,6 +157,23 @@ def read_expanded_main_table(
     This is the expanded version (time, baseline) dims.
 
     Chunk tuple: (time, baseline, freq, pol)
+
+    Parameters
+    ----------
+    infile : str
+
+    ddi : int (Default value = 0)
+
+    scan_state : Union[Tuple[int, int], None] (Default value = None)
+
+    ignore_msv2_cols: Union[list, None] (Default value = None)
+
+    chunks: Tuple[int, ...] (Default value = (400, 200, 100, 2))
+
+
+    Returns
+    -------
+    Tuple[xr.Dataset, Dict[str, Any], Dict[str, Any]]
     """
     if ignore_msv2_cols is None:
         ignore_msv2_cols = []
@@ -175,6 +219,23 @@ def read_main_table_chunks(
     """
     Iterates through the time,baseline chunks and reads slices from
     all the data columns.
+
+    Parameters
+    ----------
+    infile : str
+
+    tb_tool : tables.table
+
+    taql_where : str
+
+    ignore_msv2_cols : Union[list, None] (Default value = None)
+
+    chunks: Tuple[int, ...] (Default value = (400, 200, 100, 2))
+
+
+    Returns
+    -------
+    Tuple[xr.Dataset, Dict[str, Any]]
     """
     baselines = get_baselines(tb_tool)
 
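`read_main_table_chunks` walks a (time, baseline) grid whose cell sizes come from the first two entries of the chunks tuple. A minimal sketch of that loop structure, with made-up table sizes:

```python
n_times, n_baselines = 1000, 351
chunks = (400, 200, 100, 2)  # (time, baseline, freq, pol), the default above

# Each (time, baseline) window becomes one batch of delayed column reads.
windows = []
for t0 in range(0, n_times, chunks[0]):
    for b0 in range(0, n_baselines, chunks[1]):
        windows.append(
            (slice(t0, min(t0 + chunks[0], n_times)),
             slice(b0, min(b0 + chunks[1], n_baselines)))
        )
print(len(windows))  # 3 time chunks x 2 baseline chunks = 6
```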
@@ -246,6 +307,7 @@ def read_main_table_chunks(
 
     dims = ["time", "baseline", "freq", "pol"]
     mvars = concat_tvars_to_mvars(dims, tvars, pol_cnt, chan_cnt)
+
    mcoords = {
        "time": xr.DataArray(convert_casacore_time(unique_times), dims=["time"]),
        "baseline": xr.DataArray(np.arange(n_baselines), dims=["baseline"]),
@@ -279,16 +341,21 @@ def get_utimes_tol(mtable: tables.table, taql_where: str) -> Tuple[np.ndarray, f
 
 
 def get_baselines(tb_tool: tables.table) -> np.ndarray:
-    """
+    """
+    Gets the unique baselines from antenna 1 and antenna 2 ids.
 
     Uses a pairing function and inverse pairing function to decrease the
     computation time of finding unique values.
 
-
-
+    Parameters
+    ----------
+    tb_tool : tables.table
+        MeasurementSet table to get the antenna ids.
 
-    Returns
-    a 2D array of unique antenna pairs
+    Returns
+    -------
+    unique_baselines : np.ndarray
+        a 2D array of unique antenna pairs
     (baselines) from the MeasurementSet table provided.
     """
     ant1, ant2 = tb_tool.getcol("ANTENNA1", 0, -1), tb_tool.getcol("ANTENNA2", 0, -1)
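`get_baselines` leans on `pairing_function`/`inverse_pairing_function` from the new `xradio._utils.array` module, whose bodies this diff does not show. A plausible sketch of the idea using Cantor pairing (an illustrative assumption, not the packaged implementation): collapsing each antenna pair to one integer makes the unique computation a fast 1D operation.

```python
import numpy as np

def pairing_function(antenna_pairs: np.ndarray) -> np.ndarray:
    """Map each (ant1, ant2) pair to a single integer (Cantor pairing)."""
    a, b = antenna_pairs[:, 0], antenna_pairs[:, 1]
    return (a + b) * (a + b + 1) // 2 + b

def inverse_pairing_function(paired: np.ndarray) -> np.ndarray:
    """Recover the (ant1, ant2) pairs from the paired integers."""
    w = ((np.sqrt(8 * paired + 1) - 1) // 2).astype(paired.dtype)
    b = paired - w * (w + 1) // 2
    return np.column_stack((w - b, b))

# Unique baselines via a 1D unique over paired ids, cheaper than a 2D unique:
pairs = np.array([[0, 1], [0, 2], [0, 1], [1, 2]])
unique = inverse_pairing_function(np.unique(pairing_function(pairs)))
print(unique)  # [[0 1] [0 2] [1 2]]
```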
@@ -312,19 +379,23 @@ def get_baselines(tb_tool: tables.table) -> np.ndarray:
 def get_baseline_indices(
     unique_baselines: np.ndarray, baseline_set: np.ndarray
 ) -> np.ndarray:
-    """
+    """
+    Finds the baseline indices of a set of baselines using the unique baselines.
 
     Uses a pairing function to reduce the number of values so it's more
     efficient to find the indices.
 
-
-
-
-
-
-
-
-    the indices of the baseline set that
+    Parameters
+    ----------
+    unique_baselines : np.ndarray
+        a 2D array of unique antenna pairs (baselines).
+    baseline_set : np.ndarray
+        a 2D array of antenna pairs (baselines). This array may contain duplicates.
+
+    Returns
+    -------
+    baseline_indices : np.ndarray
+        the indices of the baseline set that
     correspond to the unique baselines.
     """
     unique_baselines_paired = pairing_function(unique_baselines)
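`get_baseline_indices` applies the same trick: once pairs are collapsed to single integers, the lookup becomes a sorted 1D search instead of a row-wise 2D comparison. A sketch, again assuming a Cantor-style pairing and that the unique paired ids are sorted, as `np.unique` leaves them:

```python
import numpy as np

def cantor_pair(pairs: np.ndarray) -> np.ndarray:
    # Collapse (ant1, ant2) rows into single integers.
    a, b = pairs[:, 0], pairs[:, 1]
    return (a + b) * (a + b + 1) // 2 + b

def baseline_indices_sketch(unique_baselines, baseline_set):
    # unique_baselines is assumed sorted by paired id (np.unique order).
    return np.searchsorted(cantor_pair(unique_baselines), cantor_pair(baseline_set))

unique = np.array([[0, 1], [0, 2], [1, 2]])
rows = np.array([[0, 1], [1, 2], [0, 1]])
print(baseline_indices_sketch(unique, rows))  # [0 2 0]
```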
@@ -345,12 +416,31 @@ def read_all_cols_bvars(
     tb_tool: tables.table,
     chunks: Tuple[int, ...],
     chan_cnt: int,
-    ignore_msv2_cols,
+    ignore_msv2_cols: bool,
     delayed_params: Tuple,
     bvars: Dict[str, xr.DataArray],
 ) -> None:
     """
     Loops over each column and create delayed dask arrays
+
+    Parameters
+    ----------
+    tb_tool : tables.table
+
+    chunks : Tuple[int, ...]
+
+    chan_cnt : int
+
+    ignore_msv2_cols : bool
+
+    delayed_params : Tuple
+
+    bvars : Dict[str, xr.DataArray]
+
+
+    Returns
+    -------
+
     """
 
     col_names = tb_tool.colnames()
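`read_all_cols_bvars` builds the lazy graph the docstring describes: one `dask.delayed` read per chunk, wrapped as a dask array. A self-contained sketch of that pattern, with a hypothetical `read_col` standing in for a casacore `getcol` slice:

```python
import dask
import dask.array as da
import numpy as np

def read_col(infile: str, col: str, start_row: int, n_rows: int) -> np.ndarray:
    # Hypothetical stand-in: a real reader would slice the casacore column.
    return np.zeros((n_rows, 100, 2), dtype=np.complex64)

n_rows, row_chunk = 1000, 400
pieces = []
for r0 in range(0, n_rows, row_chunk):
    n = min(row_chunk, n_rows - r0)
    lazy = dask.delayed(read_col)("my.ms", "DATA", r0, n)
    pieces.append(da.from_delayed(lazy, shape=(n, 100, 2), dtype=np.complex64))

data = da.concatenate(pieces, axis=0)  # nothing is read until .compute()
print(data.shape)  # (1000, 100, 2)
```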
@@ -430,6 +520,17 @@ def concat_bvars_update_tvars(
     concats all the dask chunks from each baseline. This is intended to
     be called iteratively, for every time chunk iteration, once all the
     baseline chunks have been read.
+
+    Parameters
+    ----------
+    bvars: Dict[str, xr.DataArray]
+
+    tvars: Dict[str, xr.DataArray]
+
+
+    Returns
+    -------
+
     """
     for kk in bvars.keys():
         if len(bvars[kk]) == 0:
@@ -446,12 +547,21 @@ def concat_tvars_to_mvars(
     Concat into a single dask array all the dask arrays from each time
     chunk to make the final arrays of the xds.
 
-
-
-
-
-
-
+    Parameters
+    ----------
+    dims : List[str]
+        dimension names
+    tvars : Dict[str, xr.DataArray]
+        variables as lists of dask arrays per time chunk
+    pol_cnt : int
+        len of pol axis/dim
+    chan_cnt : int
+        len of freq axis/dim (chan indices)
+
+    Returns
+    -------
+    Dict[str, xr.DataArray]
+        variables as concated dask arrays
     """
 
     mvars = {}
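`concat_tvars_to_mvars` then stitches those per-time-chunk pieces into one array per variable. A sketch of the concatenation step, assuming `tvars` holds a list of dask arrays per variable:

```python
import dask.array as da

# Per-variable lists of per-time-chunk arrays, shaped (time, baseline, freq, pol).
tvars = {
    "DATA": [da.zeros((400, 200, 100, 2)), da.zeros((100, 200, 100, 2))],
    "FLAG": [da.zeros((400, 200, 100, 2), dtype=bool),
             da.zeros((100, 200, 100, 2), dtype=bool)],
}

# Concatenate along the time axis (axis 0) to form the final xds variables.
mvars = {name: da.concatenate(pieces, axis=0) for name, pieces in tvars.items()}
print(mvars["DATA"].shape)  # (500, 200, 100, 2)
```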
@@ -494,19 +604,38 @@ def read_flat_main_table(
     features may be missing and/or flaky.
 
     Chunk tuple: (row, freq, pol)
+
+    Parameters
+    ----------
+    infile : str
+
+    ddi : Union[int, None] (Default value = None)
+
+    scan_state : Union[Tuple[int, int], None] (Default value = None)
+
+    rowidxs : np.ndarray (Default value = None)
+
+    ignore_msv2_cols : Union[List, None] (Default value = None)
+
+    chunks : Tuple[int, ...] (Default value = (22000, 512, 2))
+
+    Returns
+    -------
+    Tuple[xr.Dataset, Dict[str, Any], Dict[str, Any]]
     """
     taql_where = f"where DATA_DESC_ID = {ddi}"
     if scan_state:
         # TODO: support additional intent/scan/subscan conditions if
         # we keep this read_flat functionality
-        scans, states = scan_state
+        scans, states = scan_state
         # get row indices relative to full main table
-        if states:
-
-
-
-
-
+        if type(states) == np.ndarray:
+            state_ids_or = " OR STATE_ID = ".join(np.char.mod("%d", states))
+            taql_where += f" AND (STATE_ID = {state_ids_or})"
+        elif states is not None:
+            taql_where += f" AND STATE_ID = {states}"
+        elif scans is not None:
+            taql_where += f" AND SCAN_NUMBER = {scans}"
 
     mtable = tables.table(
         infile, readonly=True, lockoptions={"option": "usernoread"}, ack=False
@@ -517,11 +646,11 @@ def read_flat_main_table(
     taql_rowid = f"select rowid() as ROWS from $mtable {taql_where}"
     with open_query(mtable, taql_rowid) as query_rows:
         rowidxs = query_rows.getcol("ROWS")
-    mtable.close()
 
     nrows = len(rowidxs)
     if nrows == 0:
-        return xr.Dataset(), {}, {}
+        mtable.close()
+        return xr.Dataset(), {}, {}
 
     part_ids = get_partition_ids(mtable, taql_where)
 
@@ -539,7 +668,7 @@ def read_flat_main_table(
             (col, query_cols.getcol(col, 0, 1))
             for col in cols
             if (col not in ignore)
-            and (ignore_msv2_cols and
+            and not (ignore_msv2_cols and col in ignore_msv2_cols)
         ]
     )
     chan_cnt, pol_cnt = [
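The one-line change above fixes the column filter so that `ignore_msv2_cols` actually excludes its columns. A quick check of what the corrected predicate keeps, with made-up column names:

```python
ignore = ["SORTED_TABLE"]              # hypothetical always-ignored columns
ignore_msv2_cols = ["WEIGHT_SPECTRUM"]
cols = ["DATA", "WEIGHT_SPECTRUM", "SORTED_TABLE", "FLAG"]

kept = [
    col
    for col in cols
    if (col not in ignore)
    and not (ignore_msv2_cols and col in ignore_msv2_cols)
]
print(kept)  # ['DATA', 'FLAG']
```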
@@ -637,7 +766,7 @@ def read_flat_main_table(
     )
 
     mvars["time"] = xr.DataArray(
-        convert_casacore_time(mvars["
+        convert_casacore_time(mvars["time"].values), dims=["row"]
     ).chunk({"row": chunks[0]})
 
     # add xds global attributes
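`convert_casacore_time` turns the raw TIME values into datetimes; casacore epochs are conventionally seconds since the MJD epoch (1858-11-17). A minimal conversion sketch under that assumption, not the helper's packaged body:

```python
import numpy as np

def convert_casacore_time_sketch(times_sec: np.ndarray) -> np.ndarray:
    # Seconds since 1858-11-17T00:00:00 (MJD epoch) -> datetime64[ns].
    epoch = np.datetime64("1858-11-17T00:00:00", "ns")
    return epoch + (times_sec * 1e9).astype("timedelta64[ns]")

print(convert_casacore_time_sketch(np.array([4.8e9])))  # ~2010-12-25
```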
--- xradio-0.0.28/xradio/vis/_vis_utils/_ms/_tables/read_subtables.py
+++ xradio-0.0.29/xradio/vis/_vis_utils/_ms/_tables/read_subtables.py

@@ -19,7 +19,7 @@ from .read import (
     read_generic_table,
 )
 from .write import revert_time
-from xradio.
+from xradio._utils.array import unique_1d
 
 
 def read_ephemerides(
@@ -28,8 +28,15 @@ def read_ephemerides(
     """
     Read ephemerides info from MSv2 FIELD/EPHEMi_....tab subtables
 
-
-
+    Parameters
+    ----------
+    infile : str
+        path to MS
+
+    Returns
+    -------
+    Union[xr.Dataset, None]
+        ephemerides xds with metainfo as in the MSv3/EPHEMERIDES subtable
     """
     field_subt = Path(infile, "FIELD")
     subdirs = [
@@ -42,7 +49,7 @@ def read_ephemerides(
         logger.debug(f"Reading ephemerides info from: FIELD / {sdir.name}")
         # One "EPHEM_*.tab" (each with a difference ephemeris_id) to concatenate
         ephem.append(
-            read_generic_table(infile, str(Path(
+            read_generic_table(infile, str(Path(*sdir.parts[-2:])), timecols=["MJD"])
         )
 
     if ephem:
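The fixed call above trims `sdir` down to its last two components before handing it to `read_generic_table`. A quick illustration with a made-up ephemeris table path:

```python
from pathlib import Path

sdir = Path("/data/my.ms/FIELD/EPHEM0_Mars_57034.9.tab")
print(str(Path(*sdir.parts[-2:])))  # FIELD/EPHEM0_Mars_57034.9.tab
```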
@@ -62,9 +69,21 @@ def read_delayed_pointing_table(
     """
     Read MS pointing subtable in delayed arrays into an xr.Dataset
 
-
-
-
+    Parameters
+    ----------
+    infile : str
+        path to pointing table
+    rename_ids : Dict[str, str] (Default value = None)
+        dict with dimension renaming mapping
+    chunks : Tuple (Default value = (10000, 100, 2, 20))
+        chunks for the arrays. Chunks tuple: time, antenna, data_vars_dim_1, data_vars_dim_2
+    time_slice: slice
+        time bounds
+
+    Returns
+    -------
+    xr.Dataset
+        pointing dataset
     """
 
     with open_table_ro(infile) as mtable:
@@ -124,18 +143,25 @@ def read_delayed_pointing_table(
     return xds
 
 
-def normalize_time_slice(mtable: tables.table, time_slice: slice):
+def normalize_time_slice(mtable: tables.table, time_slice: slice) -> slice:
     """
     If we get indices, produce the TIME column time value for the
     start/top indices. If we get timestamps, convert them to casacore
     refeference.
 
-
-
-
-
-
-    :
+    Parameters
+    ----------
+    mtable : tables.table
+        a casacore table from which we are reading a TIME
+        column
+    time_slice : slice
+        slice giving start/stop time. Can be given as
+        integer indices or as timestamps (Xarray / pandas reference)
+
+    Returns
+    -------
+    slice
+        a (start, stop) slice with times in casacore ref frame
     """
     if type(time_slice.start) == pd.Timestamp and type(time_slice.stop) == pd.Timestamp:
         # Add tol?
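For the timestamp branch of `normalize_time_slice`, converting a `pd.Timestamp` to the casacore reference amounts to measuring seconds from the MJD epoch. A hedged sketch of that direction, ignoring the tolerance the code hints at:

```python
import pandas as pd

def to_casacore_seconds(ts: pd.Timestamp) -> float:
    # Seconds since the MJD epoch used by casacore TIME columns
    # (assumed convention; leap-second subtleties ignored).
    return (ts - pd.Timestamp("1858-11-17")) / pd.Timedelta("1s")

print(to_casacore_seconds(pd.Timestamp("2021-01-01")))  # ~5.12e9
```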
@@ -189,12 +215,25 @@ def read_delayed_pointing_times(
     Read pointing table in delayed time / antenna chunks. Loops over
     time chunks
 
-
-
-
-
-
-
+    Parameters
+    ----------
+    infile : str
+        path to pointing table
+    antennas : np.ndarray
+        antenna ids
+    utimes : np.ndarray
+        unique times from table
+    chunks : tuple
+        chunks for the arrays
+    query_all : tables.table
+        table to read columns
+    time_slice: slice :
+        time bounds
+
+    Returns
+    -------
+    Dict[str, xr.DataArray]
+        dictionary of columns=>variables (read as dask.delayed)
     """
 
     antenna_chunks = range(0, len(antennas), chunks[1])
@@ -232,7 +271,7 @@ def read_delayed_pointing_chunks(
     infile: str,
     antennas: np.ndarray,
     chunks: tuple,
-    utimes: np.
+    utimes: np.ndarray,
     tc: int,
     tb_tool: tables.table,
 ) -> Dict[str, xr.DataArray]:
@@ -240,13 +279,25 @@
     For one time chunk, read the baseline/antenna chunks. Loops over
     antenna_id chunks and reads all columns as dask.delayed calls.
 
-
-
-
-
-
-
-    :
+    Parameters
+    ----------
+    infile : str
+        path to pointing table
+    antennas : np.ndarray
+        antenna ids
+    chunks : tuple
+        chunks for the arrays
+    utimes : np.ndarray
+        unique times from table
+    tc : int
+        time index
+    tb_tool : tables.table
+        table to read columns
+
+    Returns
+    -------
+    Dict[str, xr.DataArray]
+        dictionary of columns=>variables (read as dask.delayed)
     """
 
     # add a tol around the time ranges returned by taql, for the next taql queries