pypff 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pypff/__init__.py ADDED
@@ -0,0 +1,8 @@
+ from .io import datapff
+ from .io2 import PanosetiRun, PFFSequence, hkpff, qconfig
+ from .models import QuaboHeader, ModuleHeader, PFFHeader, FrameConfig
+ from . import pixelmap
+ from .pixelmap_maroc2phys_bga import maroc2phys_bga
+ from .pixelmap_maroc2phys_qfp import maroc2phys_qfp
+ from .pixelmap_phys2maroc_bga import phys2maroc_bga
+ from .pixelmap_phys2maroc_qfp import phys2maroc_qfp
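These re-exports form the package's public API. A minimal usage sketch, assuming a .pffd run directory and a PFF file exist; the directory and file names below are illustrative, not real data:

    from pathlib import Path
    from pypff import PanosetiRun, datapff

    # List the data products recorded in a run directory (path is illustrative).
    run = PanosetiRun(Path("obs_2023-08-02.pffd"))
    print(run.list_products())

    # Read 100 frames plus per-frame metadata from a single PFF file
    # (file name is illustrative but follows the pattern datapff parses).
    d = datapff("start_2023-08-02T00:03:53Z.dp_img16.bpp_2.module_1.seq_0.pff")
    data, md = d.readpff(samples=100, metadata=True)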
pypff/_cli/__init__.py ADDED
File without changes
pypff/_cli/profile.py ADDED
@@ -0,0 +1,54 @@
+ import typer
+ from pathlib import Path
+ from typing import Annotated, Optional
+ from pypff.profiling import Profiler
+ from rich.console import Console
+
+ app = typer.Typer()
+
+ @app.command()
+ def run(
+     run_dir: Annotated[Path, typer.Argument(help="Path to the .pffd run directory.")],
+     products: Annotated[Optional[list[str]], typer.Option("--product", "-p", help="Specific products to test. Defaults to all.")] = None,
+     n_frames: Annotated[int, typer.Option("--frames", "-n", help="Number of frames to test per product.")] = 1000,
+     step: Annotated[int, typer.Option("--step", "-s", help="Step size for strided read test.")] = 10,
+     random_samples: Annotated[int, typer.Option("--random", "-r", help="Number of random access samples.")] = 100,
+ ):
+     """Run performance benchmarks on a PanoSETI run."""
+     console = Console()
+
+     if not run_dir.exists():
+         console.print(f"[red]❌ Run directory {run_dir} does not exist.[/]")
+         raise typer.Exit(1)
+
+     profiler = Profiler(run_dir)
+     target_products = products or profiler.run.list_products()
+
+     if not target_products:
+         console.print("[yellow]⚠️ No data products found in the directory.[/]")
+         return
+
+     with console.status("[bold green]Running benchmarks...[/]"):
+         for prod in target_products:
+             try:
+                 # 1. Sequential Read
+                 profiler.profile_sequential_read(prod, n_frames=n_frames)
+
+                 # 2. Bulk Read
+                 profiler.profile_bulk_read(prod, n_frames=n_frames)
+
+                 # 3. Strided Read
+                 profiler.profile_strided_read(prod, step=step, n_frames=n_frames // step)
+
+                 # 4. Random Access
+                 profiler.profile_random_access(prod, n_samples=random_samples)
+
+                 # 5. Metadata
+                 profiler.profile_metadata_extraction(prod)
+             except Exception as e:
+                 console.print(f"[red]Error profiling {prod}: {e}[/]")
+
+     profiler.display_results()
+
+ if __name__ == "__main__":
+     app()
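One way to exercise this command without installing the console script is Typer's test runner; a short sketch (the run-directory path is illustrative):

    from typer.testing import CliRunner
    from pypff._cli.profile import app

    runner = CliRunner()
    # Benchmark 500 frames per product of an observation run.
    result = runner.invoke(app, ["obs_2023-08-02.pffd", "--frames", "500"])
    print(result.output)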
pypff/_cli/root.py ADDED
@@ -0,0 +1,19 @@
+ import typer
+ from pathlib import Path
+ from typing import Annotated
+ from pypff import PanosetiRun
+
+ app = typer.Typer()
+
+ @app.command()
+ def show(
+     run_dir: Annotated[Path, typer.Argument(help="Path to the .pffd run directory.")],
+     details: Annotated[bool, typer.Option("--details", "-d", help="Show individual PFF files.")] = False,
+ ):
+     """Explore the structure of a PanoSETI run."""
+     if not run_dir.exists():
+         print(f"❌ Run directory {run_dir} does not exist.")
+         raise typer.Exit(1)
+
+     run = PanosetiRun(run_dir)
+     run.show(details=details)
pypff/_cli/test.py ADDED
@@ -0,0 +1,57 @@
+ import typer
+ import subprocess
+ import sys
+ from typing import Annotated
+
+ app = typer.Typer(help="Run pypff test suite.")
+
+ @app.command()
+ def all(
+     lint: Annotated[bool, typer.Option("--lint", help="Run linters (Ruff/MyPy)")] = False,
+     cov: Annotated[bool, typer.Option("--cov", help="Enable coverage reporting")] = False,
+ ):
+     """Run all pypff tests (Unit + Logic + Legacy)."""
+     if lint:
+         run_lint()
+
+     cov_args = ["--cov=src/pypff", "--cov-report=term-missing"] if cov else []
+
+     print("Running Tier 1 (Unit) tests...")
+     subprocess.run([sys.executable, "-m", "pytest"] + cov_args + ["src/ci/tier1_unit"], check=True)
+
+     if cov:
+         cov_args.append("--cov-append")
+
+     print("Running Tier 2 (Logic) tests...")
+     subprocess.run([sys.executable, "-m", "pytest"] + cov_args + ["src/ci/tier2_logic"], check=True)
+
+     print("Running Legacy Integration tests...")
+     subprocess.run([sys.executable, "-m", "pytest"] + cov_args + ["src/ci/legacy_tests"], check=True)
+
+ @app.command()
+ def unit():
+     """Run Tier 1 (Unit) tests."""
+     print("Running Tier 1 (Unit) tests...")
+     subprocess.run([sys.executable, "-m", "pytest", "src/ci/tier1_unit"], check=True)
+
+ @app.command()
+ def logic():
+     """Run Tier 2 (Logic) tests."""
+     print("Running Tier 2 (Logic) tests...")
+     subprocess.run([sys.executable, "-m", "pytest", "src/ci/tier2_logic"], check=True)
+
+ @app.command()
+ def legacy():
+     """Run Legacy Integration tests."""
+     print("Running Legacy Integration tests...")
+     subprocess.run([sys.executable, "-m", "pytest", "src/ci/legacy_tests"], check=True)
+
+ @app.command()
+ def lint():
+     """Run linters (Ruff/MyPy)."""
+     run_lint()
+
+ def run_lint():
+     print("Running linters...")
+     subprocess.run([sys.executable, "-m", "ruff", "check", "."], check=True)
+     subprocess.run([sys.executable, "-m", "mypy", "src"], check=True)
pypff/_cli/zarr.py ADDED
@@ -0,0 +1,46 @@
+ from __future__ import annotations
+
+ from pathlib import Path
+ from typing import Annotated
+
+ import typer
+
+ app = typer.Typer(help="PFF → Zarr v3 conversion tools.")
+
+
+ @app.command()
+ def convert(
+     obs_dir: Annotated[Path, typer.Argument(help="Input .pffd observation directory")],
+     out_dir: Annotated[Path, typer.Argument(help="Output directory for .zarr stores")],
+     codec: Annotated[str, typer.Option(help="Compression codec: zstd, blosc-lz4, gzip, none")] = "zstd",
+     level: Annotated[int, typer.Option(help="Compression level (codec-specific)")] = 3,
+     time_chunk: Annotated[
+         int, typer.Option(help="Frames per time chunk (0 = auto-size to ~8 MB)")
+     ] = 0,
+ ) -> None:
+     """Convert all data products in a .pffd observation directory to Zarr v3 stores."""
+     from pypff.io2 import PanosetiRun
+     from pypff.zarr import convert_run
+
+     if not obs_dir.exists():
+         typer.echo(f"Error: {obs_dir} does not exist", err=True)
+         raise typer.Exit(1)
+
+     run = PanosetiRun(obs_dir)
+     products = run.list_products()
+     if not products:
+         typer.echo(f"No data products found in {obs_dir}", err=True)
+         raise typer.Exit(1)
+
+     typer.echo(f"Found {len(products)} product(s) in {obs_dir.name}:")
+     for p in products:
+         seq = run.get_product(p)
+         typer.echo(f"  {p}: {len(seq):,} frames")
+
+     chunk = time_chunk if time_chunk > 0 else None
+     stores = convert_run(run, out_dir, codec=codec, level=level, time_chunk=chunk)
+
+     typer.echo(f"\nWrote {len(stores)} Zarr store(s) to {out_dir}:")
+     for s in stores:
+         size_mb = sum(f.stat().st_size for f in s.rglob("*") if f.is_file()) / 1024**2
+         typer.echo(f"  {s.name} ({size_mb:.1f} MB)")
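The same conversion can be driven programmatically with the calls the command itself makes; a minimal sketch (paths are illustrative):

    from pathlib import Path
    from pypff.io2 import PanosetiRun
    from pypff.zarr import convert_run

    run = PanosetiRun(Path("obs_2023-08-02.pffd"))
    # time_chunk=None lets the converter auto-size time chunks, as the CLI does by default.
    stores = convert_run(run, Path("zarr_out"), codec="zstd", level=3, time_chunk=None)
    for store in stores:
        print(store)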
pypff/cli.py ADDED
@@ -0,0 +1,55 @@
+ from __future__ import annotations
+
+ import os
+ from typing import Annotated, Any
+
+ import typer
+
+ # Local Imports
+ from .util.cli import BaseLazyGroup, display_tree_callback
+
+
+ class PypffLazyGroup(BaseLazyGroup):
+     """
+     Custom Click Group that lazy-loads commands from other modules.
+     Ensures that heavy dependencies (like NumPy or Rich) aren't loaded
+     until a specific command is actually executed.
+     """
+
+     def __init__(self, *args: Any, **kwargs: Any) -> None:
+         lazy_mapping = {
+             "show": ("pypff._cli.root", "show", "Explore the structure of a PanoSETI run."),
+             "test": ("pypff._cli.test", "app", "Run pypff test suite."),
+             "profile": ("pypff._cli.profile", "app", "Run performance benchmarks on a PanoSETI run."),
+             "zarr": ("pypff._cli.zarr", "app", "PFF → Zarr v3 conversion tools."),
+         }
+         super().__init__(*args, lazy_mapping=lazy_mapping, **kwargs)
+
+
+ app = typer.Typer(
+     cls=PypffLazyGroup,
+     help="Pypff Management CLI",
+     context_settings={"help_option_names": ["-h", "--help"]},
+ )
+
+
+ @app.callback(invoke_without_command=True)
+ def main(
+     ctx: typer.Context,
+     tree: Annotated[
+         bool,
+         typer.Option(
+             "--tree",
+             "-t",
+             help="Display the command tree and exit.",
+             callback=display_tree_callback,
+         ),
+     ] = False,
+ ) -> None:
+     """PYPFF I/O CLI."""
+     if ctx.invoked_subcommand is None and not tree:
+         print(ctx.get_help())
+
+
+ if __name__ == "__main__":
+     app()
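BaseLazyGroup lives in pypff.util.cli and is not shown in this diff. The sketch below is a hypothetical illustration of the lazy-loading pattern the docstring describes (deferring the import of a command's module until that command is requested), not the package's actual implementation:

    import importlib
    import click

    class LazyGroup(click.Group):
        """Illustrative only: import a command's module the first time it is requested."""

        def __init__(self, *args, lazy_mapping=None, **kwargs):
            super().__init__(*args, **kwargs)
            # name -> (module path, attribute) of the command to import on demand
            self._lazy = lazy_mapping or {}

        def list_commands(self, ctx):
            return sorted(set(super().list_commands(ctx)) | set(self._lazy))

        def get_command(self, ctx, name):
            if name in self._lazy:
                module, attr = self._lazy[name]
                return getattr(importlib.import_module(module), attr)
            return super().get_command(ctx, name)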
pypff/io.py ADDED
@@ -0,0 +1,336 @@
+ '''
+ This module provides methods for reading PFF data files, including img16, img8, ph256, ph1024 and hk.pff.
+ '''
+ import json
+ import datetime
+ import numpy as np
+ import mmap
+ from glob import glob
+ from . import pixelmap
+
+ MOBO_DIM = 16
+ QUABO_DIM = 32
+
+ # The metadata loc in the data is hard-coded here.
+ loc_arr = np.zeros(2, dtype=object)
+ # metadata loc for ph256
+ # metadata example
+ '''
+ b'{ "quabo_num": 0, "pkt_num": 32280, "pkt_tai": 398, "pkt_nsec": 723300414, "tv_sec": 1690934633, "tv_usec": 720082}\n'
+ '''
+ loc_arr[0] = {
+     'quabo_num' : [14, 16],
+     'pkt_num'   : [28, 39],
+     'pkt_tai'   : [51, 56],
+     'pkt_nsec'  : [69, 79],
+     'tv_sec'    : [90, 101],
+     'tv_usec'   : [113, 120]
+ }
+ # metadata loc for ph1024, img16 and img8
+ # metadata example
+ '''
+ '{\n
+ "quabo_0": { "pkt_num": 23855, "pkt_tai": 906, "pkt_nsec": 774507484, "tv_sec": 1691048805, "tv_usec": 778782}, \n
+ "quabo_1": { "pkt_num": 16262, "pkt_tai": 906, "pkt_nsec": 774507492, "tv_sec": 1691048805, "tv_usec": 778789}, \n
+ "quabo_2": { "pkt_num": 9069, "pkt_tai": 906, "pkt_nsec": 774507484, "tv_sec": 1691048805, "tv_usec": 778800}, \n
+ "quabo_3": { "pkt_num": 1234, "pkt_tai": 906, "pkt_nsec": 774507484, "tv_sec": 1691048805, "tv_usec": 778804}
+ \n}\n'
+ '''
+ loc_arr[1] = {
+     'quabo_0':
+     {
+         'pkt_num':  [28, 39],
+         'pkt_tai':  [51, 56],
+         'pkt_nsec': [69, 79],
+         'tv_sec':   [90, 101],
+         'tv_usec':  [113, 120]
+     },
+     'quabo_1':
+     {
+         'pkt_num':  [150, 161],
+         'pkt_tai':  [173, 178],
+         'pkt_nsec': [191, 201],
+         'tv_sec':   [212, 223],
+         'tv_usec':  [235, 242]
+     },
+     'quabo_2':
+     {
+         'pkt_num':  [272, 283],
+         'pkt_tai':  [295, 300],
+         'pkt_nsec': [313, 323],
+         'tv_sec':   [334, 345],
+         'tv_usec':  [357, 364]
+     },
+     'quabo_3':
+     {
+         'pkt_num':  [394, 405],
+         'pkt_tai':  [417, 422],
+         'pkt_nsec': [435, 445],
+         'tv_sec':   [456, 467],
+         'tv_usec':  [479, 486]
+     }
+ }
+ md_loc = {
+     'ph256': loc_arr[0],
+     'ph1024': loc_arr[1],
+     'img16': loc_arr[1],
+     'img8': loc_arr[1]
+ }
79
+ # generate dict template
80
+ #
81
+ def _gen_dict_template(d):
82
+ template = {}
83
+ for k in d:
84
+ # chagne TEMP1 to DET_TEMP, and change TEMP1 to FPGA_TEMP
85
+ if k == 'TEMP1':
86
+ k = 'DET_TEMP'
87
+ if k == 'TEMP2':
88
+ k = 'FPGA_TEMP'
89
+ template[k] = []
90
+ return template
91
+
+ class hkpff(object):
+     '''
+     Description:
+         The hkpff class reads hk.pff and returns a dict containing the housekeeping info of quabo, wrs, wps and gps.
+     '''
+     def __init__(self, fn='hk.pff'):
+         '''
+         Description:
+             Create a hkpff object based on the file name.
+         Input:
+             -- fn(str): file name of a hk.pff
+         '''
+         self.fn = fn
+         self.hk_info = {}
+
+     def readhk(self):
+         '''
+         Description:
+             Read hk.pff and convert the info to a dict.
+         Output:
+             -- hk_info(dict): a dict containing all of the hk info.
+         '''
+         with open(self.fn, 'rb') as f:
+             hk_lines = f.readlines()
+         for hk_str in hk_lines:
+             try:
+                 hk = json.loads(hk_str)
+             except:
+                 continue
+             key, = hk.keys()
+             # check if the key is already in hk_info
+             if key not in self.hk_info:
+                 template = _gen_dict_template(hk[key])
+                 self.hk_info[key] = template
+             for k, v in hk[key].items():
+                 # change TEMP1 to DET_TEMP, and TEMP2 to FPGA_TEMP
+                 if k == 'TEMP1':
+                     k = 'DET_TEMP'
+                 if k == 'TEMP2':
+                     k = 'FPGA_TEMP'
+                 try:
+                     # if the value is an int
+                     self.hk_info[key][k].append(int(v))
+                 except:
+                     try:
+                         # if the value is a float
+                         self.hk_info[key][k].append(float(v))
+                     except:
+                         self.hk_info[key][k].append(v)
+         return self.hk_info
+
+
+ class datapff(object):
+     '''
+     Description:
+         The datapff class reads all kinds of data files, including img16, img8, ph256 and ph1024.
+     '''
+
+     def __init__(self, fn):
+         '''
+         Description:
+             Create a datapff object based on the pff file name.
+         Input:
+             -- fn(str): pff file name.
+         '''
+         self.fn = fn
+         fn_str = fn.split('/')[-1]
+         info = fn_str.split('.')
+         stringIndex = 0
+         if len(info[0]) != 0:
+             stringIndex = 0
+         else:
+             stringIndex = 1
+         startdt_str = info[stringIndex].split('_')[1]
+         stringIndex += 1
+         # It looks like we have two formats of file name
+         try:
+             self.startdt = datetime.datetime.strptime(startdt_str, '%Y-%m-%dT%H:%M:%SZ')
+         except:
+             # macos
+             self.startdt = datetime.datetime.strptime(startdt_str, '%Y-%m-%dT%H-%M-%SZ')
+         self.dp = info[stringIndex].split('_')[1]
+         stringIndex += 1
+         self.bpp = int(info[stringIndex].split('_')[1])
+         stringIndex += 1
+         self.module = int(info[stringIndex].split('_')[1])
+         stringIndex += 1
+         self.seqno = int(info[stringIndex].split('_')[1])
+         if self.dp == 'ph256':
+             self._md_size = 124
+             self._pixels = 256
+             self._d_size = self._pixels * self.bpp
+             self.datasize = self._md_size + self._d_size
+         else:
+             # TODO: check if the metadata size for ph1024/img16/img8 is the same
+             self._md_size = 492
+             self._pixels = 1024
+             self._d_size = self._pixels * self.bpp
+             self.datasize = self._md_size + self._d_size
+         if self.dp == 'ph256' or self.dp == 'ph1024':
+             self.dtype = np.int16
+         elif self.dp == 'img16':
+             self.dtype = np.uint16
+         else:
+             self.dtype = np.uint8
+         self.metadata = {}
+
+     def readpff(self, samples=-1, skip=0, pixel=-1, ver='qfp', metadata=False, mode='mmap'):
+         '''
+         Description:
+             Read data from a data pff file.
+         Inputs:
+             -- samples(int): The number of samples to read out.
+                              If it's -1, all of the data will be read out.
+                              Default = -1
+             -- skip(int): Skip the given number of samples.
+                           Default = 0
+             -- pixel(int): Select the pixel.
+                            If it's -1, we will get the data of all the channels.
+                            Default = -1
+             -- ver(str): quabo version.
+                          Default = 'qfp'
+             -- metadata(bool): If True, read the metadata out.
+                                Default = False
+             -- mode(str): reading mode, 'mmap' or 'read'.
+                           Default = 'mmap'
+
+         Outputs:
+             -- data(np.array): data array.
+             -- metadata(dict): a dict containing the metadata from each sample.
+         '''
+         # get metadata location, which is hard-coded above
+         metadata_loc = md_loc[self.dp]
+         # read data out from a ph256, img16 or ph1024 file
+         with open(self.fn, 'rb') as f:
+             if mode == 'mmap':
+                 mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
+                 if samples == -1:
+                     tmp = np.frombuffer(mm, dtype=self.dtype)
+                 else:
+                     tmp = np.frombuffer(mm, dtype=self.dtype, count=samples*int(self.datasize/self.bpp), offset=0)
+             elif mode == 'read':
+                 if samples == -1:
+                     tmp = np.frombuffer(f.read(), dtype=self.dtype)
+                 else:
+                     # f.read() takes a byte count, so read samples * datasize bytes
+                     tmp = np.frombuffer(f.read(samples*self.datasize), dtype=self.dtype)
+             else:
+                 raise ValueError(f"mode ({mode}) is not supported.")
+         # reshape the data
+         tmp.shape = (-1, int(self.datasize/self.bpp))
+         # get data
+         self.data = tmp[:, int(self._md_size/self.bpp):]
+         if metadata == True and tmp.shape[0] != 0:
+             # we need to skip the '* ', which are 2 bytes
+             if self.bpp == 1:
+                 metadataraw = tmp[:, 0: int(self._md_size/self.bpp) - 2]
+             else:
+                 metadataraw = tmp[:, 0: int(self._md_size/self.bpp) - 1]
+             metadataraw = metadataraw.tobytes()
+             # convert bytes to int8
+             metadataraw = np.frombuffer(metadataraw, dtype=np.int8)
+             metadataraw.shape = (-1, self._md_size - 2)
+             # create metadata template
+             md_json = json.loads(metadataraw[0].tobytes().decode('utf-8'))
+             if self.dp == 'ph1024' or self.dp == 'img16' or self.dp == 'img8':
+                 # ph1024, img16 and img8 data have two levels of metadata
+                 template = _gen_dict_template(md_json)
+                 for key in template.keys():
+                     subtemplate = _gen_dict_template(md_json[key])
+                     template[key] = subtemplate
+                 self.metadata = template
+                 for k in metadata_loc.keys():
+                     for subk in metadata_loc[k].keys():
+                         # get the start and end offsets from metadata_loc
+                         r0 = metadata_loc[k][subk][0]
+                         r1 = metadata_loc[k][subk][1]
+                         tmp = metadataraw[:, r0:r1]
+                         # convert int8 to string
+                         tmp = tmp.view(f'S{r1-r0}')
+                         self.metadata[k][subk] = tmp.astype(np.uint64)
+             elif self.dp == 'ph256':
+                 template = _gen_dict_template(md_json)
+                 # ph256 data has one level of metadata
+                 self.metadata = template
+                 for k in metadata_loc.keys():
+                     # get the start and end offsets from metadata_loc
+                     r0 = metadata_loc[k][0]
+                     r1 = metadata_loc[k][1]
+                     tmp = metadataraw[:, r0:r1]
+                     # convert int8 to string
+                     tmp = tmp.view(f'S{r1-r0}')
+                     self.metadata[k] = tmp.astype(np.uint64)
+             else:
+                 raise Exception('Data type is not supported: %s' % (self.dp))
+         if self.dp == 'ph256':
+             for k in self.metadata.keys():
+                 self.metadata[k] = np.array(self.metadata[k].flat)
+         elif self.dp == 'img16' or self.dp == 'img8' or self.dp == 'ph1024':
+             for k in self.metadata.keys():
+                 for kk in self.metadata[k].keys():
+                     self.metadata[k][kk] = np.array(self.metadata[k][kk].flat)
+         if pixel != -1:
+             self.data = self.data[:, pixel]
+         return self.data, self.metadata
+
+
+
+ class qconfig(object):
+     '''
+     Description:
+         This class is used for reading config json files, including obs_config, daq_config, data_config, quabo_config...
+     '''
+     def __init__(self, fn):
+         self.config = {}
+         jfiles = glob(fn)
+         if len(jfiles) == 0:
+             raise Exception("The config file (%s) cannot be found!" % (fn))
+         for file in jfiles:
+             key = file.split('/')[-1][:-5]
+             with open(file, 'rb') as f:
+                 config = json.load(f)
+             self.config[key] = {}
+             for k, v in config.items():
+                 # if it's quabo_config*, we need to convert the str to int
+                 if key.startswith('quabo_config'):
+                     try:
+                         tmp = v.split(',')
+                     except:
+                         tmp = []
+                     if len(tmp) == 4:
+                         self.config[key][k] = []
+                         for vv in tmp:
+                             if vv.startswith('0x'):
+                                 self.config[key][k].append(int(vv, 16))
+                             else:
+                                 self.config[key][k].append(int(vv, 10))
+                     else:
+                         if v.startswith('0x'):
+                             self.config[key][k] = int(v, 16)
+                         else:
+                             self.config[key][k] = int(v, 10)
+                 else:
+                     self.config[key] = config
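A short usage sketch for the classes above, assuming the files exist; the file names, paths and glob pattern are illustrative:

    from pypff.io import datapff, hkpff, qconfig

    # Housekeeping: one dict per source (quabo, wrs, wps, gps), each holding lists of readings.
    hk = hkpff("hk.pff").readhk()

    # Data frames plus per-frame metadata (the file name follows the pattern datapff parses).
    d = datapff("start_2023-08-02T00:03:53Z.dp_ph256.bpp_2.module_1.seq_0.pff")
    data, md = d.readpff(samples=10, metadata=True)
    print(data.shape, md["pkt_num"][:5])

    # Config files: qconfig accepts a glob pattern and keys the result by file name.
    cfg = qconfig("config/*_config*.json").config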