sqil-core 0.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sqil_core-0.0.1/PKG-INFO +115 -0
- sqil_core-0.0.1/README.md +95 -0
- sqil_core-0.0.1/pyproject.toml +49 -0
- sqil_core-0.0.1/sqil_core/__init__.py +3 -0
- sqil_core-0.0.1/sqil_core/utils/__init__.py +6 -0
- sqil_core-0.0.1/sqil_core/utils/analysis.py +68 -0
- sqil_core-0.0.1/sqil_core/utils/const.py +38 -0
- sqil_core-0.0.1/sqil_core/utils/formatter.py +134 -0
- sqil_core-0.0.1/sqil_core/utils/read.py +156 -0
sqil_core-0.0.1/PKG-INFO
ADDED
@@ -0,0 +1,115 @@
|
|
1
|
+
Metadata-Version: 2.1
|
2
|
+
Name: sqil-core
|
3
|
+
Version: 0.0.1
|
4
|
+
Summary: The codebase of the SQIL group in EPFL
|
5
|
+
Author: Andrea Duina
|
6
|
+
Requires-Python: >=3.10,<4.0
|
7
|
+
Classifier: Programming Language :: Python :: 3
|
8
|
+
Classifier: Programming Language :: Python :: 3.10
|
9
|
+
Classifier: Programming Language :: Python :: 3.11
|
10
|
+
Classifier: Programming Language :: Python :: 3.12
|
11
|
+
Classifier: Programming Language :: Python :: 3.13
|
12
|
+
Requires-Dist: h5py (>=3.12.1,<4.0.0)
|
13
|
+
Requires-Dist: isort (==5.9.3)
|
14
|
+
Requires-Dist: matplotlib (>=3.9.3,<4.0.0)
|
15
|
+
Requires-Dist: numpy (>=2.2.0,<3.0.0)
|
16
|
+
Requires-Dist: scipy (>=1.14.1,<2.0.0)
|
17
|
+
Requires-Dist: seaborn (>=0.13.2,<0.14.0)
|
18
|
+
Description-Content-Type: text/markdown
|
19
|
+
|
20
|
+
# For users
|
21
|
+
|
22
|
+
## Installation
|
23
|
+
|
24
|
+
```bash
|
25
|
+
$ pip install sqil_core
|
26
|
+
```
|
27
|
+
|
28
|
+
## Usage
|
29
|
+
|
30
|
+
You can find all the functions available and examples in the documentation.
|
31
|
+
|
32
|
+
```python
|
33
|
+
import sqil_core as sqil
|
34
|
+
|
35
|
+
path = 'path to your data folder'
|
36
|
+
|
37
|
+
# Extract data
|
38
|
+
mag, phase, freq = sqil.extract_h5_data(path, ['mag_dB', 'phase', 'ro_freq'])
|
39
|
+
```
|
40
|
+
|
41
|
+
# For developers
|
42
|
+
|
43
|
+
## Development
|
44
|
+
|
45
|
+
1. **Install poetry if you haven't already**
|
46
|
+
```bash
|
47
|
+
$ pip install poetry
|
48
|
+
```
|
49
|
+
|
50
|
+
2. **Install the required packages using poetry**
|
51
|
+
```bash
|
52
|
+
$ poetry install
|
53
|
+
```
|
54
|
+
|
55
|
+
3. **Install the pre-commit hooks**
|
56
|
+
If you are on Windows you need to install git ([here](https://git-scm.com/downloads)) and add it to your Windows PATH.
|
57
|
+
After the installation open a new terminal.
|
58
|
+
```bash
|
59
|
+
$ poetry run pre-commit install
|
60
|
+
```
|
61
|
+
This will check if your python files are formatted correctly when you try to commit.
|
62
|
+
If that's not the case the commit will be canceled and the files will be automatically formatted.
|
63
|
+
Then you'll have to add and commit again the new files.
|
64
|
+
|
65
|
+
4. **Start the virtual environment**
|
66
|
+
```bash
|
67
|
+
$ poetry shell
|
68
|
+
```
|
69
|
+
To exit the virtual environment just use `exit`
|
70
|
+
|
71
|
+
#### Test your changes
|
72
|
+
|
73
|
+
```bash
|
74
|
+
$ pip install -e . --user
|
75
|
+
```
|
76
|
+
|
77
|
+
**Anaconda**
|
78
|
+
If you want to install in a specific anaconda environment
|
79
|
+
|
80
|
+
- from your poetry shell build the package
|
81
|
+
|
82
|
+
```bash
|
83
|
+
$ poetry run build
|
84
|
+
```
|
85
|
+
|
86
|
+
- open an anaconda shell
|
87
|
+
- activate the desired environment
|
88
|
+
- pip install the wheel file (.whl) in the dist folder of the sqil-core project
|
89
|
+
|
90
|
+
```bash
|
91
|
+
$ pip install PATH_TO_SQIL_CORE_FOLDER/dist/SQIL_CORE-VERSION.whl
|
92
|
+
```
|
93
|
+
|
94
|
+
If you're using a jupyter notebook remember to restart the kernel
|
95
|
+
|
96
|
+
## Build
|
97
|
+
|
98
|
+
```bash
|
99
|
+
$ poetry run build
|
100
|
+
```
|
101
|
+
|
102
|
+
## Docs
|
103
|
+
|
104
|
+
Serve docs
|
105
|
+
|
106
|
+
```bash
|
107
|
+
$ poetry run docs_serve
|
108
|
+
```
|
109
|
+
|
110
|
+
Build docs
|
111
|
+
|
112
|
+
```bash
|
113
|
+
$ poetry run docs_build
|
114
|
+
```
|
115
|
+
|
@@ -0,0 +1,95 @@
|
|
1
|
+
# For users
|
2
|
+
|
3
|
+
## Installation
|
4
|
+
|
5
|
+
```bash
|
6
|
+
$ pip install sqil_core
|
7
|
+
```
|
8
|
+
|
9
|
+
## Usage
|
10
|
+
|
11
|
+
You can find all the functions available and examples in the documentation.
|
12
|
+
|
13
|
+
```python
|
14
|
+
import sqil_core as sqil
|
15
|
+
|
16
|
+
path = 'path to your data folder'
|
17
|
+
|
18
|
+
# Extract data
|
19
|
+
mag, phase, freq = sqil.extract_h5_data(path, ['mag_dB', 'phase', 'ro_freq'])
|
20
|
+
```
|
21
|
+
|
22
|
+
# For developers
|
23
|
+
|
24
|
+
## Development
|
25
|
+
|
26
|
+
1. **Install poetry if you haven't already**
|
27
|
+
```bash
|
28
|
+
$ pip install poetry
|
29
|
+
```
|
30
|
+
|
31
|
+
2. **Install the required packages using poetry**
|
32
|
+
```bash
|
33
|
+
$ poetry install
|
34
|
+
```
|
35
|
+
|
36
|
+
3. **Install the pre-commit hooks**
|
37
|
+
If you are on Windows you need to install git ([here](https://git-scm.com/downloads)) and add it to your Windows PATH.
|
38
|
+
After the installation open a new terminal.
|
39
|
+
```bash
|
40
|
+
$ poetry run pre-commit install
|
41
|
+
```
|
42
|
+
This will check if your python files are formatted correctly when you try to commit.
|
43
|
+
If that's not the case the commit will be canceled and the files will be automatically formatted.
|
44
|
+
Then you'll have to add and commit again the new files.
|
45
|
+
|
46
|
+
4. **Start the virtual environment**
|
47
|
+
```bash
|
48
|
+
$ poetry shell
|
49
|
+
```
|
50
|
+
To exit the virtual environment just use `exit`
|
51
|
+
|
52
|
+
#### Test your changes
|
53
|
+
|
54
|
+
```bash
|
55
|
+
$ pip install -e . --user
|
56
|
+
```
|
57
|
+
|
58
|
+
**Anaconda**
|
59
|
+
If you want to install in a specific anaconda environment
|
60
|
+
|
61
|
+
- from your poetry shell build the package
|
62
|
+
|
63
|
+
```bash
|
64
|
+
$ poetry run build
|
65
|
+
```
|
66
|
+
|
67
|
+
- open an anaconda shell
|
68
|
+
- activate the desired environment
|
69
|
+
- pip install the wheel file (.whl) in the dist folder of the sqil-core project
|
70
|
+
|
71
|
+
```bash
|
72
|
+
$ pip install PATH_TO_SQIL_CORE_FOLDER/dist/SQIL_CORE-VERSION.whl
|
73
|
+
```
|
74
|
+
|
75
|
+
If you're using a jupyter notebook remember to restart the kernel
|
76
|
+
|
77
|
+
## Build
|
78
|
+
|
79
|
+
```bash
|
80
|
+
$ poetry run build
|
81
|
+
```
|
82
|
+
|
83
|
+
## Docs
|
84
|
+
|
85
|
+
Serve docs
|
86
|
+
|
87
|
+
```bash
|
88
|
+
$ poetry run docs_serve
|
89
|
+
```
|
90
|
+
|
91
|
+
Build docs
|
92
|
+
|
93
|
+
```bash
|
94
|
+
$ poetry run docs_build
|
95
|
+
```
|
@@ -0,0 +1,49 @@
|
|
1
|
+
[tool.poetry]
|
2
|
+
name = "sqil-core"
|
3
|
+
version = "0.0.1"
|
4
|
+
description = "The codebase of the SQIL group in EPFL"
|
5
|
+
authors = ["Andrea Duina"]
|
6
|
+
readme = "README.md"
|
7
|
+
|
8
|
+
[tool.poetry.scripts]
|
9
|
+
build = "scripts:build"
|
10
|
+
docs-serve = "scripts:docs_serve"
|
11
|
+
docs-build = "scripts:docs_build"
|
12
|
+
|
13
|
+
[tool.poetry.dependencies]
|
14
|
+
python = "^3.10"
|
15
|
+
numpy = "^2.2.0"
|
16
|
+
scipy = "^1.14.1"
|
17
|
+
h5py = "^3.12.1"
|
18
|
+
matplotlib = "^3.9.3"
|
19
|
+
seaborn = "^0.13.2"
|
20
|
+
isort = "5.9.3"
|
21
|
+
|
22
|
+
|
23
|
+
[tool.poetry.group.dev.dependencies]
|
24
|
+
black = "^24.10.0"
|
25
|
+
pre-commit = "^4.0.1"
|
26
|
+
mkdocs = "^1.6.1"
|
27
|
+
mkdocs-material = "^9.5.48"
|
28
|
+
mkdocstrings-python = "^1.12.2"
|
29
|
+
pytest = "^8.3.4"
|
30
|
+
|
31
|
+
[build-system]
|
32
|
+
requires = ["poetry-core"]
|
33
|
+
build-backend = "poetry.core.masonry.api"
|
34
|
+
|
35
|
+
[tool.black]
|
36
|
+
line-length = 88
|
37
|
+
target-version = ["py310"]
|
38
|
+
exclude = '''
|
39
|
+
/(
|
40
|
+
\.git
|
41
|
+
| \.venv
|
42
|
+
| build
|
43
|
+
| dist
|
44
|
+
)/
|
45
|
+
'''
|
46
|
+
|
47
|
+
[tool.isort]
|
48
|
+
profile = "black"
|
49
|
+
known_first_party = ["sqil-core"]
|
@@ -0,0 +1,68 @@
|
|
1
|
+
import numpy as np
|
2
|
+
|
3
|
+
|
4
|
+
def remove_offset(data: np.ndarray, avg: int = 3) -> np.ndarray:
    """Shift data so that each trace starts at (approximately) zero.

    The offset is estimated as the mean of the first `avg` points and
    subtracted from the whole vector (1D) or from each row (2D).

    Parameters
    ----------
    data : np.ndarray
        Input data, either a 1D vector or a 2D matrix.
    avg : int, optional
        The number of initial points to average when calculating
        the offset, by default 3.

    Returns
    -------
    np.ndarray
        The input data with the offset removed.
    """
    if data.ndim == 1:
        return data - np.mean(data[:avg])
    # 2D: one offset per row, kept as a column vector so it broadcasts
    row_offsets = np.mean(data[:, :avg], axis=1)
    return data - row_offsets[:, np.newaxis]
|
26
|
+
|
27
|
+
|
28
|
+
def estimate_linear_background(x: np.ndarray, data: np.ndarray, points_cut=0.1) -> list:
    """Least-squares linear fit on the initial fraction of the data.

    Fits y = a + b*x on the first ``points_cut`` fraction of the points,
    e.g. to estimate a slowly varying background.

    Parameters
    ----------
    x : np.ndarray
        The x-axis values.
    data : np.ndarray
        Input data, a 1D vector or a 2D matrix.
    points_cut : float, optional
        Fraction of initial points used for the fit, by default 0.1.

    Returns
    -------
    list
        Fit coefficients [intercept, slope] (one column per trace for 2D data).
    """
    one_dim = data.ndim == 1
    # Number of points along the sweep axis
    n_points = data.shape[0] if one_dim else data.shape[1]
    cut = int(n_points * points_cut)

    # Restrict the fit to the initial segment.
    # NOTE(review): in the 2D branch the cut is applied to axis 0 of both
    # x and data, while n_points is taken from axis 1 — confirm this matches
    # the intended data layout.
    x_fit = x[:cut] if one_dim else x[:cut, :]
    y_fit = data[:cut] if one_dim else data[:cut, :]

    # Design matrix [1, x] for y = a + b*x
    design = np.vstack([np.ones_like(x_fit), x_fit]).T
    solution, _residuals, _, _ = np.linalg.lstsq(
        design, y_fit if one_dim else y_fit.T, rcond=None
    )
    return solution
|
44
|
+
|
45
|
+
|
46
|
+
def remove_linear_background(
    x: np.ndarray, data: np.ndarray, points_cut=0.1
) -> np.ndarray:
    """Remove a linear background from the input data (e.g. the phase
    background of a spectroscopy).

    The background line is fitted on the first ``points_cut`` fraction of
    the points and subtracted over the whole range.

    Parameters
    ----------
    x : np.ndarray
        The x-axis values.
    data : np.ndarray
        Input data. Can be a 1D vector or a 2D matrix.
    points_cut : float, optional
        Fraction of initial points used for the fit, by default 0.1.

    Returns
    -------
    np.ndarray
        The input data with the linear background removed. The shape of the
        returned array matches the input `data`.
    """
    # Fit the line on the initial segment only
    coeffs = estimate_linear_background(x, data, points_cut)

    # Evaluate the fitted line over the full x range and subtract it
    design = np.vstack([np.ones_like(x), x]).T
    return data - (design @ coeffs).T
|
@@ -0,0 +1,38 @@
|
|
1
|
+
# Map from a power-of-ten exponent (multiple of 3) to its SI prefix.
# The micro prefix is LaTeX (r"\mu") since labels are rendered with math text.
# BUG FIX: femto (1e-15) and pico (1e-12) were swapped; per the SI standard
# f = 1e-15 and p = 1e-12. Also use a raw string for "\mu" to avoid the
# invalid-escape-sequence warning on modern Python.
EXP_UNIT_MAP = {
    -15: "f",
    -12: "p",
    -9: "n",
    -6: r"\mu",
    -3: "m",
    0: "",
    3: "k",
    6: "M",
    9: "G",
    12: "T",
    15: "P",
}
|
14
|
+
|
15
|
+
# Metadata for known experiment parameters, keyed by parameter ID
# (the keys used in param_dict.json). Each entry provides:
#   name   - human-readable name used in labels and titles
#   symbol - LaTeX symbol (without surrounding $)
#   unit   - base unit of measurement of the raw data
#   scale  - multiplicative factor applied to raw data for display
#            (e.g. 1e-9 presumably takes Hz to GHz — confirm against usage)
PARAM_METADATA = {
    "current": {"name": "Current", "symbol": "I", "unit": "A", "scale": 1e3},
    "ro_freq": {
        "name": "Readout frequency",
        "symbol": "f_{RO}",
        "unit": "Hz",
        "scale": 1e-9,
    },
    "ro_power": {
        "name": "Readout power",
        "symbol": "P_{RO}",
        "unit": "dBm",
        "scale": 1,
    },
    "qu_freq": {
        "name": "Qubit frequency",
        "symbol": "f_q",
        "unit": "Hz",
        "scale": 1e-9,
    },
    "qu_power": {"name": "Qubit power", "symbol": "P_q", "unit": "dBm", "scale": 1},
    "vna_bw": {"name": "VNA bandwidth", "symbol": "BW_{VNA}", "unit": "Hz", "scale": 1},
    "vna_avg": {"name": "VNA averages", "symbol": "avg_{VNA}", "unit": "", "scale": 1},
}
|
@@ -0,0 +1,134 @@
|
|
1
|
+
from decimal import ROUND_DOWN, Decimal
|
2
|
+
|
3
|
+
import numpy as np
|
4
|
+
|
5
|
+
from .const import EXP_UNIT_MAP, PARAM_METADATA
|
6
|
+
from .read import read_json
|
7
|
+
|
8
|
+
|
9
|
+
def _cut_to_significant_digits(number, n):
|
10
|
+
"""Cut a number to n significant digits."""
|
11
|
+
if number == 0:
|
12
|
+
return 0 # Zero has no significant digits
|
13
|
+
d = Decimal(str(number))
|
14
|
+
shift = d.adjusted() # Get the exponent of the number
|
15
|
+
rounded = d.scaleb(-shift).quantize(
|
16
|
+
Decimal("1e-{0}".format(n - 1)), rounding=ROUND_DOWN
|
17
|
+
)
|
18
|
+
return float(rounded.scaleb(shift))
|
19
|
+
|
20
|
+
|
21
|
+
def format_number(
    num: float | np.ndarray, precision: int = 3, unit: str = "", latex: bool = True
) -> str:
    """Format a number (or an array of numbers) in a nice way for printing.

    The exponent is snapped to a multiple of 3 so it maps to an SI prefix
    (e.g. 1.23e9 Hz -> "1.23 GHz").

    Parameters
    ----------
    num : float | np.ndarray
        Input number (or array). Should not be rescaled,
        e.g. input values in Hz, NOT GHz
    precision : int
        The number of digits of the output number. Must be >= 3.
    unit : str, optional
        Unit of measurement, by default ''
    latex : bool, optional
        Include Latex syntax, by default True

    Returns
    -------
    str
        Formatted number (a list of formatted strings for array input)
    """
    # Handle arrays by formatting each element with the same options.
    # BUG FIX: all options are forwarded; previously `unit` was passed
    # positionally as `precision`.
    if isinstance(num, (list, np.ndarray)):
        return [format_number(n, precision, unit, latex) for n in num]

    # Return non-numeric inputs unchanged
    if not isinstance(num, (int, float, complex)):
        return num

    # Split into mantissa and exponent
    exp_form = f"{num:.12e}"
    base, exponent = exp_form.split("e")
    # Make the exponent a multiple of 3 (SI prefix boundary)
    base = float(base) * 10 ** (int(exponent) % 3)
    exponent = (int(exponent) // 3) * 3

    # Apply precision to the base (at least 3 significant digits)
    if precision < 3:
        precision = 3
    base_precise = _cut_to_significant_digits(base, precision + 1)
    base_precise = np.round(
        base_precise, precision - len(str(base_precise).split(".")[0])
    )
    # Drop a trailing ".0" for integer values
    if int(base_precise) == float(base_precise):
        base_precise = int(base_precise)

    # Build the output string
    if unit:
        res = f"{base_precise}{'~' if latex else ' '}{EXP_UNIT_MAP[exponent]}{unit}"
    else:
        res = f"{base_precise}" + (f" x 10^{{{exponent}}}" if exponent != 0 else "")
    return f"${res}$" if latex else res
|
75
|
+
|
76
|
+
|
77
|
+
def get_name_and_unit(param_id: str) -> str:
    """Get the name and unit of measurement of a parameter, e.g. Frequency [GHz].

    Parameters
    ----------
    param_id : str
        Parameter ID, as defined in the param_dict.json file.

    Returns
    -------
    str
        Name and [unit]
    """
    meta = PARAM_METADATA[param_id]
    scale = meta.get("scale", 1)
    # Derive the SI-prefix exponent (multiple of 3) from the scale factor
    exponent = -(int(f"{scale:.0e}".split("e")[1]) // 3) * 3
    prefix = EXP_UNIT_MAP[exponent]
    return f"{meta['name']} [{prefix}{meta['unit']}]"
|
94
|
+
|
95
|
+
|
96
|
+
def get_x_id_by_plot_dim(exp_id: str, plot_dim: str, sweep_param: str | None) -> str:
|
97
|
+
if exp_id == "CW_onetone":
|
98
|
+
if plot_dim == "1":
|
99
|
+
return sweep_param or "ro_freq"
|
100
|
+
return "ro_freq"
|
101
|
+
|
102
|
+
|
103
|
+
def build_title(title: str, path: str, params: list[str]) -> str:
    """Build a plot title that includes the values of given parameters found in
    the param_dict.json file, e.g. One tone with I = 0.5 mA.

    Parameters
    ----------
    title : str
        Title of the plot to which the parameters will be appended.

    path: str
        Path to the folder containing the param_dict.json file.

    params : List[str]
        List of keys of parameters in the param_dict.json file.

    Returns
    -------
    str
        The original title followed by parameter values.
    """
    dic = read_json(f"{path}/param_dict.json")
    title += " with "
    for idx, param in enumerate(params):
        # Unknown parameters are rendered as "?" instead of failing
        if (param not in PARAM_METADATA) or (param not in dic):
            title += f"{param} = ? & "
            continue
        meta = PARAM_METADATA[param]
        # BUG FIX: `unit` must be passed by keyword; the second positional
        # argument of format_number is `precision`, not the unit string.
        value = format_number(dic[param], unit=meta["unit"])
        title += f"${meta['symbol']} =${value} & "
        # Wrap to a new line every two parameters
        if idx % 2 == 0 and idx != 0:
            title += "\n"
    # Strip the trailing " & "
    return title[0:-3]
|
@@ -0,0 +1,156 @@
|
|
1
|
+
import json
|
2
|
+
import os
|
3
|
+
|
4
|
+
import h5py
|
5
|
+
import numpy as np
|
6
|
+
|
7
|
+
from .const import PARAM_METADATA
|
8
|
+
|
9
|
+
|
10
|
+
def extract_h5_data(
    path: str, keys: list[str] | None = None
) -> dict | tuple[np.ndarray, ...]:
    """Extract data at the given keys from an HDF5 file. If no keys are
    given (None) returns the data field of the object.

    Parameters
    ----------
    path : str
        path to the HDF5 file or a folder in which is contained a data.ddh5 file
    keys : None or List, optional
        list of keys to extract from file['data'], by default None

    Returns
    -------
    Dict or Tuple[np.ndarray, ...]
        The full data dictionary if keys = None.
        The tuple with the requested keys otherwise.

    Example
    -------
    Extract the data object from the dataset:
    >>> data = extract_h5_data(path)
    Extracting only 'amp' and 'phase' from the dataset:
    >>> amp, phase = extract_h5_data(path, ['amp', 'phase'])
    Extracting only 'phase':
    >>> phase, = extract_h5_data(path, ['phase'])
    """
    # Folder paths resolve to the conventional data.ddh5 inside them
    if os.path.isdir(path):
        path = os.path.join(path, "data.ddh5")

    with h5py.File(path, "r") as h5file:
        data = h5file["data"]
        if not keys:
            # No keys requested: convert the whole data group
            return _h5_to_dict(data)
        # Collect the requested keys; missing or empty keys yield []
        available = data.keys()
        extracted = []
        for key in keys:
            key = str(key)
            if key and key in available:
                extracted.append(np.array(data[key][:]))
            else:
                extracted.append([])
        return tuple(extracted)
|
57
|
+
|
58
|
+
|
59
|
+
def _h5_to_dict(obj) -> dict:
    """Recursively convert an h5py group/file into a plain dictionary.

    Datasets are read fully into memory; nested groups become nested dicts.
    """
    data_dict = {}
    for key in obj.keys():
        item = obj[key]
        if isinstance(item, h5py.Dataset):
            data_dict[key] = item[:]
        elif isinstance(item, h5py.Group):
            # BUG FIX: recurse with _h5_to_dict. The previous call to
            # extract_h5_data expected a filesystem path and would raise a
            # TypeError when handed an h5py.Group.
            data_dict[key] = _h5_to_dict(item)
    return data_dict
|
69
|
+
|
70
|
+
|
71
|
+
def read_json(path: str) -> dict:
    """Read a JSON file and return its content as a dictionary."""
    with open(path) as file:
        return json.load(file)
|
76
|
+
|
77
|
+
|
78
|
+
class ParamInfo:
    """Parameter information for items of param_dict

    Attributes:
        id (str): param_dict key
        value (any): the value of the parameter
        name (str): full name of the parameter (e.g. Readout frequency)
        symbol (str): symbol of the parameter in Latex notation (e.g. f_{RO})
        unit (str): base unit of measurement (e.g. Hz)
        scale (int): the scale that should be generally applied to raw data (e.g. 1e-9 to take raw Hz to GHz)
    """

    def __init__(self, id, value):
        self.id = id
        self.value = value
        # Fall back to bare defaults when the parameter has no metadata entry
        meta = PARAM_METADATA.get(id, {})
        self.name = meta.get("name", id)
        self.symbol = meta.get("symbol", id)
        self.unit = meta.get("unit", "")
        self.scale = meta.get("scale", 1)

    def to_dict(self):
        """Convert ParamInfo to a dictionary."""
        return {
            "id": self.id,
            "value": self.value,
            "name": self.name,
            "symbol": self.symbol,
            "unit": self.unit,
            "scale": self.scale,
        }

    def __str__(self):
        """Return a JSON-formatted string of the object."""
        return json.dumps(self.to_dict())

    def __eq__(self, other):
        """Equal to another ParamInfo when both id and value match,
        or to a plain scalar/string when the value matches."""
        if isinstance(other, ParamInfo):
            return self.id == other.id and self.value == other.value
        if isinstance(other, (int, float, complex, str)):
            return self.value == other
        return False
|
123
|
+
|
124
|
+
|
125
|
+
# Shape of an enriched param_dict: values are ParamInfo objects,
# possibly nested inside sub-dictionaries.
ParamDict = dict[str, ParamInfo | dict[str, ParamInfo]]


def _enrich_param_dict(param_dict: dict) -> ParamDict:
    """Wrap every entry of param_dict in a ParamInfo, recursing into sub-dicts."""
    return {
        key: _enrich_param_dict(value)
        if isinstance(value, dict)
        else ParamInfo(key, value)
        for key, value in param_dict.items()
    }
|
138
|
+
|
139
|
+
|
140
|
+
def read_param_dict(path: str) -> ParamDict:
    """Read param_dict and include additional information for each entry.

    Parameters
    ----------
    path : str
        Path to the file or a folder in which is contained a param_dict.json file

    Returns
    -------
    ParamDict
        The param_dict with additional metadata
    """
    # Folder paths resolve to the conventional param_dict.json inside them
    if os.path.isdir(path):
        path = os.path.join(path, "param_dict.json")
    return _enrich_param_dict(read_json(path))
|