QuLab-2.5.3-cp312-cp312-macosx_10_13_universal2.whl → QuLab-2.5.5-cp312-cp312-macosx_10_13_universal2.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: QuLab
-Version: 2.5.3
+Version: 2.5.5
 Summary: contral instruments and manage data
 Author-email: feihoo87 <feihoo87@gmail.com>
 Maintainer-email: feihoo87 <feihoo87@gmail.com>
@@ -1,19 +1,19 @@
 qulab/__init__.py,sha256=XnSePkDDgfbmEpu5uXK6spygxaqVt9emMubHYKIjSwc,244
 qulab/__main__.py,sha256=fjaRSL_uUjNIzBGNgjlGswb9TJ2VD5qnkZHW3hItrD4,68
 qulab/dicttree.py,sha256=tRRMpGZYVOLw0TEByE3_2Ss8FdOmzuGL9e1DWbs8qoY,13684
-qulab/fun.cpython-312-darwin.so,sha256=qd8SnofcjuEYWZA-RI1Kh3f1nwX60ck2E4kudttPs9c,126864
+qulab/fun.cpython-312-darwin.so,sha256=QF8lQx9j67B87hsv9j23Rz8oaFV79gqCKRvj3Am3FtE,126864
 qulab/typing.py,sha256=5xCLfrp1aZpKpDy4p2arbFszw2eK3hGUjZa-XSvC_-8,69
-qulab/version.py,sha256=Znn1LprC8WFThPHcZ2uVYHDtCMP4LIy30e8v1CDWFvU,21
+qulab/version.py,sha256=MDur6AV_Dwa0PNW31e6addEkEKMAh_dHbKFdazRMbHo,21
 qulab/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 qulab/cli/commands.py,sha256=tgDIkkeIoasQXAifJZ6NU8jDgpNgb2a-B0C4nF0evrE,559
 qulab/cli/config.py,sha256=SdNmWzweWAdyk8M2oKYhMxnkaJ0qIayPlsLGCNlVqME,3108
 qulab/executor/__init__.py,sha256=LosPzOMaljSZY1thy_Fxtbrgq7uubJszMABEB7oM7tU,101
-qulab/executor/cli.py,sha256=5rbAOnG8Me46qeWh9LbutE3zIcPD-MZVqmYoWUY4l3o,6674
-qulab/executor/load.py,sha256=OZmE_Pj2XcHzRDw5-1lF-ZDoMVqpaR-dWXf1K39ThXQ,12925
-qulab/executor/schedule.py,sha256=4D8o68Cx6vCkpSTMHzULTwuuq1tS8unc2n8WdBA9oB0,12013
-qulab/executor/storage.py,sha256=s1xrI4bPTnyM4Nj0CkPxr3NdmnSp18LKnwm-ZoxR-Q0,7191
+qulab/executor/cli.py,sha256=ZGnQdUFZmU812gsnqbKhaJzwWLdoPt42CukPbZ7Yv8k,6684
+qulab/executor/load.py,sha256=t48vZQVAViIwGBPnoRbfBafv5jSR4ix3z4pkvyGyX-w,17759
+qulab/executor/schedule.py,sha256=DCZmqtNzrvsA1wOTxEldI22n6UWlHtxEx6QKsHn1S8k,13344
+qulab/executor/storage.py,sha256=u7uAmZU8D-pvd6UNek1zzxbUBIZQpftSiXdag6hlkG8,7533
 qulab/executor/transform.py,sha256=AazWdlkEoOBaUJpTYsT5J4f0RanzCEeo-ThwEg8BB4Y,1262
-qulab/executor/utils.py,sha256=RNDEo17M-n_tge7uLITHxmoNNYSTm9OES75wQygR8dM,4065
+qulab/executor/utils.py,sha256=VZ_VPYT2MFIfrCtfEW1I7T4NKFIbOvfVac8Sv377MTY,4221
 qulab/monitor/__init__.py,sha256=nTHelnDpxRS_fl_B38TsN0njgq8eVTEz9IAnN3NbDlM,42
 qulab/monitor/__main__.py,sha256=w3yUcqq195LzSnXTkQcuC1RSFRhy4oQ_PEBmucXguME,97
 qulab/monitor/config.py,sha256=fQ5JcsMApKc1UwANEnIvbDQZl8uYW0tle92SaYtX9lI,744
@@ -93,9 +93,9 @@ qulab/visualization/plot_seq.py,sha256=UWTS6p9nfX_7B8ehcYo6UnSTUCjkBsNU9jiOeW2ca
 qulab/visualization/qdat.py,sha256=ZeevBYWkzbww4xZnsjHhw7wRorJCBzbG0iEu-XQB4EA,5735
 qulab/visualization/rot3d.py,sha256=lMrEJlRLwYe6NMBlGkKYpp_V9CTipOAuDy6QW_cQK00,734
 qulab/visualization/widgets.py,sha256=6KkiTyQ8J-ei70LbPQZAK35wjktY47w2IveOa682ftA,3180
-QuLab-2.5.3.dist-info/LICENSE,sha256=PRzIKxZtpQcH7whTG6Egvzl1A0BvnSf30tmR2X2KrpA,1065
-QuLab-2.5.3.dist-info/METADATA,sha256=3Q3eK40_NlhnO2IeI2Ok-tPG7aEkDMQWj88cKwErDR0,3698
-QuLab-2.5.3.dist-info/WHEEL,sha256=iDXcyuxg-66TzzqHGH-kgw_HJdaJE_1RHznrvPNCSNs,115
-QuLab-2.5.3.dist-info/entry_points.txt,sha256=b0v1GXOwmxY-nCCsPN_rHZZvY9CtTbWqrGj8u1m8yHo,45
-QuLab-2.5.3.dist-info/top_level.txt,sha256=3T886LbAsbvjonu_TDdmgxKYUn939BVTRPxPl9r4cEg,6
-QuLab-2.5.3.dist-info/RECORD,,
+QuLab-2.5.5.dist-info/LICENSE,sha256=PRzIKxZtpQcH7whTG6Egvzl1A0BvnSf30tmR2X2KrpA,1065
+QuLab-2.5.5.dist-info/METADATA,sha256=yaWVrSEOko53q9lByNffnVDYleUm9__SuZM8Rld3NdQ,3698
+QuLab-2.5.5.dist-info/WHEEL,sha256=iDXcyuxg-66TzzqHGH-kgw_HJdaJE_1RHznrvPNCSNs,115
+QuLab-2.5.5.dist-info/entry_points.txt,sha256=b0v1GXOwmxY-nCCsPN_rHZZvY9CtTbWqrGj8u1m8yHo,45
+QuLab-2.5.5.dist-info/top_level.txt,sha256=3T886LbAsbvjonu_TDdmgxKYUn939BVTRPxPl9r4cEg,6
+QuLab-2.5.5.dist-info/RECORD,,
qulab/executor/cli.py CHANGED
@@ -83,7 +83,7 @@ def create(workflow, code):
     deps = find_unreferenced_workflows(code)
 
     with open(fname, 'w') as f:
-        f.write(workflow_template(list(deps)))
+        f.write(workflow_template(workflow, list(deps)))
     click.echo(f'{workflow} created')
 
 
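The practical effect of this one-line change is that the workflow's own name is now threaded into the generated template, so the generated script can log which workflow it belongs to (see the matching `workflow_template` change in qulab/executor/utils.py further down). A minimal sketch of the new call pattern follows; the file names are made up for illustration, and `workflow_template` is assumed to be importable from `qulab.executor.utils` as defined in this package:

```python
# Illustrative sketch of the 2.5.5 call pattern, not package code.
from pathlib import Path

from qulab.executor.utils import workflow_template  # template shown later in this diff

workflow = "ramsey.py"            # hypothetical workflow name from the CLI
deps = ["rabi.py"]                # hypothetical dependencies found in the code dir
fname = Path("code") / workflow

fname.parent.mkdir(parents=True, exist_ok=True)
# 2.5.5 passes the workflow name as the first argument; 2.5.3 passed only the deps.
fname.write_text(workflow_template(workflow, list(deps)))
```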
qulab/executor/load.py CHANGED
@@ -11,6 +11,7 @@ import warnings
 from importlib.util import module_from_spec, spec_from_file_location
 from pathlib import Path
 from types import ModuleType
+from typing import Any
 
 from loguru import logger
 
@@ -107,7 +108,8 @@ def verify_calibrate_method(module: WorkflowType):
 def verify_check_method(module: WorkflowType):
     if not hasattr(module, 'check'):
         warnings.warn(
-            f"Workflow {module.__file__} does not have 'check' function")
+            f"Workflow {module.__file__} does not have 'check' function, it will be set to 'calibrate' function"
+        )
     else:
         if not can_call_without_args(module.check):
             raise AttributeError(
@@ -120,6 +122,93 @@ def verify_check_method(module: WorkflowType):
         )
 
 
+def verify_dependence_key(workflow: str | tuple[str, dict[str, Any]]
+                          | tuple[str, str, dict[str, Any]]):
+    if isinstance(workflow, str):
+        return
+    if not isinstance(workflow, tuple) or len(workflow) not in [2, 3]:
+        raise ValueError(f"Invalid workflow: {workflow}")
+
+    if len(workflow) == 2:
+        file_name, mapping = workflow
+        if not Path(file_name).exists():
+            raise FileNotFoundError(f"File not found: {file_name}")
+    elif len(workflow) == 3:
+        template_path, target_path, mapping = workflow
+        if not Path(template_path).exists():
+            raise FileNotFoundError(f"File not found: {template_path}")
+        if not isinstance(target_path, (Path, str)) or target_path == '':
+            raise ValueError(f"Invalid target_path: {target_path}")
+        if not isinstance(target_path, (Path, str)):
+            raise ValueError(f"Invalid target_path: {target_path}")
+        if Path(target_path).suffix != '.py':
+            raise ValueError(
+                f"Invalid target_path: {target_path}. Only .py file is supported"
+            )
+    else:
+        raise ValueError(f"Invalid workflow: {workflow}")
+
+    if not isinstance(mapping, dict):
+        raise ValueError(f"Invalid mapping: {mapping}")
+
+    for key, value in mapping.items():
+        if not isinstance(key, str):
+            raise ValueError(
+                f"Invalid key: {key}, should be str type and valid identifier")
+        if not key.isidentifier():
+            raise ValueError(f"Invalid key: {key}, should be identifier")
+        try:
+            pickle.dumps(value)
+        except Exception as e:
+            raise ValueError(
+                f"Invalid value: {key}: {value}, should be pickleable") from e
+    return
+
+
+def verify_depends(module: WorkflowType):
+    if not hasattr(module, 'depends'):
+        return
+
+    deps = []
+
+    if callable(module.depends):
+        if not can_call_without_args(module.depends):
+            raise AttributeError(
+                f"Workflow {module.__file__} 'depends' function should not have any parameters"
+            )
+        deps = list(module.depends())
+    elif isinstance(module.depends, (list, tuple)):
+        deps = module.depends
+    else:
+        raise AttributeError(
+            f"Workflow {module.__file__} 'depends' should be a callable or a list"
+        )
+    for workflow in deps:
+        verify_dependence_key(workflow)
+
+
+def verify_entries(module: WorkflowType):
+    if not hasattr(module, 'entries'):
+        return
+
+    deps = []
+
+    if callable(module.entries):
+        if not can_call_without_args(module.entries):
+            raise AttributeError(
+                f"Workflow {module.__file__} 'entries' function should not have any parameters"
+            )
+        deps = list(module.entries())
+    elif isinstance(module.entries, (list, tuple)):
+        deps = module.entries
+    else:
+        raise AttributeError(
+            f"Workflow {module.__file__} 'entries' should be a callable or a list"
+        )
+    for workflow in deps:
+        verify_dependence_key(workflow)
+
+
 def is_workflow(module: ModuleType) -> bool:
     try:
         verify_calibrate_method(module)
@@ -218,14 +307,15 @@ def load_workflow_from_file(file_name: str,
     module.__mtime__ = (base_path / path).stat().st_mtime
 
     if hasattr(module, 'entries'):
+        verify_entries(module)
         return module
 
     if not hasattr(module, '__timeout__'):
         module.__timeout__ = None
 
     if not hasattr(module, 'depends'):
-        module.depends = lambda: [[]]
-
+        module.depends = lambda: []
+    verify_depends(module)
     verify_calibrate_method(module)
     verify_check_method(module)
 
@@ -355,18 +445,50 @@ def load_workflow(workflow: str | tuple[str, dict],
 
 def get_dependents(workflow: WorkflowType,
                    code_path: str | Path) -> list[WorkflowType]:
-    return [
-        load_workflow(n, code_path, mtime=workflow.__mtime__)
-        for n in workflow.depends()
-    ]
+    if callable(getattr(workflow, 'depends', None)):
+        if not can_call_without_args(workflow.depends):
+            raise AttributeError(
+                f'Workflow {workflow.__workflow_id__} "depends" function should not have any parameters'
+            )
+        return [
+            load_workflow(n, code_path, mtime=workflow.__mtime__)
+            for n in workflow.depends()
+        ]
+    elif isinstance(getattr(workflow, 'depends', None), (list, tuple)):
+        return [
+            load_workflow(n, code_path, mtime=workflow.__mtime__)
+            for n in workflow.depends
+        ]
+    elif getattr(workflow, 'entries', None) is None:
+        return []
+    else:
+        raise AttributeError(
+            f'Workflow {workflow.__workflow_id__} "depends" should be a callable or a list'
+        )
 
 
 def get_entries(workflow: WorkflowType,
                 code_path: str | Path) -> list[WorkflowType]:
-    return [
-        load_workflow(n, code_path, mtime=workflow.__mtime__)
-        for n in workflow.entries()
-    ]
+    if callable(getattr(workflow, 'entries', None)):
+        if not can_call_without_args(workflow.entries):
+            raise AttributeError(
+                f'Workflow {workflow.__workflow_id__} "entries" function should not have any parameters'
+            )
+        return [
+            load_workflow(n, code_path, mtime=workflow.__mtime__)
+            for n in workflow.entries()
+        ]
+    elif isinstance(getattr(workflow, 'entries', None), (list, tuple)):
+        return [
+            load_workflow(n, code_path, mtime=workflow.__mtime__)
+            for n in workflow.entries
+        ]
+    elif getattr(workflow, 'entries', None) is None:
+        return []
+    else:
+        raise AttributeError(
+            f'Workflow {workflow.__workflow_id__} "entries" should be a callable or a list'
        )
 
 
 def make_graph(workflow: WorkflowType, graph: dict, code_path: str | Path):
@@ -383,5 +505,7 @@ def make_graph(workflow: WorkflowType, graph: dict, code_path: str | Path):
     for w in get_dependents(workflow, code_path):
         graph[workflow.__workflow_id__].append(w.__workflow_id__)
         make_graph(w, graph=graph, code_path=code_path)
+    if graph[workflow.__workflow_id__] == []:
+        del graph[workflow.__workflow_id__]
 
     return graph
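Taken together, the load.py changes mean a workflow module may declare `depends` (and `entries`) either as a zero-argument callable or as a plain list/tuple, and `verify_dependence_key` accepts each entry as a bare path, a `(file, mapping)` pair, or a `(template, target, mapping)` triple whose mapping has identifier keys and picklable values. A minimal sketch of a workflow module using the new list form; the file names and parameter values are hypothetical:

```python
# Hypothetical workflow module (e.g. code/ramsey.py), illustrating the
# dependency forms accepted by verify_dependence_key in 2.5.5.
depends = [
    "rabi.py",                                          # bare path
    ("spectrum.py", {"power": -20}),                    # (file, mapping)
    ("templates/t1.py", "t1_q0.py", {"qubit": "Q0"}),   # (template, target, mapping)
]


def calibrate():
    # run the actual experiment and return the raw data
    ...


def analyze(result, history=None):
    # fit the data and fill in result.parameters
    return result
```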
qulab/executor/schedule.py CHANGED
@@ -1,4 +1,5 @@
 import functools
+import pickle
 import uuid
 from datetime import datetime, timedelta
 from pathlib import Path
@@ -15,6 +16,27 @@ class CalibrationFailedError(Exception):
     pass
 
 
+def is_pickleable(obj) -> bool:
+    try:
+        pickle.dumps(obj)
+        return True
+    except:
+        return False
+
+
+def veryfy_analyzed_result(result: Result, script: str, method: str):
+    if not isinstance(result, Result):
+        raise TypeError(f'"{script}" : "{method}" must return a Result object')
+    if not is_pickleable(result.parameters):
+        raise TypeError(
+            f'"{script}" : "{method}" return not pickleable data in .parameters'
+        )
+    if not is_pickleable(result.other_infomation):
+        raise TypeError(
+            f'"{script}" : "{method}" return not pickleable data in .other_infomation'
+        )
+
+
 def check_state(workflow: WorkflowType, code_path: str | Path,
                 state_path: str | Path) -> bool:
     """
@@ -73,9 +95,11 @@ def call_analyzer(node,
                   plot=False) -> Result:
     if check:
         result = node.check_analyze(result, history=history)
+        veryfy_analyzed_result(result, node.__workflow_id__, "check_analyze")
         result.fully_calibrated = False
     else:
         result = node.analyze(result, history=history)
+        veryfy_analyzed_result(result, node.__workflow_id__, "analyze")
         result.fully_calibrated = True
     if plot:
         call_plot(node, result)
@@ -122,6 +146,10 @@ def check_data(workflow: WorkflowType, code_path: str | Path,
         logger.debug(
             f'Checking "{workflow.__workflow_id__}" with "check" method ...')
         data = workflow.check()
+        if not is_pickleable(data):
+            raise TypeError(
+                f'"{workflow.__workflow_id__}" : "check" return not pickleable data'
+            )
         result = Result()
         result.data = data
         #save_result(workflow.__workflow_id__, result, state_path)
@@ -147,6 +175,10 @@ def check_data(workflow: WorkflowType, code_path: str | Path,
             f'Checking "{workflow.__workflow_id__}" with "calibrate" method ...'
         )
         data = workflow.calibrate()
+        if not is_pickleable(data):
+            raise TypeError(
+                f'"{workflow.__workflow_id__}" : "calibrate" return not pickleable data'
+            )
         result = Result()
         result.data = data
         save_result(workflow.__workflow_id__, result, state_path)
@@ -172,6 +204,10 @@ def calibrate(workflow: WorkflowType, code_path: str | Path,
 
     logger.debug(f'Calibrating "{workflow.__workflow_id__}" ...')
     data = workflow.calibrate()
+    if not is_pickleable(data):
+        raise TypeError(
+            f'"{workflow.__workflow_id__}" : "calibrate" return not pickleable data'
+        )
     result = Result()
     result.data = data
     save_result(workflow.__workflow_id__, result, state_path)
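The new `is_pickleable` / `veryfy_analyzed_result` checks turn silent serialization problems into early `TypeError`s: whatever `calibrate`/`check` return as data, and whatever `analyze`/`check_analyze` store in `result.parameters` and `result.other_infomation`, must survive `pickle.dumps`. A standalone sketch (not package code) of the kind of values that now get rejected:

```python
import pickle


def is_pickleable(obj) -> bool:
    # Same idea as the helper added in schedule.py.
    try:
        pickle.dumps(obj)
        return True
    except Exception:
        return False


print(is_pickleable({"freq": 5.1e9}))        # True: plain numbers and dicts are fine
print(is_pickleable(lambda x: x + 1))        # False: lambdas cannot be pickled
print(is_pickleable((i for i in range(3))))  # False: generators cannot be pickled
```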
qulab/executor/storage.py CHANGED
@@ -3,6 +3,7 @@ import pickle
 import uuid
 from dataclasses import dataclass, field
 from datetime import datetime, timedelta
+from functools import lru_cache
 from pathlib import Path
 from typing import Any, Literal
 
@@ -181,36 +182,42 @@ def get_heads(base_path: str | Path) -> Path | None:
 
 def create_index(name: str,
                  base_path: str | Path,
                  context: str,
-                 width: int,
+                 width: int = -1,
                  start: int = 0):
-    path = Path(base_path) / "index" / f"{name}.seq"
-    if path.exists():
-        with open(path, "r") as f:
-            index = int(f.read())
+
+    path = Path(base_path) / "index" / name
+    if width == -1:
+        width = len(context)
     else:
-        index = start
-        path.parent.mkdir(parents=True, exist_ok=True)
-    with open(path, "w") as f:
-        f.write(str(index + 1))
+        width = max(width, len(context))
 
-    path = Path(base_path) / "index" / f"{name}.width"
-    with open(path, "w") as f:
-        f.write(str(width))
+    if path.with_suffix('.width').exists():
+        width = int(path.with_suffix('.width').read_text())
+        index = int(path.with_suffix('.seq').read_text())
+    else:
+        index = start
+    if width < len(context):
+        raise ValueError(
+            f"Context '{context}' is too long, existing width of '{name}' is {width}."
+        )
+    if not path.with_suffix('.width').exists():
+        path.parent.mkdir(parents=True, exist_ok=True)
+        path.with_suffix('.width').write_text(str(width))
 
-    path = Path(base_path) / "index" / f"{name}.idx"
-    with open(path, "a") as f:
+    path.with_suffix('.seq').write_text(str(index + 1))
 
+    with path.with_suffix('.idx').open("a") as f:
         f.write(f"{context.ljust(width)}\n")
 
     return index
 
 
+@lru_cache(maxsize=4096)
 def query_index(name: str, base_path: str | Path, index: int):
-    path = Path(base_path) / "index" / f"{name}.width"
-    with open(path, "r") as f:
-        width = int(f.read())
-    path = Path(base_path) / "index" / f"{name}.idx"
-    with open(path, "r") as f:
+    path = Path(base_path) / "index" / name
+    width = int(path.with_suffix('.width').read_text())
+
+    with path.with_suffix('.idx').open("r") as f:
         f.seek(index * (width + 1))
         context = f.read(width)
     return context.rstrip()
@@ -219,5 +226,8 @@ def query_index(name: str, base_path: str | Path, index: int):
 def get_result_by_index(
         index: int, base_path: str | Path = get_config_value("data", Path)
 ) -> Result | None:
-    path = query_index("result", base_path, index)
-    return load_result(path, base_path)
+    try:
+        path = query_index("result", base_path, index)
+        return load_result(path, base_path)
+    except:
+        return None
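After this rewrite, each index `name` is backed by three files under `base_path/index/`: `name.width` (the fixed record width), `name.seq` (the next sequence number), and `name.idx` (one space-padded, newline-terminated record per entry). The fixed width is what lets `query_index` seek straight to `index * (width + 1)`, and the added `lru_cache` memoizes repeated lookups. A rough sketch of reading such an index by hand, mirroring the logic above (paths are illustrative):

```python
from pathlib import Path


def read_index_entry(base_path: str, name: str, index: int) -> str:
    # Fixed-width records give O(1) random access without loading the whole file.
    path = Path(base_path) / "index" / name
    width = int(path.with_suffix(".width").read_text())
    with path.with_suffix(".idx").open("r") as f:
        f.seek(index * (width + 1))  # +1 accounts for the trailing newline
        return f.read(width).rstrip()
```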
qulab/executor/utils.py CHANGED
@@ -30,7 +30,7 @@ def dependent_tree(node: str, code_path: str | Path) -> dict[str, list[str]]:
     return tree
 
 
-def workflow_template(deps: list[str]) -> str:
+def workflow_template(workflow: str, deps: list[str]) -> str:
     return f"""def VAR(s): pass # Has no real effect; only suppresses editor warnings.
 
 import numpy as np
@@ -47,7 +47,7 @@ def depends():
 
 
 def calibrate():
-    logger.info(f"run {{__name__}}")
+    logger.info(f"running {workflow} ...")
 
     # calibrate is a full calibration experiment, such as power Rabi, Ramsey, etc.
     # You need enough sweep points so that the subsequent analyze can fit suitable parameters.
@@ -58,10 +58,11 @@ def calibrate():
     for i in x:
         y.append(np.sin(i))
 
+    logger.info(f"running {workflow} ... finished!")
     return x, y
 
 
-def analyze(result: Result, history: Result | None) -> Result:
+def analyze(result: Result, history: Result | None = None) -> Result:
     \"\"\"
     Analyze the calibration result.
 
@@ -93,7 +94,7 @@ def analyze(result: Result, history: Result | None) -> Result:
 
 
 def check():
-    logger.info(f"check {{__name__}}")
+    logger.info(f"checking {workflow} ...")
 
     # check is a quick check experiment, used to determine whether the calibration has gone stale.
     # You only need a few sweep points, just enough for the subsequent check_analyze to tell whether the parameters have drifted; the data
@@ -105,10 +106,11 @@ def check():
     for i in x:
         y.append(np.sin(i))
 
+    logger.info(f"checking {workflow} ... finished!")
     return x, y
 
 
-def check_analyze(result: Result, history: Result) -> Result:
+def check_analyze(result: Result, history: Result | None = None) -> Result:
     \"\"\"
     Analyze the check result.
 
qulab/fun.cpython-312-darwin.so CHANGED (binary file; no text diff shown)
qulab/version.py CHANGED
@@ -1 +1 @@
-__version__ = "2.5.3"
+__version__ = "2.5.5"