atlas-schema 0.2.3__tar.gz → 0.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {atlas_schema-0.2.3 → atlas_schema-0.3.0}/PKG-INFO +18 -19
- {atlas_schema-0.2.3 → atlas_schema-0.3.0}/README.md +15 -16
- {atlas_schema-0.2.3 → atlas_schema-0.3.0}/pyproject.toml +8 -3
- {atlas_schema-0.2.3 → atlas_schema-0.3.0}/src/atlas_schema/_version.py +9 -4
- {atlas_schema-0.2.3 → atlas_schema-0.3.0}/src/atlas_schema/methods.py +26 -21
- atlas_schema-0.3.0/src/atlas_schema/schema.py +427 -0
- {atlas_schema-0.2.3 → atlas_schema-0.3.0}/src/atlas_schema/typing_compat.py +2 -2
- {atlas_schema-0.2.3 → atlas_schema-0.3.0}/src/atlas_schema/utils.py +6 -7
- atlas_schema-0.2.3/src/atlas_schema/schema.py +0 -214
- {atlas_schema-0.2.3 → atlas_schema-0.3.0}/.gitignore +0 -0
- {atlas_schema-0.2.3 → atlas_schema-0.3.0}/LICENSE +0 -0
- {atlas_schema-0.2.3 → atlas_schema-0.3.0}/src/atlas_schema/__init__.py +0 -0
- {atlas_schema-0.2.3 → atlas_schema-0.3.0}/src/atlas_schema/_version.pyi +0 -0
- {atlas_schema-0.2.3 → atlas_schema-0.3.0}/src/atlas_schema/enums.py +0 -0
- {atlas_schema-0.2.3 → atlas_schema-0.3.0}/src/atlas_schema/py.typed +0 -0
{atlas_schema-0.2.3 → atlas_schema-0.3.0}/PKG-INFO

@@ -1,11 +1,11 @@
 Metadata-Version: 2.4
 Name: atlas-schema
-Version: 0.2.3
+Version: 0.3.0
 Summary: Helper python package for ATLAS Common NTuple Analysis work.
 Project-URL: Homepage, https://github.com/scipp-atlas/atlas-schema
 Project-URL: Bug Tracker, https://github.com/scipp-atlas/atlas-schema/issues
 Project-URL: Discussions, https://github.com/scipp-atlas/atlas-schema/discussions
-Project-URL: Documentation, https://atlas-schema.readthedocs.io/en/v0.2.3/
+Project-URL: Documentation, https://atlas-schema.readthedocs.io/en/v0.3.0/
 Project-URL: Releases, https://github.com/scipp-atlas/atlas-schema/releases
 Project-URL: Release Notes, https://atlas-schema.readthedocs.io/en/latest/history.html
 Author-email: Giordon Stark <kratsg@gmail.com>
@@ -227,7 +227,7 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Topic :: Scientific/Engineering
 Classifier: Typing :: Typed
 Requires-Python: >=3.9
-Requires-Dist: coffea[dask]>=
+Requires-Dist: coffea[dask]>=2025.7.0
 Requires-Dist: particle>=0.25.0
 Provides-Extra: dev
 Requires-Dist: pytest-cov>=3; extra == 'dev'
@@ -251,7 +251,7 @@ Requires-Dist: tbump>=6.7.0; extra == 'test'
 Requires-Dist: twine; extra == 'test'
 Description-Content-Type: text/markdown

-# atlas-schema v0.2.3
+# atlas-schema v0.3.0

 [![Actions Status][actions-badge]][actions-link]
 [![Documentation Status][rtd-badge]][rtd-link]
@@ -335,11 +335,9 @@ like below:

 ```python
 import awkward as ak
-import
-import hist.dask as had
+from hist import Hist
 import matplotlib.pyplot as plt
 from coffea import processor
-from coffea.nanoevents import NanoEventsFactory
 from distributed import Client

 from atlas_schema.schema import NtupleSchema
@@ -352,7 +350,7 @@ class MyFirstProcessor(processor.ProcessorABC):
     def process(self, events):
         dataset = events.metadata["dataset"]
         h_ph_pt = (
-
+            Hist.new.StrCat(["all", "pass", "fail"], name="isEM")
             .Regular(200, 0.0, 2000.0, name="pt", label="$pt_{\gamma}$ [GeV]")
             .Int64()
         )
@@ -376,17 +374,18 @@ class MyFirstProcessor(processor.ProcessorABC):
 if __name__ == "__main__":
     client = Client()

-
-
-
-
-
-
-
-
-    out =
-
-    print(
+    fileset = {"700352.Zqqgamma.mc20d.v1": {"files": {"ntuple.root": "analysis"}}}
+
+    run = processor.Runner(
+        executor=processor.IterativeExecutor(compression=None),
+        schema=NtupleSchema,
+        savemetrics=True,
+    )
+
+    out, metrics = run(fileset, processor_instance=MyFirstProcessor())
+
+    print(out)
+    print(metrics)

     fig, ax = plt.subplots()
     computed["700352.Zqqgamma.mc20d.v1"]["ph_pt"].plot1d(ax=ax)
{atlas_schema-0.2.3 → atlas_schema-0.3.0}/README.md

@@ -1,4 +1,4 @@
-# atlas-schema v0.2.3
+# atlas-schema v0.3.0

 [![Actions Status][actions-badge]][actions-link]
 [![Documentation Status][rtd-badge]][rtd-link]
@@ -82,11 +82,9 @@ like below:

 ```python
 import awkward as ak
-import
-import hist.dask as had
+from hist import Hist
 import matplotlib.pyplot as plt
 from coffea import processor
-from coffea.nanoevents import NanoEventsFactory
 from distributed import Client

 from atlas_schema.schema import NtupleSchema
@@ -99,7 +97,7 @@ class MyFirstProcessor(processor.ProcessorABC):
     def process(self, events):
         dataset = events.metadata["dataset"]
         h_ph_pt = (
-
+            Hist.new.StrCat(["all", "pass", "fail"], name="isEM")
             .Regular(200, 0.0, 2000.0, name="pt", label="$pt_{\gamma}$ [GeV]")
             .Int64()
         )
@@ -123,17 +121,18 @@ class MyFirstProcessor(processor.ProcessorABC):
 if __name__ == "__main__":
     client = Client()

-
-
-
-
-
-
-
-
-    out =
-
-    print(
+    fileset = {"700352.Zqqgamma.mc20d.v1": {"files": {"ntuple.root": "analysis"}}}
+
+    run = processor.Runner(
+        executor=processor.IterativeExecutor(compression=None),
+        schema=NtupleSchema,
+        savemetrics=True,
+    )
+
+    out, metrics = run(fileset, processor_instance=MyFirstProcessor())
+
+    print(out)
+    print(metrics)

     fig, ax = plt.subplots()
     computed["700352.Zqqgamma.mc20d.v1"]["ph_pt"].plot1d(ax=ax)
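The README example now runs eagerly through `processor.Runner` instead of the previous dask-based workflow (`hist.dask`, `NanoEventsFactory`). For a quick interactive check without writing a processor, the schema class can also be handed straight to `NanoEventsFactory`, as the new `schema.py` docstring suggests. The sketch below is not taken from the README; the file name `ntuple.root` and tree name `analysis` are placeholders that mirror the fileset above.

```python
# Minimal sketch: open a single ntuple with the schema applied.
# "ntuple.root" and the "analysis" tree name are placeholders.
from coffea.nanoevents import NanoEventsFactory

from atlas_schema.schema import NtupleSchema

events = NanoEventsFactory.from_root(
    {"ntuple.root": "analysis"},  # file -> tree mapping, mirroring the fileset above
    schemaclass=NtupleSchema,
).events()

print(events.fields)  # top-level collections, e.g. ['el', 'jet', 'met', 'ph', ...]
```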
{atlas_schema-0.2.3 → atlas_schema-0.3.0}/pyproject.toml

@@ -28,7 +28,7 @@ classifiers = [
     "Typing :: Typed",
 ]
 dynamic = ["version"]
-dependencies = ["coffea[dask] >=
+dependencies = ["coffea[dask] >= 2025.7.0", "particle >= 0.25.0"]

 [project.optional-dependencies]
 test = [
@@ -60,7 +60,7 @@ docs = [
 Homepage = "https://github.com/scipp-atlas/atlas-schema"
 "Bug Tracker" = "https://github.com/scipp-atlas/atlas-schema/issues"
 Discussions = "https://github.com/scipp-atlas/atlas-schema/discussions"
-Documentation = "https://atlas-schema.readthedocs.io/en/v0.2.3/"
+Documentation = "https://atlas-schema.readthedocs.io/en/v0.3.0/"
 Releases = "https://github.com/scipp-atlas/atlas-schema/releases"
 "Release Notes" = "https://atlas-schema.readthedocs.io/en/latest/history.html"

@@ -111,14 +111,19 @@ addopts = [
 ]
 xfail_strict = true
 filterwarnings = [
-
+    "error",
+    "ignore:In version 2025.1.0 .*, this will be an error:FutureWarning",
 ]
+
 log_cli_level = "INFO"
 testpaths = [
     "src",
     "tests",
     "docs",
 ]
+norecursedirs = [
+    "tests/helpers"
+]

 [tool.coverage]
 run.source = ["atlas_schema"]
{atlas_schema-0.2.3 → atlas_schema-0.3.0}/src/atlas_schema/_version.py

@@ -1,8 +1,13 @@
-# file generated by setuptools_scm
+# file generated by setuptools-scm
 # don't change, don't track in version control
+
+__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
+
 TYPE_CHECKING = False
 if TYPE_CHECKING:
-    from typing import Tuple, Union
+    from typing import Tuple
+    from typing import Union
+
     VERSION_TUPLE = Tuple[Union[int, str], ...]
 else:
     VERSION_TUPLE = object
@@ -12,5 +17,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE

-__version__ = version = '0.2.3'
-__version_tuple__ = version_tuple = (0, 2, 3)
+__version__ = version = '0.3.0'
+__version_tuple__ = version_tuple = (0, 3, 0)
{atlas_schema-0.2.3 → atlas_schema-0.3.0}/src/atlas_schema/methods.py

@@ -8,7 +8,6 @@ from operator import ior
 import awkward
 import particle
 from coffea.nanoevents.methods import base, candidate, vector
-from dask_awkward import dask_method

 from atlas_schema.enums import PhotonID
 from atlas_schema.typing_compat import Behavior
@@ -63,22 +62,9 @@ class Particle(vector.PtEtaPhiMLorentzVector):
     - '{obj}_select'
     """

-    @property
-    def mass(self):
-        r"""Invariant mass (+, -, -, -)
-
-        :math:`\sqrt{t^2-x^2-y^2-z^2}`
-        """
-        return self["mass"] / 1.0e3
-
-    @dask_method
     def passes(self, name):
         return self[f"select_{name}"] == 1

-    @passes.dask
-    def passes(self, dask_array, name):
-        return dask_array[f"select_{name}"] == 1
-
     # NB: fields with the name 'pt' take precedence over this
     # @dask_property
     # def pt(self):
@@ -166,8 +152,8 @@ behavior.update(awkward._util.copy_behaviors("Particle", "Electron", behavior))
 class Electron(Particle, base.NanoCollection, base.Systematic):
     @property
     def mass(self):
-        """Electron mass in
-        return particle.literals.e_minus.mass
+        """Electron mass in MeV"""
+        return awkward.ones_like(self.pt) * particle.literals.e_minus.mass


 _set_repr_name("Electron")
@@ -184,8 +170,8 @@ behavior.update(awkward._util.copy_behaviors("Particle", "Muon", behavior))
 class Muon(Particle, base.NanoCollection, base.Systematic):
     @property
     def mass(self):
-        """Muon mass in
-        return particle.literals.mu_minus.mass
+        """Muon mass in MeV"""
+        return awkward.ones_like(self.pt) * particle.literals.mu_minus.mass


 _set_repr_name("Muon")
@@ -202,8 +188,8 @@ behavior.update(awkward._util.copy_behaviors("Particle", "Tau", behavior))
 class Tau(Particle, base.NanoCollection, base.Systematic):
     @property
     def mass(self):
-        """Tau mass in
-        return particle.literals.tau_minus.mass
+        """Tau mass in MeV"""
+        return awkward.ones_like(self.pt) * particle.literals.tau_minus.mass


 _set_repr_name("Tau")
@@ -218,7 +204,14 @@ behavior.update(awkward._util.copy_behaviors("Particle", "Jet", behavior))


 @awkward.mixin_class(behavior)
-class Jet(Particle, base.NanoCollection, base.Systematic):
+class Jet(Particle, base.NanoCollection, base.Systematic):
+    @property
+    def mass(self):
+        r"""Invariant mass (+, -, -, -)
+
+        :math:`\sqrt{t^2-x^2-y^2-z^2}`
+        """
+        return self["m"]


 _set_repr_name("Jet")
@@ -230,12 +223,24 @@ JetArray.MomentumClass = vector.LorentzVectorArray  # noqa: F821

 __all__ = [
     "Electron",
+    "ElectronArray",  # noqa: F822
+    "ElectronRecord",  # noqa: F822
     "Jet",
+    "JetArray",  # noqa: F822
+    "JetRecord",  # noqa: F822
     "MissingET",
+    "MissingETArray",  # noqa: F822
+    "MissingETRecord",  # noqa: F822
     "Muon",
+    "MuonArray",  # noqa: F822
+    "MuonRecord",  # noqa: F822
     "NtupleEvents",
     "Particle",
+    "ParticleArray",  # noqa: F822
+    "ParticleRecord",  # noqa: F822
     "Pass",
     "Photon",
+    "PhotonArray",  # noqa: F822
+    "PhotonRecord",  # noqa: F822
     "Weight",
 ]
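The lepton `mass` properties now broadcast the PDG mass over the collection's layout: `awkward.ones_like(self.pt)` produces an array with the same jagged structure as `pt`, so multiplying by the scalar `particle.literals.*.mass` yields a per-object mass array instead of a bare float. A standalone illustration of the same pattern, using plain awkward arrays rather than the package's mixin classes:

```python
# Illustration of the broadcasting pattern used in the new mass properties.
# A scalar PDG mass is promoted to an array matching the per-event object counts.
import awkward as ak
import particle

pt = ak.Array([[35_000.0, 21_000.0], [], [50_000.0]])  # MeV, jagged like a real collection
mass = ak.ones_like(pt) * particle.literals.e_minus.mass  # electron mass in MeV

print(mass.to_list())  # [[0.511..., 0.511...], [], [0.511...]] (roughly 0.511 MeV per object)
```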
atlas_schema-0.3.0/src/atlas_schema/schema.py (new file)

@@ -0,0 +1,427 @@
+from __future__ import annotations
+
+import difflib
+import warnings
+from collections.abc import KeysView, ValuesView
+from typing import Any, ClassVar
+
+from coffea.nanoevents.schemas.base import BaseSchema, zip_forms
+
+from atlas_schema.methods import behavior as roaster
+from atlas_schema.typing_compat import Behavior, Self
+
+
+class NtupleSchema(BaseSchema):  # type: ignore[misc]
+    """The schema for building ATLAS ntuples following the typical centralized formats.
+
+    This schema is built from all branches found in a tree in the supplied
+    file, based on the naming pattern of the branches. This naming pattern is
+    typically assumed to be
+
+    .. code-block:: bash
+
+        {collection:str}_{subcollection:str}_{systematic:str}
+
+    where:
+    * ``collection`` is assumed to be a prefix with typical characters, following the regex ``[a-zA-Z][a-zA-Z0-9]*``; that is starting with a case-insensitive letter, and proceeded by zero or more alphanumeric characters,
+    * ``subcollection`` is assumed to be anything with typical characters (allowing for underscores) following the regex ``[a-zA-Z_][a-zA-Z0-9_]*``; that is starting with a case-insensitive letter or underscore, and proceeded by zero or more alphanumeric characters including underscores, and
+    * ``systematic`` is assumed to be either ``NOSYS`` to indicate a branch with potential systematic variations, or anything with typical characters (allowing for underscores) following the same regular expression as the ``subcollection``.
+
+    Here, a collection refers to the top-level entry to access an item - a collection called ``el`` will be accessible under the ``el`` attributes via ``events['el']`` or ``events.el``. A subcollection called ``pt`` will be accessible under that collection, such as ``events['el']['pt']`` or ``events.el.pt``. This is the power of the schema providing a more user-friendly (and programmatic) access to the underlying branches.
+
+    The above logic means that the following branches below will be categorized as follows:
+
+    +-------------------------------+-------------------+-----------------------+------------------+
+    | branch                        | collection        | subcollection         | systematic       |
+    +===============================+===================+=======================+==================+
+    | ``'eventNumber'``             | ``'eventNumber'`` | ``None``              | ``None``         |
+    +-------------------------------+-------------------+-----------------------+------------------+
+    | ``'runNumber'``               | ``'runNumber'``   | ``None``              | ``None``         |
+    +-------------------------------+-------------------+-----------------------+------------------+
+    | ``'el_pt_NOSYS'``             | ``'el'``          | ``'pt'``              | ``'NOSYS'``      |
+    +-------------------------------+-------------------+-----------------------+------------------+
+    | ``'jet_cleanTightBad_NOSYS'`` | ``'jet'``         | ``'cleanTightBad'``   | ``'NOSYS'``      |
+    +-------------------------------+-------------------+-----------------------+------------------+
+    | ``'jet_select_btag_NOSYS'``   | ``'jet'``         | ``'select_btag'``     | ``'NOSYS'``      |
+    +-------------------------------+-------------------+-----------------------+------------------+
+    | ``'jet_e_NOSYS'``             | ``'jet'``         | ``'e'``               | ``'NOSYS'``      |
+    +-------------------------------+-------------------+-----------------------+------------------+
+    | ``'truthel_phi'``             | ``'truthel'``     | ``'phi'``             | ``None``         |
+    +-------------------------------+-------------------+-----------------------+------------------+
+    | ``'truthel_pt'``              | ``'truthel'``     | ``'pt'``              | ``None``         |
+    +-------------------------------+-------------------+-----------------------+------------------+
+    | ``'ph_eta'``                  | ``'ph'``          | ``'eta'``             | ``None``         |
+    +-------------------------------+-------------------+-----------------------+------------------+
+    | ``'ph_phi_SCALE__1up'``       | ``'ph'``          | ``'phi'``             | ``'SCALE__1up'`` |
+    +-------------------------------+-------------------+-----------------------+------------------+
+    | ``'mu_TTVA_effSF_NOSYS'``     | ``'mu'``          | ``'TTVA_effSF'``      | ``'NOSYS'``      |
+    +-------------------------------+-------------------+-----------------------+------------------+
+    | ``'recojet_antikt4PFlow_pt'`` | ``'recojet'``     | ``'antikt4PFlow_pt'`` | ``'NOSYS'``      |
+    +-------------------------------+-------------------+-----------------------+------------------+
+    | ``'recojet_antikt10UFO_m'``   | ``'recojet'``     | ``'antikt10UFO_m'``   | ``None``         |
+    +-------------------------------+-------------------+-----------------------+------------------+
+
+    Sometimes this logic is not what you want, and there are ways to teach ``NtupleSchema`` how to group some of these better for atypical cases. We can address these case-by-case.
+
+    **Singletons**
+
+    Sometimes you have particular branches that you don't want to be treated as a collection (with subcollections). And sometimes you will see warnings about this (see :ref:`faq`). There are some pre-defined ``singletons`` stored under :attr:`event_ids`, and these will be lazily treated as a _singleton_. For other cases where you add your own branches, you can additionally extend this class to add your own :attr:`singletons`:
+
+    .. code-block:: python
+
+        from atlas_schema.schema import NtupleSchema
+
+
+        class MySchema(NtupleSchema):
+            singletons = {"RandomRunNumber"}
+
+    and use this schema in your analysis code. The rest of the logic will be handled for you, and you can access your singletons under ``events.RandomRunNumber`` as expected.
+
+    **Mixins (collections, subcollections)**
+
+    In more complicated scenarios, you might need to teach :class:`NtupleSchema` how to handle collections that end up having underscores in their name, or other characters that make the grouping non-trivial. In some other scenarios, you want to tell the schema to assign a certain set of behaviors to a collection - rather than the default :class:`atlas_schema.methods.Particle` behavior. This is where :attr:`mixins` comes in. Similar to how :attr:`singletons` are handled, you extend this schema to include your own ``mixins`` pointing them at one of the behaviors defined in :mod:`atlas_schema.methods`.
+
+    Let's demonstrate both cases. Imagine you want to have your ``truthel`` collections above treated as :class:`atlas_schema.methods.Electron`, then you would extend the existing :attr:`mixins`:
+
+    .. code-block:: python
+
+        from atlas_schema.schema import NtupleSchema
+
+
+        class MySchema(NtupleSchema):
+            mixins = {"truthel": "Electron", **NtupleSchema.mixins}
+
+    Now, ``events.truthel`` will give you arrays zipped up with :class:`atlas_schema.methods.Electron` behaviors.
+
+    If instead, you run into problems with mixing different branches in the same collection, because the default behavior of this schema described above is not smart enough to handle the atypical cases, you can explicitly fix this by defining your collections:
+
+    .. code-block:: python
+
+        from atlas_schema.schema import NtupleSchema
+
+
+        class MySchema(NtupleSchema):
+            mixins = {
+                "recojet_antikt4PFlow": "Jet",
+                "recojet_antikt10UFO": "Jet",
+                **NtupleSchema.mixins,
+            }
+
+    Now, ``events.recojet_antikt4PFlow`` and ``events.recojet_antikt10UFO`` will be separate collections, instead of a single ``events.recojet`` that incorrectly merged branches from each of these collections.
+    """
+
+    __dask_capable__: ClassVar[bool] = True
+
+    warn_missing_crossrefs: ClassVar[bool] = True
+
+    #: Treat missing event-level branches as error instead of warning (default is ``False``)
+    error_missing_event_ids: ClassVar[bool] = False
+    #: Determine closest behavior for a given branch or treat branch as :attr:`default_behavior` (default is ``True``)
+    identify_closest_behavior: ClassVar[bool] = True
+
+    #: event IDs to expect in data datasets
+    event_ids_data: ClassVar[set[str]] = {
+        "lumiBlock",
+        "averageInteractionsPerCrossing",
+        "actualInteractionsPerCrossing",
+        "dataTakingYear",
+    }
+    #: event IDs to expect in MC datasets
+    event_ids_mc: ClassVar[set[str]] = {
+        "mcChannelNumber",
+        "runNumber",
+        "eventNumber",
+        "mcEventWeights",
+    }
+    #: all event IDs to expect in the dataset
+    event_ids: ClassVar[set[str]] = {*event_ids_data, *event_ids_mc}
+
+    #: mixins defining the mapping from collection name to behavior to use for that collection
+    mixins: ClassVar[dict[str, str]] = {
+        "el": "Electron",
+        "jet": "Jet",
+        "met": "MissingET",
+        "mu": "Muon",
+        "pass": "Pass",
+        "ph": "Photon",
+        "trigPassed": "Trigger",
+        "weight": "Weight",
+    }
+
+    #: additional branches to pass-through with no zipping or additional interpretation (such as those stored as length-1 vectors)
+    singletons: ClassVar[set[str]] = set()
+
+    #: docstrings to assign for specific subcollections across the various collections identified by this schema
+    docstrings: ClassVar[dict[str, str]] = {
+        "charge": "charge",
+        "eta": "pseudorapidity",
+        "met": "missing transverse energy [MeV]",
+        "mass": "invariant mass [MeV]",
+        "pt": "transverse momentum [MeV]",
+        "phi": "azimuthal angle",
+    }
+
+    #: default behavior to use for any collection (default ``"NanoCollection"``, from :class:`coffea.nanoevents.methods.base.NanoCollection`)
+    default_behavior: ClassVar[str] = "NanoCollection"
+
+    def __init__(self, base_form: dict[str, Any], version: str = "latest"):
+        super().__init__(base_form)
+        self._version = version
+        if version == "latest":
+            pass
+        else:
+            pass
+        self._form["fields"], self._form["contents"] = self._build_collections(
+            self._form["fields"], self._form["contents"]
+        )
+        self._form["parameters"]["metadata"]["version"] = self._version
+
+    @classmethod
+    def v1(cls, base_form: dict[str, Any]) -> Self:
+        """Build the NtupleEvents
+
+        For example, one can use ``NanoEventsFactory.from_root("file.root", schemaclass=NtupleSchema.v1)``
+        to ensure NanoAODv7 compatibility.
+        """
+        return cls(base_form, version="1")
+
+    def _build_collections(
+        self, field_names: list[str], input_contents: list[Any]
+    ) -> tuple[KeysView[str], ValuesView[dict[str, Any]]]:
+        branch_forms = dict(zip(field_names, input_contents))
+
+        # parse into high-level records (collections, list collections, and singletons)
+        collections = {
+            k.split("_")[0] for k in branch_forms if k not in self.singletons
+        }
+        collections -= self.event_ids
+        collections -= set(self.singletons)
+
+        # now handle any collections that we identified that are substrings of the items in the mixins
+        # convert all valid branch_forms into strings to make the lookups a bit faster
+        bf_str = ",".join(branch_forms.keys())
+        for mixin in self.mixins:
+            if mixin in collections:
+                continue
+            if f",{mixin}_" not in bf_str and not bf_str.startswith(f"{mixin}_"):
+                continue
+            if "_" in mixin:
+                warnings.warn(
+                    f"I identified a mixin that I did not automatically identify as a collection because it contained an underscore: '{mixin}'. I will add this to the known collections. To suppress this warning next time, please create your ntuples with collections without underscores. [mixin-underscore]",
+                    RuntimeWarning,
+                    stacklevel=2,
+                )
+            collections.add(mixin)
+            for collection in list(collections):
+                if mixin.startswith(f"{collection}_"):
+                    warnings.warn(
+                        f"I found a misidentified collection: '{collection}'. I will remove this from the known collections. To suppress this warning next time, please create your ntuples with collections that are not similarly named with underscores. [collection-subset]",
+                        RuntimeWarning,
+                        stacklevel=2,
+                    )
+                    collections.remove(collection)
+                    break
+
+        # rename needed because easyjet breaks the AMG assumptions
+        # https://gitlab.cern.ch/easyjet/easyjet/-/issues/246
+        for k in list(branch_forms):
+            if "NOSYS" not in k:
+                continue
+            branch_forms[k.replace("_NOSYS", "") + "_NOSYS"] = branch_forms.pop(k)
+
+        # these are collections with systematic variations
+        try:
+            subcollections = {
+                k.split("__")[0].split("_", 1)[1].replace("_NOSYS", "")
+                for k in branch_forms
+                if "NOSYS" in k and k not in self.singletons
+            }
+        except IndexError as exc:
+            msg = "One of the branches does not follow the assumed pattern for this schema. [invalid-branch-name]"
+            raise RuntimeError(msg) from exc
+
+        # Check the presence of the event_ids
+        missing_event_ids = [
+            event_id for event_id in self.event_ids if event_id not in branch_forms
+        ]
+
+        missing_singletons = [
+            singleton for singleton in self.singletons if singleton not in branch_forms
+        ]
+
+        if len(missing_event_ids) > 0:
+            if self.error_missing_event_ids:
+                msg = f"There are missing event ID fields: {missing_event_ids} \n\n\
+The event ID fields {self.event_ids} are necessary to perform sub-run identification \
+(e.g. for corrections and sub-dividing data during different detector conditions),\
+to cross-validate MC and Data (i.e. matching events for comparison), and to generate event displays. \
+It's advised to never drop these branches from the dataformat.\n\n\
+This error can be demoted to a warning by setting the class level variable error_missing_event_ids to False."
+                raise RuntimeError(msg)
+            warnings.warn(
+                f"Missing event_ids : {missing_event_ids}",
+                RuntimeWarning,
+                stacklevel=2,
+            )
+
+        if len(missing_singletons) > 0:
+            # These singletons are simply branches we do not parse or handle
+            # explicitly in atlas-schema (e.g. they are copied directly to the
+            # output structure we provide you), however there can be false
+            # positives when you submit multiple files with different branch
+            # structures and this warning could be safely ignored.
+            warnings.warn(
+                f"Missing singletons : {missing_singletons}. [singleton-missing]",
+                RuntimeWarning,
+                stacklevel=2,
+            )
+
+        output = {}
+
+        # first, register singletons (event-level, others)
+        for name in {*self.event_ids, *self.singletons}:
+            if name in [*missing_event_ids, *missing_singletons]:
+                continue
+
+            output[name] = branch_forms[name]
+
+        # next, go through and start grouping up collections
+        for name in collections:
+            content = {}
+            used = set()
+
+            for subname in subcollections:
+                prefix = f"{name}_{subname}_"
+                used.update({k for k in branch_forms if k.startswith(prefix)})
+                subcontent = {
+                    k[len(prefix) :]: branch_forms[k]
+                    for k in branch_forms
+                    if k.startswith(prefix)
+                }
+                if subcontent:
+                    # create the nominal version
+                    content[subname] = branch_forms[f"{prefix}NOSYS"]
+                    # create a collection of the systematic variations for the given variable
+                    content[f"{subname}_syst"] = zip_forms(
+                        subcontent, f"{name}_syst", record_name="NanoCollection"
+                    )
+
+            content.update(
+                {
+                    k[len(name) + 1 :]: branch_forms[k]
+                    for k in branch_forms
+                    if k.startswith(name + "_") and k not in used
+                }
+            )
+
+            if not used and not content:
+                warnings.warn(
+                    f"I identified a branch that likely does not have any leaves: '{name}'. I will treat this as a 'singleton'. To suppress this warning next time, please define your singletons explicitly. [singleton-undefined]",
+                    RuntimeWarning,
+                    stacklevel=2,
+                )
+                self.singletons.add(name)
+                output[name] = branch_forms[name]
+
+            else:
+                behavior = self.mixins.get(name, "")
+                if not behavior:
+                    behavior = self.suggested_behavior(name)
+                    warnings.warn(
+                        f"I found a collection with no defined mixin: '{name}'. I will assume behavior: '{behavior}'. To suppress this warning next time, please define mixins for your custom collections. [mixin-undefined]",
+                        RuntimeWarning,
+                        stacklevel=2,
+                    )
+
+                output[name] = zip_forms(content, name, record_name=behavior)
+
+            output[name].setdefault("parameters", {})
+            output[name]["parameters"].update({"collection_name": name})
+
+            if output[name]["class"] == "ListOffsetArray":
+                if output[name]["content"]["class"] == "RecordArray":
+                    parameters = output[name]["content"]["fields"]
+                    contents = output[name]["content"]["contents"]
+                else:
+                    # these are also singletons of another kind that we just pass through
+                    continue
+            elif output[name]["class"] == "RecordArray":
+                parameters = output[name]["fields"]
+                contents = output[name]["contents"]
+            elif output[name]["class"] == "NumpyArray":
+                # these are singletons that we just pass through
+                continue
+            else:
+                msg = f"Unhandled class {output[name]['class']}"
+                raise RuntimeError(msg)
+
+            # update docstrings as needed
+            # NB: must be before flattening for easier logic
+            for index, parameter in enumerate(parameters):
+                if "parameters" not in contents[index]:
+                    continue
+
+                parsed_name = parameter.replace("_NOSYS", "")
+                contents[index]["parameters"]["__doc__"] = self.docstrings.get(
+                    parsed_name,
+                    contents[index]["parameters"].get(
+                        "__doc__", "no docstring available"
+                    ),
+                )
+
+        return output.keys(), output.values()
+
+    @classmethod
+    def behavior(cls) -> Behavior:
+        """Behaviors necessary to implement this schema
+
+        Returns:
+            dict[str | tuple['*', str], type[awkward.Record]]: an :data:`awkward.behavior` dictionary
+        """
+        return roaster
+
+    @classmethod
+    def suggested_behavior(cls, key: str, cutoff: float = 0.4) -> str:
+        """
+        Suggest a behavior to use for a provided collection or branch name.
+
+        Default behavior: :class:`~coffea.nanoevents.methods.base.NanoCollection`.
+
+        Note:
+            If :attr:`identify_closest_behavior` is ``False``, then this function will return the default behavior ``NanoCollection``.
+
+        Warning:
+            If no behavior is found above the *cutoff* score, then this function will return the default behavior.
+
+        Args:
+            key (str): collection name to suggest a matching behavior for
+            cutoff (float): optional argument cutoff (default ``0.4``) is a float in the range ``[0, 1]``. Possibilities that don't score at least that similar to *key* are ignored.
+
+        Returns:
+            str: suggested behavior to use by string
+
+        Example:
+            >>> from atlas_schema.schema import NtupleSchema
+            >>> NtupleSchema.suggested_behavior("truthjet")
+            'Jet'
+            >>> NtupleSchema.suggested_behavior("SignalElectron")
+            'Electron'
+            >>> NtupleSchema.suggested_behavior("generatorWeight")
+            'Weight'
+            >>> NtupleSchema.suggested_behavior("aVeryStrangelyNamedBranchWithNoMatch")
+            'NanoCollection'
+        """
+        if cls.identify_closest_behavior:
+            # lowercase everything to do case-insensitive matching
+            behaviors = [b for b in cls.behavior() if isinstance(b, str)]
+            behaviors_l = [b.lower() for b in behaviors]
+            results = difflib.get_close_matches(
+                key.lower(), behaviors_l, n=1, cutoff=cutoff
+            )
+            if not results:
+                return cls.default_behavior
+
+            behavior = results[0]
+            # need to identify the index and return the unlowered version
+            return behaviors[behaviors_l.index(behavior)]
+        return cls.default_behavior
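When a collection has no entry in `mixins`, the new `suggested_behavior` classmethod falls back to fuzzy matching the collection name against the registered behavior names with `difflib.get_close_matches`, returning `default_behavior` (`"NanoCollection"`) when nothing scores above `cutoff`. A self-contained sketch of that matching step follows; the hand-written candidate list stands in for the string keys of the behavior dictionary.

```python
# Standalone sketch of the fuzzy-matching step inside NtupleSchema.suggested_behavior.
# The candidate list below is a stand-in for the string keys of the behavior dict.
import difflib

behaviors = ["Electron", "Jet", "MissingET", "Muon", "Photon", "Weight"]
behaviors_l = [b.lower() for b in behaviors]  # case-insensitive matching

key = "truthjet"
matches = difflib.get_close_matches(key.lower(), behaviors_l, n=1, cutoff=0.4)
suggestion = behaviors[behaviors_l.index(matches[0])] if matches else "NanoCollection"
print(suggestion)  # Jet, in line with the docstring example above
```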
{atlas_schema-0.2.3 → atlas_schema-0.3.0}/src/atlas_schema/typing_compat.py

@@ -5,7 +5,7 @@ Typing helpers.
 from __future__ import annotations

 import sys
-from typing import Annotated
+from typing import Annotated, Literal, Union

 import awkward

@@ -19,6 +19,6 @@ if sys.version_info >= (3, 11):
 else:
     from typing_extensions import Self

-Behavior: TypeAlias = dict[str, type[awkward.Record]]
+Behavior: TypeAlias = dict[Union[str, tuple[Literal["*"], str]], type[awkward.Record]]

 __all__ = ("Annotated", "Behavior", "Self")
{atlas_schema-0.2.3 → atlas_schema-0.3.0}/src/atlas_schema/utils.py

@@ -1,16 +1,15 @@
 from __future__ import annotations

 from enum import Enum
-from typing import TypeVar,
+from typing import TypeVar, cast

 import awkward as ak
-import dask_awkward as dak

-Array = TypeVar("Array", bound=
+Array = TypeVar("Array", bound=ak.Array)
 _E = TypeVar("_E", bound=Enum)


-def isin(element: Array, test_elements:
+def isin(element: Array, test_elements: ak.Array, axis: int = -1) -> Array:
     """
     Find test_elements in element. Similar in API as :func:`numpy.isin`.

@@ -21,12 +20,12 @@ def isin(element: Array, test_elements: dak.Array | ak.Array, axis: int = -1) ->
     comparison.

     Args:
-        element (
-        test_elements (
+        element (ak.Array): input array of values.
+        test_elements (ak.Array): one-dimensional set of values against which to test each value of *element*.
         axis (int): the axis along which the comparison is performed

     Returns:
-
+        ak.Array: result of comparison for test_elements in *element*

     Example:
         >>> import awkward as ak
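With `dask_awkward` dropped, `isin` is now annotated against plain `ak.Array`. A hedged usage sketch is shown below; the input values are invented, and the expected output follows the `numpy.isin` analogy described in the docstring rather than a verified run.

```python
# Hedged usage sketch for atlas_schema.utils.isin(element, test_elements, axis=-1).
# The values are made up; the output follows the numpy.isin analogy in the docstring.
import awkward as ak

from atlas_schema.utils import isin

ph_truth_type = ak.Array([[14, 16], [], [13]])
prompt_types = ak.Array([13, 14])  # one-dimensional set of values to test against

mask = isin(ph_truth_type, prompt_types)
print(mask.to_list())  # expected, by analogy with numpy.isin: [[True, False], [], [True]]
```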
atlas_schema-0.2.3/src/atlas_schema/schema.py (removed)

@@ -1,214 +0,0 @@
-from __future__ import annotations
-
-import warnings
-from collections.abc import KeysView, ValuesView
-from typing import Any, ClassVar
-
-from coffea.nanoevents.schemas.base import BaseSchema, zip_forms
-
-from atlas_schema.typing_compat import Behavior, Self
-
-
-class NtupleSchema(BaseSchema):  # type: ignore[misc]
-    """Ntuple schema builder
-
-    The Ntuple schema is built from all branches found in the supplied file, based on
-    the naming pattern of the branches. The following additional arrays are constructed:
-
-    - n/a
-    """
-
-    __dask_capable__ = True
-
-    warn_missing_crossrefs = True
-    error_missing_event_ids = False
-
-    event_ids_data: ClassVar[set[str]] = {
-        "lumiBlock",
-        "averageInteractionsPerCrossing",
-        "actualInteractionsPerCrossing",
-        "dataTakingYear",
-    }
-    event_ids_mc: ClassVar[set[str]] = {
-        "mcChannelNumber",
-        "runNumber",
-        "eventNumber",
-        "mcEventWeights",
-    }
-    event_ids: ClassVar[set[str]] = {*event_ids_data, *event_ids_mc}
-
-    mixins: ClassVar[dict[str, str]] = {
-        "el": "Electron",
-        "jet": "Jet",
-        "met": "MissingET",
-        "mu": "Muon",
-        "pass": "Pass",
-        "ph": "Photon",
-        "trigPassed": "Trigger",
-        "weight": "Weight",
-    }
-
-    # These are stored as length-1 vectors unnecessarily
-    singletons: ClassVar[set[str]] = set()
-
-    docstrings: ClassVar[dict[str, str]] = {
-        "charge": "charge",
-        "eta": "pseudorapidity",
-        "met": "missing transverse energy [MeV]",
-        "mass": "invariant mass [MeV]",
-        "pt": "transverse momentum [MeV]",
-        "phi": "azimuthal angle",
-    }
-
-    def __init__(self, base_form: dict[str, Any], version: str = "latest"):
-        super().__init__(base_form)
-        self._version = version
-        if version == "latest":
-            pass
-        else:
-            pass
-        self._form["fields"], self._form["contents"] = self._build_collections(
-            self._form["fields"], self._form["contents"]
-        )
-        self._form["parameters"]["metadata"]["version"] = self._version
-
-    @classmethod
-    def v1(cls, base_form: dict[str, Any]) -> Self:
-        """Build the NtupleEvents
-
-        For example, one can use ``NanoEventsFactory.from_root("file.root", schemaclass=NtupleSchema.v1)``
-        to ensure NanoAODv7 compatibility.
-        """
-        return cls(base_form, version="1")
-
-    def _build_collections(
-        self, field_names: list[str], input_contents: list[Any]
-    ) -> tuple[KeysView[str], ValuesView[dict[str, Any]]]:
-        branch_forms = dict(zip(field_names, input_contents))
-
-        # parse into high-level records (collections, list collections, and singletons)
-        collections = {k.split("_")[0] for k in branch_forms}
-        collections -= self.event_ids
-        collections -= set(self.singletons)
-
-        # rename needed because easyjet breaks the AMG assumptions
-        # https://gitlab.cern.ch/easyjet/easyjet/-/issues/246
-        for k in list(branch_forms):
-            if "NOSYS" not in k:
-                continue
-            branch_forms[k.replace("_NOSYS", "") + "_NOSYS"] = branch_forms.pop(k)
-
-        # these are collections with systematic variations
-        subcollections = {
-            k.split("__")[0].split("_", 1)[1].replace("_NOSYS", "")
-            for k in branch_forms
-            if "NOSYS" in k
-        }
-
-        # Check the presence of the event_ids
-        missing_event_ids = [
-            event_id for event_id in self.event_ids if event_id not in branch_forms
-        ]
-
-        if len(missing_event_ids) > 0:
-            if self.error_missing_event_ids:
-                msg = f"There are missing event ID fields: {missing_event_ids} \n\n\
-The event ID fields {self.event_ids} are necessary to perform sub-run identification \
-(e.g. for corrections and sub-dividing data during different detector conditions),\
-to cross-validate MC and Data (i.e. matching events for comparison), and to generate event displays. \
-It's advised to never drop these branches from the dataformat.\n\n\
-This error can be demoted to a warning by setting the class level variable error_missing_event_ids to False."
-                raise RuntimeError(msg)
-            warnings.warn(
-                f"Missing event_ids : {missing_event_ids}",
-                RuntimeWarning,
-                stacklevel=2,
-            )
-
-        output = {}
-
-        # first, register singletons (event-level, others)
-        for name in {*self.event_ids, *self.singletons}:
-            if name in missing_event_ids:
-                continue
-            output[name] = branch_forms[name]
-
-        # next, go through and start grouping up collections
-        for name in collections:
-            mixin = self.mixins.get(name, "NanoCollection")
-            content = {}
-            used = set()
-
-            for subname in subcollections:
-                prefix = f"{name}_{subname}_"
-                used.update({k for k in branch_forms if k.startswith(prefix)})
-                subcontent = {
-                    k[len(prefix) :]: branch_forms[k]
-                    for k in branch_forms
-                    if k.startswith(prefix)
-                }
-                if subcontent:
-                    # create the nominal version
-                    content[subname] = branch_forms[f"{prefix}NOSYS"]
-                    # create a collection of the systematic variations for the given variable
-                    content[f"{subname}_syst"] = zip_forms(
-                        subcontent, f"{name}_syst", record_name="NanoCollection"
-                    )
-
-            content.update(
-                {
-                    k[len(name) + 1 :]: branch_forms[k]
-                    for k in branch_forms
-                    if k.startswith(name + "_") and k not in used
-                }
-            )
-
-            if not used and not content:
-                warnings.warn(
-                    f"I identified a branch that likely does not have any leaves: '{name}'. I will treat this as a 'singleton'. To suppress this warning next time, please define your singletons explicitly.",
-                    RuntimeWarning,
-                    stacklevel=2,
-                )
-                self.singletons.add(name)
-                output[name] = branch_forms[name]
-
-            else:
-                output[name] = zip_forms(content, name, record_name=mixin)
-
-            output[name].setdefault("parameters", {})
-            output[name]["parameters"].update({"collection_name": name})
-
-            if output[name]["class"] == "ListOffsetArray":
-                parameters = output[name]["content"]["fields"]
-                contents = output[name]["content"]["contents"]
-            elif output[name]["class"] == "RecordArray":
-                parameters = output[name]["fields"]
-                contents = output[name]["contents"]
-            elif output[name]["class"] == "NumpyArray":
-                # these are singletons that we just pass through
-                continue
-            else:
-                msg = f"Unhandled class {output[name]['class']}"
-                raise RuntimeError(msg)
-            # update docstrings as needed
-            # NB: must be before flattening for easier logic
-            for index, parameter in enumerate(parameters):
-                if "parameters" not in contents[index]:
-                    continue
-
-                parsed_name = parameter.replace("_NOSYS", "")
-                contents[index]["parameters"]["__doc__"] = self.docstrings.get(
-                    parsed_name,
-                    contents[index]["parameters"].get(
-                        "__doc__", "no docstring available"
-                    ),
-                )
-
-        return output.keys(), output.values()
-
-    @classmethod
-    def behavior(cls) -> Behavior:
-        """Behaviors necessary to implement this schema"""
-        from atlas_schema.methods import behavior as roaster
-
-        return roaster
The remaining files are unchanged between the two versions: .gitignore, LICENSE, src/atlas_schema/__init__.py, src/atlas_schema/_version.pyi, src/atlas_schema/enums.py, and src/atlas_schema/py.typed.