triggerflow 0.2.1__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- trigger_loader/cluster_manager.py +3 -3
- trigger_loader/loader.py +55 -3
- triggerflow/core.py +107 -73
- triggerflow/interfaces/__init__.py +0 -0
- triggerflow/interfaces/uGT.py +127 -0
- triggerflow/templates/build_ugt.tcl +46 -0
- triggerflow/templates/data_types.h +524 -0
- triggerflow/templates/model-gt.cpp +104 -0
- {triggerflow-0.2.1.dist-info → triggerflow-0.2.3.dist-info}/METADATA +76 -7
- {triggerflow-0.2.1.dist-info → triggerflow-0.2.3.dist-info}/RECORD +13 -8
- {triggerflow-0.2.1.dist-info → triggerflow-0.2.3.dist-info}/WHEEL +0 -0
- {triggerflow-0.2.1.dist-info → triggerflow-0.2.3.dist-info}/entry_points.txt +0 -0
- {triggerflow-0.2.1.dist-info → triggerflow-0.2.3.dist-info}/top_level.txt +0 -0
trigger_loader/cluster_manager.py
CHANGED

@@ -4,9 +4,6 @@ import logging
 from typing import Any
 
 from dask.distributed import Client, LocalCluster
-from dask_cuda import LocalCUDACluster
-from dask_jobqueue import HTCondorCluster
-from dask_kubernetes import KubeCluster
 
 logger = logging.getLogger(__name__)
 

@@ -63,15 +60,18 @@ class ClusterManager:
             self.cluster = LocalCluster(**self.cluster_config)
 
         elif ct == "condor":
+            from dask_jobqueue import HTCondorCluster
             self.cluster = HTCondorCluster(**self.cluster_config)
             if self.jobs and self.jobs > 0:
                 # Scale to the requested number of jobs
                 self.cluster.scale(jobs=self.jobs)
 
         elif ct == "cuda":
+            from dask_cuda import LocalCUDACluster
             self.cluster = LocalCUDACluster(**self.cluster_config)
 
         elif ct == "kubernetes":
+            from dask_kubernetes import KubeCluster
             self.cluster = KubeCluster(**self.cluster_config)
             if self.jobs and self.jobs > 0:
                 try:
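In 0.2.3 the optional Dask backends are imported lazily inside the branch that needs them, so a missing extra no longer breaks module import. A minimal sketch of that pattern (illustrative only, not the package's exact ClusterManager):

```python
# Lazy-import sketch: each optional backend is imported only when its cluster
# type is requested. Function name and signature are illustrative assumptions.
def make_cluster(cluster_type: str, **cluster_config):
    if cluster_type == "local":
        from dask.distributed import LocalCluster
        return LocalCluster(**cluster_config)
    elif cluster_type == "condor":
        from dask_jobqueue import HTCondorCluster   # optional dependency
        return HTCondorCluster(**cluster_config)
    elif cluster_type == "cuda":
        from dask_cuda import LocalCUDACluster      # optional dependency
        return LocalCUDACluster(**cluster_config)
    raise ValueError(f"Unknown cluster type: {cluster_type}")
```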
trigger_loader/loader.py
CHANGED

@@ -45,8 +45,58 @@ class TriggerLoader:
         )
 
     def _load_sample_json(self, sample_json: str) -> dict:
+        """
+        Loads the JSON and resolves file paths using the priority:
+        1. Explicit 'files' list or directory path (Local/Explicit)
+        2. 'DAS' query (Remote Fallback)
+
+        Returns the canonical coffea fileset format: {dataset_name: [file_path_list]}.
+        """
+        import glob
+        import os
+
+        # Helper function definition needed here if it's not imported:
+        # def _fetch_files_from_das(das_query: str) -> list[str]: ... (placeholder or actual implementation)
+
         with open(sample_json) as f:
-
+            full_data = json.load(f)
+        dataset_metadata = full_data.get("samples", full_data)
+
+        fileset = {}
+        for ds_name, ds_info in dataset_metadata.items():
+            files = []
+
+            if "files" in ds_info:
+                file_info = ds_info["files"]
+
+                if isinstance(file_info, list):
+                    files = file_info
+
+                elif isinstance(file_info, str):
+                    if os.path.isdir(file_info):
+                        path_glob = os.path.join(file_info, "*.root")
+                        files = glob.glob(path_glob)
+                        logger.info(f"Resolved {len(files)} files from directory {file_info}.")
+                    else:
+                        files = [file_info]
+
+            if files:
+                logger.info(f"Using {len(files)} local/explicit files for {ds_name}.")
+
+            if not files and "DAS" in ds_info:
+                try:
+                    files = _fetch_files_from_das(ds_info["DAS"])
+                    logger.info(f"Resolved {len(files)} files via DAS for {ds_name}.")
+                except NameError:
+                    logger.error("DAS fetching skipped: _fetch_files_from_das is not defined.")
+
+            if not files:
+                logger.warning(f"No files found for dataset: {ds_name}. Skipping.")
+                continue
+
+            fileset[ds_name] = files
+
+        return fileset
 
     def _write_run_metadata_file(self, path: str, duration_s: float | None = None):
         meta_path = f"{path}/run_metadata.json"

@@ -58,9 +108,11 @@ class TriggerLoader:
             json.dump(data, f, indent=2)
 
     def _run(self, runner: processor.Runner, label: str):
-        logger.log(f"Starting processing ({label})...")
+        logger.log(logging.INFO, f"Starting processing ({label})...")
         start = time.time()
         proc = self._build_processor()
+        print(self.fileset)
+
         acc = runner(
             self.fileset,
             treename="Events",

@@ -68,7 +120,7 @@
         )
         elapsed = time.time() - start
         self._write_run_metadata_file(self.output_path, elapsed)
-        logger.log(f"Finished in {elapsed:.2f}s (run_uuid={self.run_uuid})")
+        logger.log(logging.INFO, f"Finished in {elapsed:.2f}s (run_uuid={self.run_uuid})")
         return acc
 
     def run_distributed(self, cluster_type: str, cluster_config: dict,
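The new `_load_sample_json` resolves each dataset from an explicit file list, a directory to glob, or a DAS query, in that priority order. A hedged illustration of the JSON shape it consumes (key names follow the code above; the dataset names and paths are made up):

```python
# Illustrative sample JSON for _load_sample_json; paths/dataset names are hypothetical.
# "files" may be a list of ROOT files or a directory (globbed for *.root);
# "DAS" is the remote fallback used only when no local files are found.
sample_json = {
    "samples": {
        "ZeroBias_2024": {
            "files": ["/data/zerobias/file1.root", "/data/zerobias/file2.root"]
        },
        "SingleMuon_2024": {
            "files": "/data/singlemuon/"           # directory -> glob *.root
        },
        "TTbar_MC": {
            "DAS": "/TTto2L2Nu/SomeCampaign/NANOAODSIM"   # resolved via DAS if needed
        },
    }
}
# The loader returns a coffea-style fileset: {dataset_name: [file, file, ...]}
```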
triggerflow/core.py
CHANGED

@@ -1,5 +1,6 @@
 from pathlib import Path
 import json
+import yaml
 import numpy as np
 import tarfile
 import importlib

@@ -8,6 +9,7 @@ from typing import Optional, Dict, Any, Union
 import shutil, warnings
 import importlib.resources as pkg_resources
 import triggerflow.templates
+from triggerflow.interfaces.uGT import build_ugt_model
 
 
 class ModelConverter(ABC):

@@ -73,37 +75,31 @@ class NoOpConverter(ModelConverter):
 
 
 class HLS4MLStrategy(CompilerStrategy):
-
-
-    def compile(self, model, workspace: Path, config: Optional[Dict] = None, **kwargs) -> Any:
+    def compile(self, model, workspace: Path, config: Optional[Dict] = None) -> Any:
         import hls4ml
-
+
         firmware_dir = workspace / "firmware"
         firmware_dir.mkdir(exist_ok=True)
-
-        cfg = config or hls4ml.utils.config_from_keras_model(model, granularity="name")
 
-
-
-
-
-
-
-        hls_kwargs.update(kwargs)
+        hls_config = hls4ml.utils.config_from_keras_model(model, granularity="name")
+        hls_kwargs = {}
+
+        for key in ["project_name", "namespace", "io_type", "backend", "write_weights_txt"]:
+            if key in config:
+                hls_kwargs[key] = config[key]
 
         firmware_model = hls4ml.converters.convert_from_keras_model(
             model,
+            hls_config=hls_config,
+            output_dir=str(firmware_dir),
             **hls_kwargs
         )
 
         firmware_model.compile()
-        if shutil.which("vivado") is not None:
-            firmware_model.build()
-        else:
-            warnings.warn("Vivado not found in PATH. Firmware build failed.", UserWarning)
         firmware_model.save(workspace / "firmware_model.fml")
         return firmware_model
-
+
+
     def load_compiled_model(self, workspace: Path) -> Any:
         from hls4ml.converters import link_existing_project
 

@@ -113,46 +109,31 @@ class HLS4MLStrategy(CompilerStrategy):
 
 
 class ConiferStrategy(CompilerStrategy):
-    """Conifer compilation strategy for XGBoost models"""
+    """Conifer compilation strategy for XGBoost models, unified config/workspace handling."""
 
-    def compile(self, model, workspace: Path, config: Optional[Dict] = None
+    def compile(self, model, workspace: Path, config: Optional[Dict] = None) -> Any:
         import conifer
-        import shutil
-        import warnings
         import os
 
         firmware_dir = workspace / "firmware"
         firmware_dir.mkdir(exist_ok=True)
-        os.environ['JSON_ROOT'] = '/eos/user/m/maglowac/TriggerModel/json'
-        os.environ['XILINX_AP_INCLUDE'] = '/eos/user/m/maglowac/TriggerModel/HLS_arbitrary_Precision_Types/include'
 
-
-        cfg = conifer.backends.xilinxhls.auto_config()#config or conifer.backends.cpp.auto_config()
+        cfg = conifer.backends.xilinxhls.auto_config()
         cfg['OutputDir'] = str(firmware_dir)
-
-
-
+        cfg['ProjectName'] = config['project_name']
+        cfg['XilinxPart'] = config['fpga_part']
+        cfg['ClockPeriod'] = config['clock_period']
 
-
-
-
-            config=cfg
-        )
+        if config:
+            for key, value in config.items():
+                cfg[key] = value
 
-        firmware_model.
-        proj_name = cfg.get('ProjectName', 'my_prj')
-        bridge_file = firmware_dir / "bridge.cpp"
-        text = bridge_file.read_text()
-        text = text.replace("my_prj.h", f"{proj_name}.h")
-        bridge_file.write_text(text)
+        firmware_model = conifer.converters.convert_from_xgboost(model, config=cfg)
         firmware_model.compile()
-        if shutil.which("vivado") is not None:
-            firmware_model.build()
-        else:
-            warnings.warn("Vivado not found in PATH. Firmware build failed.", UserWarning)
-
         firmware_model.save(firmware_dir / "firmware_model.fml")
+
         return firmware_model
+
 
     def load_compiled_model(self, workspace: Path) -> Any:
         from conifer import load_model

@@ -384,33 +365,45 @@ class ModelSerializer:
         return model, input_name
         return None, None
 
+
 class TriggerModel:
-
-
-
-
-
-
-
-
-        self.name = name
-        self.ml_backend = ml_backend.lower()
-        self.scales = scales
-        self.unscaled_type = unscaled_type
-        self.n_outputs = n_outputs
-        self.compiler = compiler.lower()
+    def __init__(self, config: Union[str, Path, Dict], native_model, scales):
+        if isinstance(config, (str, Path)):
+            with open(config, "r") as f:
+                config = yaml.safe_load(f)
+        elif not isinstance(config, dict):
+            raise TypeError("config must be a dict or path to a YAML file")
+
         self.native_model = native_model
-        self.
-
+        self.scales = scales
+
+        self.compiler_cfg = config.get("compiler", {})
+        self.subsystem_cfg = config.get("subsystem", {})
+
+        self.name = self.compiler_cfg.get("name", "model")
+        self.ml_backend = self.compiler_cfg.get("ml_backend", "").lower()
+        self.compiler = self.compiler_cfg.get("compiler", "").lower()
+
+        self.n_outputs = self.compiler_cfg.get("n_outputs")
+        self.unscaled_type = self.subsystem_cfg.get("unscaled_type", "ap_fixed<16,6>")
+
+        if self.ml_backend not in ("keras", "xgboost"):
+            raise ValueError("Unsupported backend")
+
         self.workspace_manager = WorkspaceManager()
-        self.converter = ConverterFactory.create_converter(ml_backend, compiler)
-        self.compiler_strategy = CompilerFactory.create_compiler(ml_backend, compiler)
-
+        self.converter = ConverterFactory.create_converter(self.ml_backend, self.compiler)
+        self.compiler_strategy = CompilerFactory.create_compiler(self.ml_backend, self.compiler)
+
         self.firmware_model = None
         self.model_qonnx = None
         self.input_name = None
 
-
+
+        self.workspace_manager.setup_workspace(
+            self.name,
+            self.ml_backend,
+            self.compiler
+        )
 
     @property
     def workspace(self) -> Path:

@@ -427,8 +420,8 @@ class TriggerModel:
         """Get metadata dictionary"""
         return self.workspace_manager.metadata
 
-    def __call__(self
-        """Execute
+    def __call__(self):
+        """Execute full model conversion and compilation pipeline using YAML config"""
         self.parse_dataset_object()
 
         # Save native model

@@ -445,22 +438,57 @@ class TriggerModel:
         self.input_name = self.model_qonnx.graph.input[0].name
         self.workspace_manager.add_artifact("qonnx", qonnx_path)
         self.workspace_manager.add_version({"qonnx": str(qonnx_path)})
+
 
         # Compile model
         self.firmware_model = self.compiler_strategy.compile(
             self.native_model,
             self.workspace_manager.workspace,
-            self.
-            **
+            self.compiler_cfg,
+            **self.compiler_cfg.get("kwargs", {})
         )
 
         self.workspace_manager.add_artifact("firmware", self.workspace_manager.workspace / "firmware")
+        if self.compiler != "conifer" and self.scales is not None:
+            self.build_emulator(
+                self.scales['shifts'],
+                self.scales['offsets'],
+                self.n_outputs,
+                self.unscaled_type
+            )
 
-        if self.compiler is not "conifer" and self.scales is not None:
-            self.build_emulator(self.scales['shifts'], self.scales['offsets'], self.n_outputs, self.unscaled_type)
 
+        if shutil.which("vivado") is not None:
+            build_ugt_model(
+                templates_dir=self.subsystem_cfg.get("templates_dir", Path("templates")),
+                firmware_dir=self.workspace_manager.workspace / "firmware",
+                compiler = self.compiler,
+                model_name=self.name,
+                n_inputs=self.subsystem_cfg["n_inputs"],
+                n_outputs=self.subsystem_cfg.get("n_outputs", self.n_outputs),
+                nn_offsets=self.scales["offsets"],
+                nn_shifts=self.scales["shifts"],
+                muon_size=self.subsystem_cfg.get("muon_size", 0),
+                jet_size=self.subsystem_cfg.get("jet_size", 0),
+                egamma_size=self.subsystem_cfg.get("egamma_size", 0),
+                tau_size=self.subsystem_cfg.get("tau_size", 0),
+                output_type=self.subsystem_cfg.get("output_type", "result_t"),
+                offset_type=self.subsystem_cfg.get("offset_type", "ap_fixed<10,10>"),
+                shift_type=self.subsystem_cfg.get("shift_type", "ap_fixed<10,10>"),
+                object_features=self.subsystem_cfg.get("object_features"),
+                global_features=self.subsystem_cfg.get("global_features")
+            )
+        else:
+            warnings.warn(
+                "Vivado executable not found on the system PATH. "
+                "Skipping FW build. ",
+                UserWarning
+            )
+
+
         self.workspace_manager.add_artifact("firmware", self.workspace_manager.workspace / "firmware")
         self.workspace_manager.save_metadata()
+
 
     @staticmethod
     def parse_dataset_object():

@@ -482,10 +510,16 @@ class TriggerModel:
         predictor = SoftwarePredictor(self.native_model, self.ml_backend)
         return predictor.predict(input_data)
 
-    def qonnx_predict(self, input_data: np.ndarray) -> np.ndarray:
+    def qonnx_predict(self, input_data: np.ndarray) -> np.ndarray | None:
         """Make predictions using QONNX model"""
+
         if self.model_qonnx is None:
-
+            warnings.warn(
+                "QONNX model is not available. Prediction skipped.",
+                UserWarning
+            )
+            return None
+
         predictor = QONNXPredictor(self.model_qonnx, self.input_name)
         return predictor.predict(input_data)
 
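`TriggerModel.__init__` now reads its settings from a `compiler:`/`subsystem:` config plus a `scales` dict. A hedged sketch of the shapes those two inputs are expected to have, based on the keys read above (the values are illustrative, not taken from a real model):

```python
import numpy as np

# Illustrative inputs for TriggerModel(config, native_model, scales).
# Key names follow the attributes read in __init__ above; values are made up.
config = {
    "compiler": {
        "name": "AXO",
        "ml_backend": "keras",       # or "xgboost"
        "compiler": "hls4ml",        # or "conifer"
        "n_outputs": 1,
        "project_name": "AXO_project",
    },
    "subsystem": {
        "n_inputs": 50,
        "unscaled_type": "ap_fixed<16,6>",
        "muon_size": 4, "jet_size": 4, "egamma_size": 4, "tau_size": 4,
    },
}
scales = {
    "offsets": np.zeros(50, dtype=int),   # per-input value subtracted before scaling
    "shifts": np.zeros(50, dtype=int),    # per-input right-shift (power-of-two scale)
}
```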
triggerflow/interfaces/__init__.py
File without changes
triggerflow/interfaces/uGT.py
ADDED
@@ -0,0 +1,127 @@

from pathlib import Path
import shutil
import pkg_resources
from jinja2 import Template
import re

def _render_template(template_file: str, output_file: Path, context: dict):
    with open(template_file, "r") as f:
        template_text = f.read()

    template = Template(template_text)
    rendered = template.render(**context)

    with open(output_file, "w") as f:
        f.write(rendered)

def build_ugt_model(
    templates_dir: Path,
    firmware_dir: Path,
    compiler: str,
    model_name: str,
    n_inputs: int,
    n_outputs: int,
    nn_offsets: list,
    nn_shifts: list,
    muon_size: int,
    jet_size: int,
    egamma_size: int,
    tau_size: int,
    output_type: str = "result_t",
    offset_type: str = "ap_fixed<10,10>",
    shift_type: str = "ap_fixed<10,10>",
    object_features: dict = None,
    global_features: list = None
):
    """
    Render uGT top func.
    """

    if object_features is None:
        object_features = {
            "muons": ["pt", "eta_extrapolated", "phi_extrapolated"],
            "jets": ["et", "eta", "phi"],
            "egammas": ["et", "eta", "phi"],
            "taus": ["et", "eta", "phi"]
        }

    if global_features is None:
        global_features = [
            "et.et",
            "ht.et",
            "etmiss.et", "etmiss.phi",
            "htmiss.et", "htmiss.phi",
            "ethfmiss.et", "ethfmiss.phi",
            "hthfmiss.et", "hthfmiss.phi"
        ]

    header_path = firmware_dir / "firmware" / f"{model_name}_project.h"
    if compiler.lower() == "conifer":
        output_layer = "score"
        output_type = "score_arr_t"
        header_path = firmware_dir / "firmware" / f"{model_name}_project.h"
        removal_pattern = re.compile(
            r',\s*score_t\s+tree_scores\[BDT::fn_classes\(n_classes\)\s*\*\s*n_trees\]',
            re.DOTALL
        )
        modified_content = removal_pattern.sub('', header_path.read_text(encoding='utf-8'))
        header_path.write_text(modified_content, encoding='utf-8')
        out = output_layer
    else:
        header_content = header_path.read_text(encoding='utf-8')
        layer_pattern = re.compile(
            r'result_t\s+(\w+)\[\d+\]\s*\)',
            re.DOTALL
        )
        match = layer_pattern.search(header_content)
        layer_name = match.group(1)
        output_layer = f"{layer_name}[{n_outputs}]"
        out = layer_name

    context = {
        "MODEL_NAME": model_name,
        "N_INPUTS": n_inputs,
        "N_OUTPUTS": n_outputs,
        "NN_OFFSETS": ", ".join(map(str, nn_offsets)),
        "NN_SHIFTS": ", ".join(map(str, nn_shifts)),
        "MUON_SIZE": muon_size,
        "JET_SIZE": jet_size,
        "EGAMMA_SIZE": egamma_size,
        "TAU_SIZE": tau_size,
        "OUTPUT_TYPE": output_type,
        "OUTPUT_LAYER": output_layer,
        "OUT": out,
        "OFFSET_TYPE": offset_type,
        "SHIFT_TYPE": shift_type,
        "MUON_FEATURES": object_features["muons"],
        "JET_FEATURES": object_features["jets"],
        "EGAMMA_FEATURES": object_features["egammas"],
        "TAU_FEATURES": object_features["taus"],
        "GLOBAL_FEATURES": global_features
    }

    context_tcl = {
        "MODEL_NAME": model_name,
    }

    out_path = firmware_dir / "firmware/model-gt.cpp"

    _render_template(f"{templates_dir}/model-gt.cpp", out_path, context)

    out_path = firmware_dir / "firmware/build_ugt.tcl"
    _render_template(f"{templates_dir}/build_ugt.tcl", out_path, context_tcl)

    shutil.copy(f"{templates_dir}/data_types.h", firmware_dir / "firmware")

    subprocess.run(
        ["vitis_hls", "-f", "build_ugt.tcl"],
        cwd=firmware_dir/"firmware",
        check=True
    )
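Note that the module above shells out via `subprocess.run` but its import list does not include `subprocess`. A minimal hedged sketch of the render-then-synthesize flow with the needed import (paths and the `MODEL_NAME` value are illustrative, not the package's actual defaults):

```python
import subprocess
from pathlib import Path
from jinja2 import Template

# Sketch of the flow used by build_ugt_model: render the TCL template with
# Jinja2, then invoke Vitis HLS on the rendered script in the firmware folder.
# Note the explicit `import subprocess`, which the module above relies on.
firmware_dir = Path("workspace/firmware/firmware")      # hypothetical location
template_text = Path("templates/build_ugt.tcl").read_text()
(firmware_dir / "build_ugt.tcl").write_text(
    Template(template_text).render(MODEL_NAME="AXO")    # illustrative context
)
subprocess.run(["vitis_hls", "-f", "build_ugt.tcl"], cwd=firmware_dir, check=True)
```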
triggerflow/templates/build_ugt.tcl
ADDED
@@ -0,0 +1,46 @@

file mkdir prj_{{ MODEL_NAME }}

open_project -reset prj_{{ MODEL_NAME }}

set_top {{ MODEL_NAME }}_GT

set core_files "model-gt.cpp {{ MODEL_NAME }}_project.cpp"

if { [file exists "BDT.cpp"] } {
    set all_files "$core_files BDT.cpp"
} else {
    set all_files "$core_files"
}

add_files $all_files -cflags "-std=c++11 -I../"

open_solution -reset solution1
set_part {xc7vx690t-ffg1927-2}

create_clock -period 25
set_clock_uncertainty 0

config_array_partition -complete_threshold 2

csynth_design

file mkdir firmware
file mkdir firmware/hdl
file mkdir firmware/hdl/payload
file mkdir firmware/hdl/payload/gtl
file mkdir firmware/hdl/payload/gtl/model
file mkdir firmware/cfg

set f [open firmware/cfg/model.dep "w"]

if {[file exists prj_{{ MODEL_NAME }}/solution1/syn/vhdl]} {
    foreach filepath [glob -nocomplain prj_{{ MODEL_NAME }}/solution1/syn/vhdl/*] {
        set filename [file tail $filepath]
        file copy -force $filepath firmware/hdl/payload/gtl/model/$filename
        puts $f "src payload/gtl/model/$filename"
    }
}

close $f
exit
triggerflow/templates/data_types.h
ADDED
@@ -0,0 +1,524 @@

#ifndef L1GT_DATATYPES
#define L1GT_DATATYPES

#include "ap_fixed.h"
#include <cmath>

// Author: sioni@cern.ch August 2022
// Object data types defined in https://github.com/cms-l1-globaltrigger/mp7_ugt_legacy/blob/master/firmware/hdl/packages/gtl_pkg.vhd
// Code mimics Phase 2 L1Trigger Particle Flow data types HLS:
// https://github.com/cms-sw/cmssw/tree/master/DataFormats/L1TParticleFlow/interface

typedef ap_fixed<9,2> cossin_t;
typedef ap_fixed<13,7> sinh_t;

static constexpr int N_TABLE = 2048;

/* ---
 * Constants useful for converting physical units to hardware integers
 * --- */
namespace Scales{
    static const double MUON_PHI_LSB = 2 * M_PI / 576;
    static const double CALO_PHI_LSB = 2 * M_PI / 144;

    static const double MUON_ETA_LSB = 0.0870 / 8;
    static const double CALO_ETA_LSB = 0.0870 / 2;

    static const double MUON_PT_LSB = 0.5;
    static const double CALO_PT_LSB = 0.5;

    const int INTPHI_PI = 720;

    static const int MUON_HALF_PI = 144;
    static const int CALO_HALF_PI = 36;
}; // namespace Scales

/* ---
 * Functions for packing and unpacking ap_ objects
 * --- */
template <typename U, typename T>
inline void pack_into_bits(U& u, unsigned int& start, const T& data) {
    const unsigned int w = T::width;
    u(start + w - 1, start) = data(w - 1, 0);
    start += w;
}

template <typename U, typename T>
inline void unpack_from_bits(const U& u, unsigned int& start, T& data) {
    const unsigned int w = T::width;
    data(w - 1, 0) = u(start + w - 1, start);
    start += w;
}

/* ---
 * Definitions of the objects received by the GT
 * --- */
/* ---
 * Muon
 * --- */
struct Muon{
    ap_uint<10> phi_extrapolated;
    ap_ufixed<9,8> pt;
    ap_uint<4> quality;
    ap_int<9> eta_extrapolated;
    ap_uint<2> iso;
    ap_uint<1> charge_sign;
    ap_uint<1> charge_valid;
    ap_uint<7> index_bits;
    ap_uint<10> phi_out;
    ap_uint<8> pt_unconstrained;
    ap_uint<1> hadronic_shower_trigger;
    ap_uint<2> impact_parameter;

    static const int BITWIDTH = 64;

    inline void clear(){
        phi_extrapolated = 0;
        pt = 0;
        quality = 0;
        eta_extrapolated = 0;
        iso = 0;
        charge_sign = 0;
        charge_valid = 0;
        index_bits = 0;
        phi_out = 0;
        pt_unconstrained = 0;
        hadronic_shower_trigger = 0;
        impact_parameter = 0;
    }

    inline ap_uint<BITWIDTH> pack() const{
        ap_uint<BITWIDTH> ret;
        unsigned int start = 0;
        pack_into_bits(ret, start, phi_extrapolated);
        pack_into_bits(ret, start, pt);
        pack_into_bits(ret, start, quality);
        pack_into_bits(ret, start, eta_extrapolated);
        pack_into_bits(ret, start, iso);
        pack_into_bits(ret, start, charge_sign);
        pack_into_bits(ret, start, charge_valid);
        pack_into_bits(ret, start, index_bits);
        pack_into_bits(ret, start, phi_out);
        pack_into_bits(ret, start, pt_unconstrained);
        pack_into_bits(ret, start, hadronic_shower_trigger);
        pack_into_bits(ret, start, impact_parameter);
        return ret;
    }

    inline void initFromBits(const ap_uint<BITWIDTH> &src){
        unsigned int start = 0;
        unpack_from_bits(src, start, phi_extrapolated);
        unpack_from_bits(src, start, pt);
        unpack_from_bits(src, start, quality);
        unpack_from_bits(src, start, eta_extrapolated);
        unpack_from_bits(src, start, iso);
        unpack_from_bits(src, start, charge_sign);
        unpack_from_bits(src, start, charge_valid);
        unpack_from_bits(src, start, index_bits);
        unpack_from_bits(src, start, phi_out);
        unpack_from_bits(src, start, pt_unconstrained);
        unpack_from_bits(src, start, hadronic_shower_trigger);
        unpack_from_bits(src, start, impact_parameter);
    }

    inline static Muon unpack(const ap_uint<BITWIDTH> &src){
        Muon ret;
        ret.initFromBits(src);
        return ret;
    }

    inline static Muon initFromPhysicalDoubles(const double pt, const double eta, const double phi){
        Muon ret;
        ret.clear();
        ret.pt = pt;
        ret.eta_extrapolated = round(eta / Scales::MUON_ETA_LSB);
        ret.phi_extrapolated = round(phi / Scales::MUON_PHI_LSB);
        return ret;
    }

    inline static Muon initFromHWInt(int pt, int eta, int phi){
        Muon muon;
        muon.clear();
        muon.pt.V = pt;
        muon.eta_extrapolated.V = eta;
        muon.phi_extrapolated.V = phi;
        return muon;
    }

}; // struct Muon

/* ---
 * Jet
 * --- */
struct Jet{
    ap_ufixed<11,10> et;
    ap_int<8> eta;
    ap_uint<8> phi;
    ap_uint<1> disp;
    ap_uint<2> quality;
    ap_uint<2> spare;

    static const int BITWIDTH = 32;

    inline void clear(){
        et = 0;
        eta = 0;
        phi = 0;
        disp = 0;
        quality = 0;
        spare = 0;
    }

    inline ap_uint<BITWIDTH> pack() const{
        ap_uint<BITWIDTH> ret;
        unsigned int start = 0;
        pack_into_bits(ret, start, et);
        pack_into_bits(ret, start, eta);
        pack_into_bits(ret, start, phi);
        pack_into_bits(ret, start, disp);
        pack_into_bits(ret, start, quality);
        pack_into_bits(ret, start, spare);
        return ret;
    }

    inline void initFromBits(const ap_uint<BITWIDTH> &src){
        unsigned int start = 0;
        unpack_from_bits(src, start, et);
        unpack_from_bits(src, start, eta);
        unpack_from_bits(src, start, phi);
        unpack_from_bits(src, start, disp);
        unpack_from_bits(src, start, quality);
        unpack_from_bits(src, start, spare);
    }

    inline static Jet unpack(const ap_uint<BITWIDTH> &src){
        Jet ret;
        ret.initFromBits(src);
        return ret;
    }

    inline static Jet initFromPhysicalDoubles(const double et, const double eta, const double phi){
        Jet ret;
        ret.clear();
        ret.et = et;
        ret.eta = round(eta / Scales::CALO_ETA_LSB);
        ret.phi = round(phi / Scales::CALO_PHI_LSB);
        return ret;
    }

    inline static Jet initFromHWInt(const int et, const int eta, const int phi){
        Jet ret;
        ret.clear();
        ret.et.V = et;
        ret.eta.V = eta;
        ret.phi.V = phi;
        return ret;
    }

}; // struct Jet

/* ---
 * e / gamma or tau (same format)
 * --- */
struct CaloCommon{
    ap_ufixed<9,8> et;
    ap_int<8> eta;
    ap_uint<8> phi;
    ap_uint<2> iso;
    ap_uint<5> spare;

    static const int BITWIDTH = 32;

    inline void clear(){
        et = 0;
        eta = 0;
        phi = 0;
        iso = 0;
        spare = 0;
    }

    inline ap_uint<BITWIDTH> pack() const{
        ap_uint<BITWIDTH> ret;
        unsigned int start = 0;
        pack_into_bits(ret, start, et);
        pack_into_bits(ret, start, eta);
        pack_into_bits(ret, start, phi);
        pack_into_bits(ret, start, iso);
        pack_into_bits(ret, start, spare);
        return ret;
    }

    inline void initFromBits(const ap_uint<BITWIDTH> &src){
        unsigned int start = 0;
        unpack_from_bits(src, start, et);
        unpack_from_bits(src, start, eta);
        unpack_from_bits(src, start, phi);
        unpack_from_bits(src, start, iso);
        unpack_from_bits(src, start, spare);
    }

    inline static CaloCommon unpack(const ap_uint<BITWIDTH> &src){
        CaloCommon ret;
        ret.initFromBits(src);
        return ret;
    }

    inline static CaloCommon initFromPhysicalDoubles(const double et, const double eta, const double phi){
        CaloCommon ret;
        ret.clear();
        ret.et = et;
        ret.eta = round(eta / Scales::CALO_ETA_LSB);
        ret.phi = round(phi / Scales::CALO_PHI_LSB);
        return ret;
    }

    inline static CaloCommon initFromHWInt(const int et, const int eta, const int phi){
        CaloCommon ret;
        ret.clear();
        ret.et.V = et;
        ret.eta.V = eta;
        ret.phi.V = phi;
        return ret;
    }

}; // struct CaloCommon

typedef CaloCommon EGamma;
typedef CaloCommon Tau;

/* ---
 * Scalar Sums
 * --- */
struct ET{
    ap_ufixed<12,11> et;
    ap_ufixed<12,11> ettem;
    ap_uint<4> spare;
    ap_uint<4> minimum_bias_hf;

    static const int BITWIDTH = 32;

    inline void clear(){
        et = 0;
        ettem = 0;
        spare = 0;
        minimum_bias_hf = 0;
    }

    inline ap_uint<BITWIDTH> pack() const{
        ap_uint<BITWIDTH> ret;
        unsigned int start = 0;
        pack_into_bits(ret, start, et);
        pack_into_bits(ret, start, ettem);
        pack_into_bits(ret, start, spare);
        pack_into_bits(ret, start, minimum_bias_hf);
        return ret;
    }

    inline void initFromBits(const ap_uint<BITWIDTH> &src){
        unsigned int start = 0;
        unpack_from_bits(src, start, et);
        unpack_from_bits(src, start, ettem);
        unpack_from_bits(src, start, spare);
        unpack_from_bits(src, start, minimum_bias_hf);
    }

    inline static ET unpack(const ap_uint<BITWIDTH> &src){
        ET ret;
        ret.initFromBits(src);
        return ret;
    }

    inline static ET initFromPhysicalDoubles(const double et, const double ettem){
        ET ret;
        ret.clear();
        ret.et = et;
        ret.ettem = ettem;
        return ret;
    }

    inline static ET initFromHWInt(const int et, const int ettem){
        ET ret;
        ret.clear();
        ret.et.V = et;
        ret.ettem.V = ettem;
        return ret;
    }
}; // struct ET

struct HT{
    ap_ufixed<12,11> et;
    ap_uint<13> tower_count;
    ap_uint<3> spare;
    ap_uint<4> minimum_bias_hf;

    static const int BITWIDTH = 32;

    inline void clear(){
        et = 0;
        tower_count = 0;
        spare = 0;
        minimum_bias_hf = 0;
    }

    inline ap_uint<BITWIDTH> pack() const{
        ap_uint<BITWIDTH> ret;
        unsigned int start = 0;
        pack_into_bits(ret, start, et);
        pack_into_bits(ret, start, tower_count);
        pack_into_bits(ret, start, spare);
        pack_into_bits(ret, start, minimum_bias_hf);
        return ret;
    }

    inline void initFromBits(const ap_uint<BITWIDTH> &src){
        unsigned int start = 0;
        unpack_from_bits(src, start, et);
        unpack_from_bits(src, start, tower_count);
        unpack_from_bits(src, start, spare);
        unpack_from_bits(src, start, minimum_bias_hf);
    }

    inline static HT unpack(const ap_uint<BITWIDTH> &src){
        HT ret;
        ret.initFromBits(src);
        return ret;
    }

    inline static HT initFromPhysicalDoubles(const double et){
        HT ret;
        ret.clear();
        ret.et = et;
        return ret;
    }

    inline static HT initFromHWInt(const int et){
        HT ret;
        ret.clear();
        ret.et.V = et;
        return ret;
    }
}; // struct HT

/* ---
 * Vector Sums
 * --- */
struct VectorSumsCommon{
    ap_ufixed<12,11> et;
    ap_uint<8> phi;
    ap_uint<8> asy;
    ap_uint<4> other;

    static const int BITWIDTH = 32;

    inline void clear(){
        et = 0;
        phi = 0;
        asy = 0;
        other = 0;
    }

    inline ap_uint<BITWIDTH> pack() const{
        ap_uint<BITWIDTH> ret;
        unsigned int start = 0;
        pack_into_bits(ret, start, et);
        pack_into_bits(ret, start, phi);
        pack_into_bits(ret, start, asy);
        pack_into_bits(ret, start, other);
        return ret;
    }

    inline void initFromBits(const ap_uint<BITWIDTH> &src){
        unsigned int start = 0;
        unpack_from_bits(src, start, et);
        unpack_from_bits(src, start, phi);
        unpack_from_bits(src, start, asy);
        unpack_from_bits(src, start, other);
    }

    inline static VectorSumsCommon unpack(const ap_uint<BITWIDTH> &src){
        VectorSumsCommon ret;
        ret.initFromBits(src);
        return ret;
    }

    inline static VectorSumsCommon initFromPhysicalDoubles(const double et, const double phi){
        VectorSumsCommon ret;
        ret.clear();
        ret.et = et;
        ret.phi = round(phi / Scales::CALO_PHI_LSB);
        return ret;
    }

    inline static VectorSumsCommon initFromHWInt(const int et, const int phi){
        VectorSumsCommon ret;
        ret.clear();
        ret.et.V = et;
        ret.phi.V = phi;
        return ret;
    }
}; // struct VectorSumsCommon

typedef VectorSumsCommon ETMiss;
typedef VectorSumsCommon HTMiss;
typedef VectorSumsCommon ETHFMiss;
typedef VectorSumsCommon HTHFMiss;

static const int NMUONS = 8;
static const int NJETS = 12;
static const int NEGAMMAS = 12;
static const int NTAUS = 12;

/* ---
 * Definitions of common objects used for ML triggers
 * TODO: this is a first implementation, to be improved & expanded
 * TODO: these data types for px, py, pz are not optimized
 * --- */
typedef ap_fixed<18,13> unscaled_t;
struct PxPyPz{

    unscaled_t px;
    unscaled_t py;
    unscaled_t pz;

    static const int BITWIDTH = 36;

    inline void clear(){
        px = 0;
        py = 0;
        pz = 0;
    }

    inline ap_uint<BITWIDTH> pack() const{
        ap_uint<BITWIDTH> ret;
        unsigned int start = 0;
        pack_into_bits(ret, start, px);
        pack_into_bits(ret, start, py);
        pack_into_bits(ret, start, pz);
        return ret;
    }

    inline void initFromBits(const ap_uint<BITWIDTH> &src){
        unsigned int start = 0;
        unpack_from_bits(src, start, px);
        unpack_from_bits(src, start, py);
        unpack_from_bits(src, start, pz);
    }

    inline static PxPyPz unpack(const ap_uint<BITWIDTH> &src){
        PxPyPz ret;
        ret.initFromBits(src);
        return ret;
    }

    inline static PxPyPz initFromPhysicalDoubles(const double px, const double py, const double pz){
        PxPyPz ret;
        ret.clear();
        ret.px = px;
        ret.py = py;
        ret.pz = pz;
        return ret;
    }
}; // struct PxPyPz

#endif
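The `pack()`/`unpack()` helpers above concatenate each field's raw bits in declaration order, starting from bit 0. A hedged pure-Python illustration of the same layout for the 32-bit Jet word (field order and widths taken from the struct above; the function and values are illustrative, not package code):

```python
# Mirrors Jet::pack(): fields are placed least-significant-first in declaration
# order (et:11, eta:8, phi:8, disp:1, quality:2, spare:2 = 32 bits).
def pack_jet(et: int, eta: int, phi: int, disp: int, quality: int, spare: int) -> int:
    word, start = 0, 0
    for value, width in [(et, 11), (eta, 8), (phi, 8), (disp, 1), (quality, 2), (spare, 2)]:
        word |= (value & ((1 << width) - 1)) << start   # mask field, shift into place
        start += width
    return word

print(hex(pack_jet(et=40, eta=5, phi=12, disp=0, quality=1, spare=0)))
```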
triggerflow/templates/model-gt.cpp
ADDED
@@ -0,0 +1,104 @@

#include "{{MODEL_NAME}}_project.h"
#include "data_types.h"

namespace {{MODEL_NAME}} {

typedef {{OFFSET_TYPE}} offset_t;
typedef {{SHIFT_TYPE}} shift_t;

static const offset_t NN_OFFSETS[{{N_INPUTS}}] = { {{NN_OFFSETS}} };
static const shift_t NN_SHIFTS[{{N_INPUTS}}] = { {{NN_SHIFTS}} };

static void scaleNNInputs(
    input_t unscaled[{{N_INPUTS}}],
    input_t scaled[{{N_INPUTS}}]
) {
    #pragma HLS pipeline
    for (int i = 0; i < {{N_INPUTS}}; i++) {
        #pragma HLS unroll
        input_t tmp0 = unscaled[i] - NN_OFFSETS[i];
        input_t tmp1 = tmp0 >> NN_SHIFTS[i];
        scaled[i] = tmp1;
    }
}

void {{MODEL_NAME}}_GT(
    Muon muons[{{MUON_SIZE}}],
    Jet jets[{{JET_SIZE}}],
    EGamma egammas[{{EGAMMA_SIZE}}],
    Tau taus[{{TAU_SIZE}}],
    ET et,
    HT ht,
    ETMiss etmiss,
    HTMiss htmiss,
    ETHFMiss ethfmiss,
    HTHFMiss hthfmiss,
    {{OUTPUT_TYPE}} {{OUTPUT_LAYER}}
) {
    #pragma HLS aggregate variable=muons compact=bit
    #pragma HLS aggregate variable=jets compact=bit
    #pragma HLS aggregate variable=egammas compact=bit
    #pragma HLS aggregate variable=taus compact=bit
    #pragma HLS aggregate variable=et compact=bit
    #pragma HLS aggregate variable=ht compact=bit
    #pragma HLS aggregate variable=etmiss compact=bit
    #pragma HLS aggregate variable=htmiss compact=bit
    #pragma HLS aggregate variable=ethfmiss compact=bit
    #pragma HLS aggregate variable=hthfmiss compact=bit

    #pragma HLS array_partition variable=muons complete
    #pragma HLS array_partition variable=jets complete
    #pragma HLS array_partition variable=egammas complete
    #pragma HLS array_partition variable=taus complete

    #pragma HLS pipeline II=1
    #pragma HLS latency min=2 max=2
    #pragma HLS inline recursive

    input_t input_unscaled[{{N_INPUTS}}];
    input_t input_scaled[{{N_INPUTS}}];
    int idx = 0;

    // Muons
    for (int i = 0; i < {{MUON_SIZE}}; i++) {
        #pragma HLS unroll
        {% for f in MUON_FEATURES %}
        input_unscaled[idx++] = muons[i].{{f}};
        {% endfor %}
    }

    // Jets
    for (int i = 0; i < {{JET_SIZE}}; i++) {
        #pragma HLS unroll
        {% for f in JET_FEATURES %}
        input_unscaled[idx++] = jets[i].{{f}};
        {% endfor %}
    }

    // EGammas
    for (int i = 0; i < {{EGAMMA_SIZE}}; i++) {
        #pragma HLS unroll
        {% for f in EGAMMA_FEATURES %}
        input_unscaled[idx++] = egammas[i].{{f}};
        {% endfor %}
    }

    // Taus
    for (int i = 0; i < {{TAU_SIZE}}; i++) {
        #pragma HLS unroll
        {% for f in TAU_FEATURES %}
        input_unscaled[idx++] = taus[i].{{f}};
        {% endfor %}
    }

    // Scalars / global objects
    {% for f in GLOBAL_FEATURES %}
    input_unscaled[idx++] = {{f}};
    {% endfor %}

    scaleNNInputs(input_unscaled, input_scaled);

    {{MODEL_NAME}}_project(input_scaled, {{OUT}});
}

} // namespace {{MODEL_NAME}}
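`scaleNNInputs` standardizes each input with a subtract-and-right-shift, i.e. scaled = (unscaled − offset) / 2^shift. A hedged Python equivalent of that arithmetic, using the offsets/shifts shape from the README example below (the sample values are illustrative):

```python
import numpy as np

# Software equivalent of scaleNNInputs: subtract the per-feature offset, then
# divide by the power of two given by the shift (a right shift in firmware).
def scale_nn_inputs(unscaled: np.ndarray, offsets: np.ndarray, shifts: np.ndarray) -> np.ndarray:
    return (unscaled - offsets) / (2.0 ** shifts)

x = np.array([26.0, 3.0, 136.0])                       # illustrative raw inputs
print(scale_nn_inputs(x, offsets=np.array([18, 0, 72]), shifts=np.array([3, 0, 6])))
```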
{triggerflow-0.2.1.dist-info → triggerflow-0.2.3.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: triggerflow
-Version: 0.2.1
+Version: 0.2.3
 Summary: Utilities for ML models targeting hardware triggers
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License

@@ -35,22 +35,91 @@ pip install triggerflow
 
 from triggerflow.core import TriggerModel
 
-
-
+
+scales = {'offsets': np.array([18, 0, 72, 7, 0, 73, 4, 0, 73, 4, 0, 72, 3, 0, 72, 6, -0, 286, 3, -2, 285, 3, -2, 282, 3, -2, 286, 29, 0, 72, 22, 0, 72, 18, 0, 72, 14, 0, 72, 11, 0, 72, 10, 0, 72, 10, 0, 73, 9, 0], dtype='int'),
+          'shifts': np.array([3, 0, 6, 2, 5, 6, 0, 5, 6, 0, 5, 6, -1, 5, 6, 2, 7, 8, 0, 7, 8, 0, 7, 8, 0, 7, 8, 4, 6, 6, 3, 6, 6, 3, 6, 6, 3, 6, 6, 3, 6, 6, 3, 6, 6, 3, 6, 6, 3, 6], dtype='int')}
+
+
+trigger_model = TriggerModel(
+    config="triggermodel_config.yaml",
+    native_model=model, #Native XGboost/Keras model
+    scales=scales
+)
+
+trigger_model() #Vivado requird on $PATH for Firmware build.
 
 # then:
-output_software =
-output_firmware =
-output_qonnx =
+output_software = trigger_model.software_predict(input_data)
+output_firmware = trigger_model.firmware_predict(input_data)
+output_qonnx = trigger_model.qonnx_predict(input_data)
 
 # save and load trigger models:
-
+trigger_model.save("triggerflow.tar.xz")
 
 # in a separate session:
 from triggerflow.core import TriggerModel
 triggerflow = TriggerModel.load("triggerflow.tar.xz")
 ```
 
+## The Config file:
+
+Use this `.yaml` template and change as needed.
+
+```yaml
+compiler:
+  name: "AXO"
+  ml_backend: "keras"
+  compiler: "hls4ml"
+  fpga_part: "xc7vx690t-ffg1927-2"
+  clock_period: 25
+  n_outputs: 1
+  project_name: "AXO_project"
+  namespace: "AXO"
+  io_type: "io_parallel"
+  backend: "Vitis"
+  write_weights_txt: false
+
+subsystem:
+  name: "uGT"
+  n_inputs: 50
+  offset_type: "ap_fixed<10,10>"
+  shift_type: "ap_fixed<10,10>"
+
+  objects:
+    muons:
+      size: 4
+      features: [pt, eta_extrapolated, phi_extrapolated]
+
+    jets:
+      size: 4
+      features: [et, eta, phi]
+
+    egammas:
+      size: 4
+      features: [et, eta, phi]
+
+    taus:
+      size: 4
+      features: [et, eta, phi]
+
+  global_features:
+    #- et.et
+    #- ht.et
+    - etmiss.et
+    - etmiss.phi
+    #- htmiss.et
+    #- htmiss.phi
+    #- ethfmiss.et
+    #- ethfmiss.phi
+    #- hthfmiss.et
+    #- hthfmiss.phi
+
+  muon_size: 4
+  jet_size: 4
+  egamma_size: 4
+  tau_size: 4
+```
+
 ## Logging with MLflow
 
 ```python
{triggerflow-0.2.1.dist-info → triggerflow-0.2.3.dist-info}/RECORD
CHANGED

@@ -1,13 +1,15 @@
 trigger_dataset/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 trigger_dataset/core.py,sha256=ZX96U6rWxxfCatDQbst6IRZvtlyDj1_2JA7stPydGTQ,2645
 trigger_loader/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-trigger_loader/cluster_manager.py,sha256=
-trigger_loader/loader.py,sha256=
+trigger_loader/cluster_manager.py,sha256=XgmY1xeW8zrpQDJqssKamWzjn6TQ60NGNzpcdZwL6NE,3617
+trigger_loader/loader.py,sha256=wMkeZ3k36wpxt-B8OpKOa6j7z0-fnJUqQ-5AbVjNpBM,5158
 trigger_loader/processor.py,sha256=cvBfYmvcr4FLzOHgGE50oy7EkFzFaV80Z_66amqfsEY,7724
 triggerflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 triggerflow/cli.py,sha256=ZNQb3XQN8Ir6Hp6KX_ugec9bm2kqxLNZ0KdVGJmnqFA,4498
-triggerflow/core.py,sha256=
+triggerflow/core.py,sha256=QMU_zuWrYXuZekB7z8Q90Cuaga3B_uuDnYOPthQGdps,22465
 triggerflow/mlflow_wrapper.py,sha256=yCaIS-H7oC2KxnExj24ka9ylF4A1wgzRIpc7Y43ervI,10667
+triggerflow/interfaces/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+triggerflow/interfaces/uGT.py,sha256=UBCK0WtF-MUkI9mWrulOdExtDXgwKPsHDS4C-FXIgMs,3637
 triggerflow/starter/.gitignore,sha256=tH2z_M-tPM9MLWC2wPz1Z43Dq-wdVmb_kVYtrLT3tN4,2052
 triggerflow/starter/README.md,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 triggerflow/starter/cookiecutter.json,sha256=neplzXvVe3g6OVqYD8M2olfmjRKNr1UKqmflzP3j1UY,259

@@ -86,12 +88,15 @@ triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/model_training/
 triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/model_training/test_pipeline.py,sha256=0N747l50lEmjAgCy9K8LrNMW436edMUMo1E7tS4MADY,297
 triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/model_validation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/model_validation/test_pipeline.py,sha256=2BTupcyuFEXTyMrGLVmF6Yv0CAZcdedc7fxEfmLGmxo,299
+triggerflow/templates/build_ugt.tcl,sha256=P1y42s9Ju6zDyRskCVlD-vSnblAmTafw7Jpzsq4_HWA,1061
+triggerflow/templates/data_types.h,sha256=m7_jLsRixSVjp8auxHZUUNAcSO777C5TTZl__9ENk90,12640
 triggerflow/templates/makefile,sha256=A-aetsLC51Bop0T_-yPY8Z8Hg29ApN4YPvKx_jjPuHw,970
 triggerflow/templates/makefile_version,sha256=6kFc_u2oiM9l2rH7RK_BLzdZu1ZEK8PQTQKGBLRY0v4,328
+triggerflow/templates/model-gt.cpp,sha256=qZwuTtsvrKB_mOB-HDb2uOD7mDo4-20EjFiQzRjMdPo,2969
 triggerflow/templates/model_template.cpp,sha256=jMNRcO7NgC6I9Wd2BV3Bim-P1qPsAl_oeVQ8KofQGEw,1807
 triggerflow/templates/scales.h,sha256=MFcB5S0DEvfzHuUhyZqILR0O4ktugOG-fLnuCDUUewM,373
-triggerflow-0.2.
-triggerflow-0.2.
-triggerflow-0.2.
-triggerflow-0.2.
-triggerflow-0.2.
+triggerflow-0.2.3.dist-info/METADATA,sha256=8FMEii-KeQD1cm7c2ZrpHI-4f-efE9DmrlP4H9VzmxQ,4353
+triggerflow-0.2.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+triggerflow-0.2.3.dist-info/entry_points.txt,sha256=5QSV9YDseB_FqgVh9q10BdL4b1I6t68rGwPLXgVL60g,53
+triggerflow-0.2.3.dist-info/top_level.txt,sha256=cX0jkuM9tfxGp002ZBQ1AYgx-6D_NgBtomgPL0WA9bE,43
+triggerflow-0.2.3.dist-info/RECORD,,
{triggerflow-0.2.1.dist-info → triggerflow-0.2.3.dist-info}/WHEEL
File without changes

{triggerflow-0.2.1.dist-info → triggerflow-0.2.3.dist-info}/entry_points.txt
File without changes

{triggerflow-0.2.1.dist-info → triggerflow-0.2.3.dist-info}/top_level.txt
File without changes