triggerflow 0.2.tar.gz → 0.2.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {triggerflow-0.2 → triggerflow-0.2.1}/PKG-INFO +1 -1
- {triggerflow-0.2 → triggerflow-0.2.1}/pyproject.toml +1 -1
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/core.py +142 -126
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/templates/makefile +3 -3
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/templates/makefile_version +2 -2
- triggerflow-0.2.1/src/triggerflow/templates/model_template.cpp +60 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/templates/scales.h +1 -1
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow.egg-info/PKG-INFO +1 -1
- {triggerflow-0.2 → triggerflow-0.2.1}/tests/test.py +15 -12
- triggerflow-0.2/src/triggerflow/templates/model_template.cpp +0 -59
- {triggerflow-0.2 → triggerflow-0.2.1}/MANIFEST.in +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/README.md +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/setup.cfg +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/trigger_dataset/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/trigger_dataset/core.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/trigger_loader/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/trigger_loader/cluster_manager.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/trigger_loader/loader.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/trigger_loader/processor.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/cli.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/mlflow_wrapper.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/.gitignore +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/README.md +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/cookiecutter.json +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/prompts.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/.dvcignore +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/.gitignore +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/.gitlab-ci.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/README.md +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/README.md +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/base/catalog.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/base/parameters.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/base/parameters_compile.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/base/parameters_data_processing.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/base/parameters_load_data.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/base/parameters_model_training.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/base/parameters_model_validation.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/local/catalog.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/local/parameters.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/local/parameters_compile.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/local/parameters_data_processing.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/local/parameters_load_data.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/local/parameters_model_training.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/local/parameters_model_validation.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/conf/logging.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/data/01_raw/.gitkeep +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/data/01_raw/samples.json +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/data/01_raw/samples_dummy.json +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/data/02_loaded/.gitkeep +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/data/03_preprocessed/.gitkeep +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/data/04_models/.gitkeep +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/data/05_validation/.gitkeep +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/data/06_compile/.gitkeep +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/data/07_reporting/.gitkeep +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/dvc.yaml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/environment.yml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/pyproject.toml +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/__main__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/datasets/any_object.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/datasets/base_dataset.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/datasets/meta_dataset.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/datasets/{{ cookiecutter.python_package }}_dataset.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/models/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/models/base_model.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/models/{{ cookiecutter.python_package }}_model.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipeline_registry.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/compile/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/compile/nodes.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/compile/pipeline.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_processing/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_processing/nodes.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_processing/pipeline.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/load_data/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/load_data/nodes.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/load_data/pipeline.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/model_training/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/model_training/nodes.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/model_training/pipeline.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/model_validation/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/model_validation/nodes.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/model_validation/pipeline.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/utils/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/utils/metric.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/utils/plotting.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/tests/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/compile/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/compile/test_pipeline.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/data_processing/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/data_processing/test_pipeline.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/load_data/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/load_data/test_pipeline.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/model_training/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/model_training/test_pipeline.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/model_validation/__init__.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/tests/pipelines/model_validation/test_pipeline.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/starter/{{ cookiecutter.repo_name }}/tests/test_run.py +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow.egg-info/SOURCES.txt +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow.egg-info/dependency_links.txt +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow.egg-info/entry_points.txt +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow.egg-info/requires.txt +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow.egg-info/top_level.txt +0 -0
- {triggerflow-0.2 → triggerflow-0.2.1}/tests/test_loader.py +0 -0
{triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/core.py

@@ -1,38 +1,32 @@
-import
-import importlib.resources as pkg_resources
+from pathlib import Path
 import json
-import
-import shutil
+import numpy as np
 import tarfile
-import
+import importlib
 from abc import ABC, abstractmethod
-from
-
-
-import
-
-import triggerflow.templates
-
-logger = logging.getLogger(__name__)
+from typing import Optional, Dict, Any, Union
+import shutil, warnings
+import importlib.resources as pkg_resources
+import triggerflow.templates


 class ModelConverter(ABC):
     """Abstract base class for model converters"""
-
+
     @abstractmethod
-    def convert(self, model, workspace: Path, **kwargs) -> Path
+    def convert(self, model, workspace: Path, **kwargs) -> Optional[Path]:
         """Convert model to intermediate format"""
         pass


 class CompilerStrategy(ABC):
     """Abstract base class for compilation strategies"""
-
+
     @abstractmethod
-    def compile(self, model, workspace: Path, config:
+    def compile(self, model, workspace: Path, config: Optional[Dict] = None, **kwargs) -> Any:
         """Compile model to firmware"""
         pass
-
+
     @abstractmethod
     def load_compiled_model(self, workspace: Path) -> Any:
         """Load a previously compiled model"""
@@ -41,7 +35,7 @@ class CompilerStrategy(ABC):

 class ModelPredictor(ABC):
     """Abstract base class for model predictors"""
-
+
     @abstractmethod
     def predict(self, input_data: np.ndarray) -> np.ndarray:
         """Make predictions using the model"""
@@ -50,7 +44,7 @@ class ModelPredictor(ABC):

 class KerasToQONNXConverter(ModelConverter):
     """Converts Keras models to QONNX format"""
-
+
     def convert(self, model, workspace: Path, **kwargs) -> Path:
         import tensorflow as tf
         from qonnx.converters import keras as keras_converter
@@ -58,7 +52,7 @@ class KerasToQONNXConverter(ModelConverter):
         from qonnx.transformation.channels_last import ConvertToChannelsLastAndClean
         from qonnx.transformation.gemm_to_matmul import GemmToMatMul
         from qonnx.util.cleanup import cleanup_model
-
+
         qonnx_path = workspace / "model_qonnx.onnx"
         input_signature = [tf.TensorSpec(1, model.inputs[0].dtype, name="input_0")]
         qonnx_model, _ = keras_converter.from_keras(model, input_signature, output_path=qonnx_path)
@@ -67,26 +61,26 @@
         qonnx_model = qonnx_model.transform(ConvertToChannelsLastAndClean())
         qonnx_model = qonnx_model.transform(GemmToMatMul())
         cleaned_model = cleanup_model(qonnx_model)
-
+
         return qonnx_path, cleaned_model


 class NoOpConverter(ModelConverter):
     """No-operation converter for models that don't need conversion"""
-
-    def convert(self, model, workspace: Path, **kwargs) -> Path
+
+    def convert(self, model, workspace: Path, **kwargs) -> Optional[Path]:
         return None


 class HLS4MLStrategy(CompilerStrategy):
     """HLS4ML compilation strategy for Keras models"""
-
-    def compile(self, model, workspace: Path, config:
+
+    def compile(self, model, workspace: Path, config: Optional[Dict] = None, **kwargs) -> Any:
         import hls4ml
-
+
         firmware_dir = workspace / "firmware"
         firmware_dir.mkdir(exist_ok=True)
-
+
         cfg = config or hls4ml.utils.config_from_keras_model(model, granularity="name")

         hls_kwargs = {
@@ -109,34 +103,48 @@ class HLS4MLStrategy(CompilerStrategy):
             warnings.warn("Vivado not found in PATH. Firmware build failed.", UserWarning)
         firmware_model.save(workspace / "firmware_model.fml")
         return firmware_model
-
+
     def load_compiled_model(self, workspace: Path) -> Any:
         from hls4ml.converters import link_existing_project
-
+
         firmware_model = link_existing_project(workspace / "firmware")
         firmware_model.compile()
         return firmware_model

+
 class ConiferStrategy(CompilerStrategy):
     """Conifer compilation strategy for XGBoost models"""

-    def compile(self, model, workspace: Path, config:
+    def compile(self, model, workspace: Path, config: Optional[Dict] = None, **kwargs) -> Any:
         import conifer
+        import shutil
+        import warnings
+        import os

         firmware_dir = workspace / "firmware"
         firmware_dir.mkdir(exist_ok=True)
+        os.environ['JSON_ROOT'] = '/eos/user/m/maglowac/TriggerModel/json'
+        os.environ['XILINX_AP_INCLUDE'] = '/eos/user/m/maglowac/TriggerModel/HLS_arbitrary_Precision_Types/include'

-
+
+        cfg = conifer.backends.xilinxhls.auto_config()#config or conifer.backends.cpp.auto_config()
         cfg['OutputDir'] = str(firmware_dir)
-
+
         for key, value in kwargs.items():
             cfg[key] = value

+        print(cfg)
         firmware_model = conifer.converters.convert_from_xgboost(
             model,
             config=cfg
         )

+        firmware_model.write()
+        proj_name = cfg.get('ProjectName', 'my_prj')
+        bridge_file = firmware_dir / "bridge.cpp"
+        text = bridge_file.read_text()
+        text = text.replace("my_prj.h", f"{proj_name}.h")
+        bridge_file.write_text(text)
         firmware_model.compile()
         if shutil.which("vivado") is not None:
             firmware_model.build()
@@ -145,34 +153,42 @@ class ConiferStrategy(CompilerStrategy):

         firmware_model.save(firmware_dir / "firmware_model.fml")
         return firmware_model
+
+    def load_compiled_model(self, workspace: Path) -> Any:
+        from conifer import load_model
+
+        firmware_model = load_model(workspace / "firmware_model.fml")
+        firmware_model.compile()
+        return firmware_model
+

 class DA4MLStrategy(CompilerStrategy):
     """DA4ML compilation strategy (placeholder)"""
-
-    def compile(self, model, workspace: Path, config:
+
+    def compile(self, model, workspace: Path, config: Optional[Dict] = None) -> Any:
         raise NotImplementedError("DA4ML conversion without QONNX not yet implemented")
-
+
     def load_compiled_model(self, workspace: Path) -> Any:
         raise NotImplementedError("DA4ML loading not yet implemented")


 class FINNStrategy(CompilerStrategy):
     """FINN compilation strategy (placeholder)"""
-
-    def compile(self, model, workspace: Path, config:
+
+    def compile(self, model, workspace: Path, config: Optional[Dict] = None) -> Any:
         raise NotImplementedError("FINN conversion without QONNX not yet implemented")
-
+
     def load_compiled_model(self, workspace: Path) -> Any:
         raise NotImplementedError("FINN loading not yet implemented")


 class SoftwarePredictor(ModelPredictor):
     """Software-based model predictor"""
-
+
     def __init__(self, model, backend: str):
         self.model = model
         self.backend = backend.lower()
-
+
     def predict(self, input_data):
         if input_data.ndim == 1:
             input_data = np.expand_dims(input_data, axis=0)
@@ -181,37 +197,37 @@ class SoftwarePredictor(ModelPredictor):

 class QONNXPredictor(ModelPredictor):
     """QONNX-based model predictor"""
-
+
     def __init__(self, qonnx_model, input_name: str):
         self.qonnx_model = qonnx_model
         self.input_name = input_name
-
+
     def predict(self, input_data: np.ndarray) -> np.ndarray:
         from qonnx.core.onnx_exec import execute_onnx
-
+
         input_data = np.asarray(input_data)
         if input_data.ndim == 1:
             input_data = np.expand_dims(input_data, axis=0)
-
+
         outputs = []
         for i in range(input_data.shape[0]):
             sample = input_data[i].astype("float32").reshape(1, -1)
             output_dict = execute_onnx(self.qonnx_model, {self.input_name: sample})
             outputs.append(output_dict["global_out"])
-
+
         return np.vstack(outputs)


 class FirmwarePredictor(ModelPredictor):
     """Firmware-based model predictor"""
-
+
     def __init__(self, firmware_model, compiler):
         if firmware_model is None:
             raise RuntimeError("Firmware model not built.")
         self.firmware_model = firmware_model
         self.compiler = compiler
-
-
+
+
     def predict(self, input_data: np.ndarray) -> np.ndarray:
         if self.compiler == "conifer":
             return self.firmware_model.decision_function(input_data)
@@ -221,7 +237,7 @@ class FirmwarePredictor(ModelPredictor):

 class ConverterFactory:
     """Factory for creating model converters"""
-
+
     @staticmethod
     def create_converter(ml_backend: str, compiler: str) -> ModelConverter:
         if ml_backend.lower() == "keras" and compiler.lower() == "hls4ml":
@@ -232,12 +248,12 @@ class ConverterFactory:

 class CompilerFactory:
     """Factory for creating compilation strategies"""
-
+
     @staticmethod
     def create_compiler(ml_backend: str, compiler: str) -> CompilerStrategy:
         backend = ml_backend.lower()
         comp = compiler.lower()
-
+
         if backend == "keras" and comp == "hls4ml":
             return HLS4MLStrategy()
         elif backend == "xgboost" and comp == "conifer":
@@ -252,9 +268,9 @@ class CompilerFactory:

 class WorkspaceManager:
     """Manages workspace directories and metadata"""
-
+
     BASE_WORKSPACE = Path.cwd() / "triggermodel"
-
+
     def __init__(self):
         self.workspace = self.BASE_WORKSPACE
         self.artifacts = {"firmware": None}
@@ -264,7 +280,7 @@ class WorkspaceManager:
             "compiler": None,
             "versions": []
         }
-
+
     def setup_workspace(self, name: str, ml_backend: str, compiler: str):
         """Initialize workspace and metadata"""
         self.workspace.mkdir(parents=True, exist_ok=True)
@@ -273,22 +289,22 @@ class WorkspaceManager:
             "ml_backend": ml_backend,
             "compiler": compiler,
         })
-
+
     def save_native_model(self, model, ml_backend: str):
         """Save the native model to workspace"""
         if ml_backend.lower() == "keras":
-            model.save(self.workspace / "keras_model
+            model.save(self.workspace / "keras_model")
         elif ml_backend.lower() == "xgboost":
             model.save_model(str(self.workspace / "xgb_model.json"))
-
+
     def add_artifact(self, key: str, value: Any):
         """Add artifact to tracking"""
         self.artifacts[key] = value
-
-    def add_version(self, version_info:
+
+    def add_version(self, version_info: Dict):
         """Add version information"""
         self.metadata["versions"].append(version_info)
-
+
     def save_metadata(self):
         """Save metadata to file"""
         with open(self.workspace / "metadata.json", "w") as f:
@@ -301,7 +317,7 @@ class WorkspaceManager:

 class ModelSerializer:
     """Handles model serialization and deserialization"""
-
+
     @staticmethod
     def save(workspace: Path, path: str):
         """Serialize the workspace into a tar.xz archive"""
@@ -309,37 +325,37 @@ class ModelSerializer:
         path.parent.mkdir(parents=True, exist_ok=True)
         with tarfile.open(path, mode="w:xz") as tar:
             tar.add(workspace, arcname=workspace.name)
-
-
+        print(f"TriggerModel saved to {path}")
+
     @staticmethod
-    def load(path: str) ->
+    def load(path: str) -> Dict[str, Any]:
         """Load workspace from tar.xz archive"""
         path = Path(path)
         if not path.exists():
             raise FileNotFoundError(f"{path} does not exist")
-
+
         workspace = Path.cwd() / "triggermodel"
-
+
         if workspace.exists():
             response = input(f"{workspace} already exists. Overwrite? [y/N]: ").strip().lower()
             if response != "y":
-
+                print("Load cancelled by user.")
                 return None
             shutil.rmtree(workspace)
-
+
         with tarfile.open(path, mode="r:xz") as tar:
             tar.extractall(path=Path.cwd())
-
+
         # Load metadata
         metadata_path = workspace / "metadata.json"
-        with open(metadata_path) as f:
+        with open(metadata_path, "r") as f:
             metadata = json.load(f)
-
+
         return {
             "workspace": workspace,
             "metadata": metadata
         }
-
+
     @staticmethod
     def load_native_model(workspace: Path, ml_backend: str):
         """Load native model from workspace"""
@@ -356,7 +372,7 @@ class ModelSerializer:
             return model
         else:
             raise ValueError(f"Unsupported ml_backend: {ml_backend}")
-
+
     @staticmethod
     def load_qonnx_model(workspace: Path):
         """Load QONNX model if it exists"""
@@ -370,81 +386,82 @@ class ModelSerializer:

 class TriggerModel:
     """Main facade class that orchestrates model conversion, compilation, and inference"""
-
+
     def __init__(self, name: str, ml_backend: str, n_outputs:int, compiler: str,
-                 native_model: object, compiler_config: dict = None, scales: dict = None):
-
+                 native_model: object, compiler_config: dict = None, scales: dict = None, unscaled_type: str = "ap_fixed<16,6>"):
+
         if ml_backend.lower() not in ("keras", "xgboost"):
             raise ValueError("Only Keras or XGBoost backends are currently supported.")
-
+
         self.name = name
         self.ml_backend = ml_backend.lower()
         self.scales = scales
+        self.unscaled_type = unscaled_type
         self.n_outputs = n_outputs
         self.compiler = compiler.lower()
         self.native_model = native_model
         self.compiler_conifg = compiler_config
-
+
         self.workspace_manager = WorkspaceManager()
         self.converter = ConverterFactory.create_converter(ml_backend, compiler)
         self.compiler_strategy = CompilerFactory.create_compiler(ml_backend, compiler)
-
+
         self.firmware_model = None
         self.model_qonnx = None
         self.input_name = None
-
+
         self.workspace_manager.setup_workspace(name, self.ml_backend, self.compiler)
-
+
     @property
     def workspace(self) -> Path:
         """Get workspace path"""
         return self.workspace_manager.workspace
-
+
     @property
-    def artifacts(self) ->
+    def artifacts(self) -> Dict[str, Any]:
         """Get artifacts dictionary"""
         return self.workspace_manager.artifacts
-
+
     @property
-    def metadata(self) ->
+    def metadata(self) -> Dict[str, Any]:
         """Get metadata dictionary"""
         return self.workspace_manager.metadata
-
+
     def __call__(self, **compiler_kwargs):
         """Execute the full model conversion and compilation pipeline"""
         self.parse_dataset_object()
-
+
         # Save native model
         self.workspace_manager.save_native_model(self.native_model, self.ml_backend)
-
+
         # Convert model if needed
         conversion_result = self.converter.convert(
-            self.native_model,
+            self.native_model,
             self.workspace_manager.workspace
         )
-
+
         if conversion_result is not None:
             qonnx_path, self.model_qonnx = conversion_result
             self.input_name = self.model_qonnx.graph.input[0].name
             self.workspace_manager.add_artifact("qonnx", qonnx_path)
             self.workspace_manager.add_version({"qonnx": str(qonnx_path)})
-
+
         # Compile model
         self.firmware_model = self.compiler_strategy.compile(
             self.native_model,
             self.workspace_manager.workspace,
-            self.compiler_conifg,
+            self.compiler_conifg,
             **compiler_kwargs
         )
-
+
         self.workspace_manager.add_artifact("firmware", self.workspace_manager.workspace / "firmware")

-        if self.compiler
-            self.build_emulator(self.scales['shifts'], self.scales['offsets'], self.n_outputs)
-
+        if self.compiler is not "conifer" and self.scales is not None:
+            self.build_emulator(self.scales['shifts'], self.scales['offsets'], self.n_outputs, self.unscaled_type)
+
         self.workspace_manager.add_artifact("firmware", self.workspace_manager.workspace / "firmware")
         self.workspace_manager.save_metadata()
-
+
     @staticmethod
     def parse_dataset_object():
         """Parse dataset object (placeholder)"""
@@ -459,39 +476,37 @@ class TriggerModel:
             template = template.replace("{{" + k + "}}", str(v))
         with open(out_path, "w") as f:
             f.write(template)
-
+
     def software_predict(self, input_data: np.ndarray) -> np.ndarray:
         """Make predictions using software model"""
         predictor = SoftwarePredictor(self.native_model, self.ml_backend)
         return predictor.predict(input_data)
-
+
     def qonnx_predict(self, input_data: np.ndarray) -> np.ndarray:
         """Make predictions using QONNX model"""
         if self.model_qonnx is None:
             raise RuntimeError("QONNX model not available")
         predictor = QONNXPredictor(self.model_qonnx, self.input_name)
         return predictor.predict(input_data)
-
+
     def firmware_predict(self, input_data: np.ndarray) -> np.ndarray:
         """Make predictions using firmware model"""
         predictor = FirmwarePredictor(self.firmware_model, self.compiler)
         return predictor.predict(input_data)
-
-    def build_emulator(self, ad_shift: list, ad_offsets: list, n_outputs: int):
-        """
-
-        Copies HLS sources and generates emulator scaffolding.
-        """
+
+    def build_emulator(self, ad_shift: list, ad_offsets: list, n_outputs: int, unscaled_type: str = "ap_fixed<16,6>"):
+        """Builds CMSSW emulator"""
+
         emulator_dir = self.workspace / "emulator"
         emulator_dir.mkdir(exist_ok=True)

-        model_dir = emulator_dir / self.name
+        model_dir = emulator_dir / self.name
         model_dir.mkdir(exist_ok=True)
-
+
         firmware_dir = self.workspace / "firmware" / "firmware"
-
+
         shutil.copytree(firmware_dir, f"{model_dir}/NN", dirs_exist_ok=True)
-
+
         # Access scales template from installed package
         with pkg_resources.path(triggerflow.templates, "scales.h") as scales_template_path:
             scales_out_path = model_dir / "scales.h"
@@ -501,13 +516,14 @@ class TriggerModel:
                 "N_OUTPUTS": n_outputs,
                 "AD_SHIFT": ", ".join(map(str, ad_shift)),
                 "AD_OFFSETS": ", ".join(map(str, ad_offsets)),
+                "UNSCALED_TYPE": unscaled_type,
             }
             self._render_template(scales_template_path, scales_out_path, context)

         with pkg_resources.path(triggerflow.templates, "model_template.cpp") as emulator_template_path:
             emulator_out_path = model_dir / "emulator.cpp"
-            self._render_template(emulator_template_path, emulator_out_path, context)
-
+            self._render_template(emulator_template_path, emulator_out_path, context)
+
         with pkg_resources.path(triggerflow.templates, "makefile_version") as makefile_template_path:
             makefile_out_path = model_dir / "Makefile"
             self._render_template(makefile_template_path, makefile_out_path, {"MODEL_NAME": self.name})
@@ -515,44 +531,44 @@ class TriggerModel:
         with pkg_resources.path(triggerflow.templates, "makefile") as makefile_template_path:
             makefile_out_path = emulator_dir / "Makefile"
             self._render_template(makefile_template_path, makefile_out_path, {"MODEL_NAME": self.name})
-
-
+
+
     def save(self, path: str):
         """Save the complete model to an archive"""
         ModelSerializer.save(self.workspace_manager.workspace, path)
-
+
     @classmethod
     def load(cls, path: str) -> 'TriggerModel':
         """Load a model from an archive"""
         load_result = ModelSerializer.load(path)
         if load_result is None:
             return None
-
+
         workspace = load_result["workspace"]
         metadata = load_result["metadata"]
-
+
         obj = cls.__new__(cls)
         obj.workspace_manager = WorkspaceManager()
         obj.workspace_manager.workspace = workspace
         obj.workspace_manager.metadata = metadata
         obj.workspace_manager.artifacts = {"firmware": workspace / "firmware"}
-
+
         obj.name = metadata.get("name", "")
         obj.ml_backend = metadata.get("ml_backend")
         obj.compiler = metadata.get("compiler")
-
+
         obj.native_model = ModelSerializer.load_native_model(workspace, obj.ml_backend)
-
+
         obj.model_qonnx, obj.input_name = ModelSerializer.load_qonnx_model(workspace)
-
+
         if obj.compiler.lower() in ("hls4ml", "conifer"):
             obj.compiler_strategy = CompilerFactory.create_compiler(obj.ml_backend, obj.compiler)
             obj.firmware_model = obj.compiler_strategy.load_compiled_model(workspace)
         else:
             obj.firmware_model = None
             obj.compiler_strategy = None
-
+
         obj.converter = ConverterFactory.create_converter(obj.ml_backend, obj.compiler)
-        obj.dataset_object = None
-
-        return obj
+        obj.dataset_object = None
+
+        return obj
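Taken together, the core.py changes above add an unscaled_type argument (default "ap_fixed<16,6>") that is stored on TriggerModel, forwarded to build_emulator, and substituted into the generated scales.h and emulator.cpp, and they extend ConiferStrategy with project patching plus a load_compiled_model implementation. A minimal usage sketch under stated assumptions — the import path is inferred from src/triggerflow/core.py, and the Keras model, names, and scale arrays below are illustrative placeholders rather than values shipped with the package:

    import numpy as np
    import tensorflow as tf
    from triggerflow.core import TriggerModel  # import path assumed from src/triggerflow/core.py

    # Toy stand-ins: a 57-input Keras model and dummy integer scales (placeholders).
    model = tf.keras.Sequential([tf.keras.Input(shape=(57,)), tf.keras.layers.Dense(1)])
    scales = {"offsets": np.zeros(57, dtype="int"), "shifts": np.zeros(57, dtype="int")}

    tm = TriggerModel(
        name="toy",
        ml_backend="Keras",
        n_outputs=1,
        compiler="hls4ml",
        native_model=model,
        compiler_config=None,
        scales=scales,
        unscaled_type="ap_fixed<16,6>",  # new in 0.2.1; forwarded into scales.h / emulator.cpp
    )
    # Running the pipeline requires hls4ml (and Vivado for the firmware build step).
    tm(project_name="toy_project", namespace="toy", write_weights_txt=False, io_type="io_parallel")
    print(tm.software_predict(np.ones((10, 57))))

The keyword arguments passed when calling the model are forwarded to the compiler strategy, matching the updated tests/test.py shown further down.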
{triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/templates/makefile

@@ -1,12 +1,12 @@
 CPP_STANDARD := c++17
 CXXFLAGS := -O3 -fPIC -std=$(CPP_STANDARD)
 PREFIX := .
-EMULATOR_EXTRAS :=
+EMULATOR_EXTRAS := /cvmfs/cms.cern.ch/el8_amd64_gcc11/external/hls4mlEmulatorExtras/1.1.1-6933fcc7cdb4cdd5a649bd6579151d1b/
 AP_TYPES := $(EMULATOR_EXTRAS)/include/ap_types
-HLS_ROOT :=
+HLS_ROOT := /cvmfs/cms.cern.ch/el8_amd64_gcc11/external/hls/2019.08-fd724004387c2a6770dc3517446d30d9
 HLS4ML_INCLUDE := $(EMULATOR_EXTRAS)/include/hls4ml
 INCLUDES := -I$(HLS4ML_INCLUDE) -I$(AP_TYPES) -I$(HLS_ROOT)/include
-LD_FLAGS := -L$(EMULATOR_EXTRAS)/lib64 -lemulator_interface
+LD_FLAGS := -L$(EMULATOR_EXTRAS)/lib64 -lemulator_interface -ldl
 ALL_VERSIONS := {{MODEL_NAME}}/{{MODEL_NAME}}.so

 .DEFAULT_GOAL := all
{triggerflow-0.2 → triggerflow-0.2.1}/src/triggerflow/templates/makefile_version

@@ -2,7 +2,7 @@

 MODEL_NAME = {{MODEL_NAME}}

-$(MODEL_NAME).so: $(MODEL_NAME)_project.o
+$(MODEL_NAME).so: $(MODEL_NAME)_project.o emulator.o
 	$(CXX) $(CXXFLAGS) $(LD_FLAGS) -shared $^ -o $@

 %.o: NN/%.cpp
@@ -12,4 +12,4 @@ $(MODEL_NAME).so: $(MODEL_NAME)_project.o $(MODEL_NAME).o
 	$(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@

 clean:
-	rm -f $(MODEL_NAME)_project.o
+	rm -f $(MODEL_NAME)_project.o emulator.o $(MODEL_NAME).so
triggerflow-0.2.1/src/triggerflow/templates/model_template.cpp (new file)

@@ -0,0 +1,60 @@
+#include "NN/{{MODEL_NAME}}_project.h"
+#include "emulator.h"
+#include "NN/nnet_utils/nnet_common.h"
+#include <any>
+#include <array>
+#include <utility>
+#include "ap_fixed.h"
+#include "ap_int.h"
+#include "scales.h"
+
+using namespace hls4ml_{{MODEL_NAME}};
+
+class {{MODEL_NAME}}_emulator : public hls4mlEmulator::Model {
+
+private:
+    typedef {{UNSCALED_TYPE}} unscaled_t;
+    static const int N_INPUT_SIZE = {{N_INPUTS}};
+
+    unscaled_t _unscaled_input[N_INPUT_SIZE];
+    {{MODEL_NAME}}::input_t _scaled_input;
+    {{MODEL_NAME}}::result_t _result;
+
+    // Scale the raw input array to the model input type
+    virtual void _scaleNNInputs(unscaled_t unscaled[N_INPUT_SIZE], {{MODEL_NAME}}::input_t &scaled)
+    {
+        for (int i = 0; i < N_INPUT_SIZE; i++)
+        {
+            unscaled_t tmp0 = unscaled[i] - hls4ml_{{MODEL_NAME}}::ad_offsets[i];
+            {{UNSCALED_TYPE}} tmp1 = tmp0 >> hls4ml_{{MODEL_NAME}}::ad_shift[i];
+            scaled[i] = tmp1;
+        }
+    }
+
+public:
+    virtual void prepare_input(std::any input) {
+        unscaled_t *unscaled_input_p = std::any_cast<unscaled_t*>(input);
+        for (int i = 0; i < N_INPUT_SIZE; i++) {
+            _unscaled_input[i] = unscaled_input_p[i];
+        }
+        _scaleNNInputs(_unscaled_input, _scaled_input);
+    }
+
+    virtual void predict() {
+        // Call the io_parallel model function; pass pointers
+        {{MODEL_NAME}}::{{MODEL_NAME}}_project(&_scaled_input, &_result);
+    }
+
+    virtual void read_result(std::any result) {
+        {{MODEL_NAME}}::result_t *result_p = std::any_cast<{{MODEL_NAME}}::result_t*>(result);
+        *result_p = _result;
+    }
+};
+
+extern "C" hls4mlEmulator::Model* create_model() {
+    return new {{MODEL_NAME}}_emulator;
+}
+
+extern "C" void destroy_model(hls4mlEmulator::Model* m) {
+    delete m;
+}
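The {{...}} placeholders in this new template are filled by TriggerModel.build_emulator through the plain string substitution visible in core.py above (template.replace("{{" + k + "}}", str(v))). A rough sketch of that rendering step with an illustrative context dictionary — key names such as N_INPUTS and all numeric values here are assumptions for the example, not taken from the release:

    from pathlib import Path

    # Illustrative context; build_emulator derives these from the model and its scales.
    context = {
        "MODEL_NAME": "toy",
        "N_INPUTS": 57,
        "N_OUTPUTS": 1,
        "AD_SHIFT": ", ".join(map(str, [3, 0, 6])),
        "AD_OFFSETS": ", ".join(map(str, [18, 0, 72])),
        "UNSCALED_TYPE": "ap_fixed<16,6>",  # new key in 0.2.1
    }

    template = Path("model_template.cpp").read_text()
    for k, v in context.items():
        template = template.replace("{{" + k + "}}", str(v))  # same scheme as _render_template
    Path("emulator.cpp").write_text(template)

The UNSCALED_TYPE placeholder is what lets 0.2.1 configure the emulator's raw-input type per model instead of hard-coding it.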
{triggerflow-0.2 → triggerflow-0.2.1}/tests/test.py

@@ -37,19 +37,22 @@ def test_predict():
     dummy_model = make_dummy_model()
     scales = {'offsets': np.array([18, 0, 72, 7, 0, 73, 4, 0, 73, 4, 0, 72, 3, 0, 72, 6, -0, 286, 3, -2, 285, 3, -2, 282, 3, -2, 286, 29, 0, 72, 22, 0, 72, 18, 0, 72, 14, 0, 72, 11, 0, 72, 10, 0, 72, 10, 0, 73, 9, 0, 73, 9, 0, 72, 8, -2, 72], dtype='int'),
               'shifts': np.array([3, 0, 6, 2, 5, 6, 0, 5, 6, 0, 5, 6, -1, 5, 6, 2, 7, 8, 0, 7, 8, 0, 7, 8, 0, 7, 8, 4, 6, 6, 3, 6, 6, 3, 6, 6, 3, 6, 6, 3, 6, 6, 3, 6, 6, 3, 6, 6, 3, 6, 6, 3, 6, 6, 2, 6, 6], dtype='int')}
-
-
-
-
-
-
-
-
+    trigger_model = TriggerModel(
+        name=name,
+        ml_backend="Keras",
+        n_outputs=int(1),
+        compiler="hls4ml",
+        native_model=dummy_model,
+        compiler_config=None,
+        scales=scales,
     )
-
+    trigger_model(project_name = name+"_project", namespace = name, write_weights_txt = False, io_type='io_parallel')
     input_data = np.ones((10,57))
-    output =
-    output =
-    output =
+    output = trigger_model.software_predict(input_data)
+    output = trigger_model.firmware_predict(input_data)
+    output = trigger_model.qonnx_predict(input_data)
     assert output is not None

+
+
+
triggerflow-0.2/src/triggerflow/templates/model_template.cpp (removed)

@@ -1,59 +0,0 @@
-#include "NN/{{MODEL_NAME}}.h"
-#include "emulator.h"
-#include "NN/nnet_utils/nnet_common.h"
-#include <any>
-#include <array>
-#include <utility>
-#include "ap_fixed.h"
-#include "ap_int.h"
-#include "scales.h"
-
-using namespace hls4ml_{{MODEL_NAME}};
-
-class {{MODEL_NAME}}_emulator : public hls4mlEmulator::Model {
-
-private:
-    unscaled_t _unscaled_input[N_INPUT_1_1];
-    input_t _scaled_input[N_INPUT_1_1];
-    result_t _result[{{N_OUTPUTS}}];
-
-    virtual void _scaleNNInputs(unscaled_t unscaled[N_INPUT_1_1], input_t scaled[N_INPUT_1_1])
-    {
-        for (int i = 0; i < N_INPUT_1_1; i++)
-        {
-            unscaled_t tmp0 = unscaled[i] - hls4ml_{{MODEL_NAME}}::ad_offsets[i];
-            input_t tmp1 = tmp0 >> hls4ml_{{MODEL_NAME}}::ad_shift[i];
-            scaled[i] = tmp1;
-        }
-    }
-
-public:
-    virtual void prepare_input(std::any input) {
-        unscaled_t *unscaled_input_p = std::any_cast<unscaled_t*>(input);
-
-        for (int i = 0; i < N_INPUT_1_1; i++) {
-            _unscaled_input[i] = std::any_cast<unscaled_t>(unscaled_input_p[i]);
-        }
-
-        _scaleNNInputs(_unscaled_input, _scaled_input);
-    }
-
-    virtual void predict() {
-        {{MODEL_NAME}}(_scaled_input, _result);
-    }
-
-    virtual void read_result(std::any result) {
-        result_t *result_p = std::any_cast<result_t*>(result);
-        for (int i = 0; i < {{N_OUTPUTS}}; i++) {
-            result_p[i] = _result[i];
-        }
-    }
-};
-
-extern "C" hls4mlEmulator::Model* create_model() {
-    return new {{MODEL_NAME}}_emulator;
-}
-
-extern "C" void destroy_model(hls4mlEmulator::Model* m) {
-    delete m;
-}