triggerflow 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
triggerflow/core.py ADDED
@@ -0,0 +1,550 @@
+ from pathlib import Path
+ import json
+ import numpy as np
+ import tarfile
+ import importlib
+ from abc import ABC, abstractmethod
+ from typing import Optional, Dict, Any, Union
+ import shutil, warnings
+ import importlib.resources as pkg_resources
+ import triggerflow.templates
+
+
+ class ModelConverter(ABC):
+     """Abstract base class for model converters"""
+
+     @abstractmethod
+     def convert(self, model, workspace: Path, **kwargs) -> Optional[tuple]:
+         """Convert model to an intermediate format; returns (path, converted model) or None"""
+         pass
+
+
+ class CompilerStrategy(ABC):
+     """Abstract base class for compilation strategies"""
+
+     @abstractmethod
+     def compile(self, model, workspace: Path, config: Optional[Dict] = None, **kwargs) -> Any:
+         """Compile model to firmware"""
+         pass
+
+     @abstractmethod
+     def load_compiled_model(self, workspace: Path) -> Any:
+         """Load a previously compiled model"""
+         pass
+
+
+ class ModelPredictor(ABC):
+     """Abstract base class for model predictors"""
+
+     @abstractmethod
+     def predict(self, input_data: np.ndarray) -> np.ndarray:
+         """Make predictions using the model"""
+         pass
+
+
+ class KerasToQONNXConverter(ModelConverter):
+     """Converts Keras models to QONNX format"""
+
+     def convert(self, model, workspace: Path, **kwargs) -> tuple:
+         import tensorflow as tf
+         from qonnx.converters import keras as keras_converter
+         from qonnx.core.modelwrapper import ModelWrapper
+         from qonnx.transformation.channels_last import ConvertToChannelsLastAndClean
+         from qonnx.transformation.gemm_to_matmul import GemmToMatMul
+         from qonnx.util.cleanup import cleanup_model
+
+         qonnx_path = workspace / "model_qonnx.onnx"
+         # Use the model's own input shape for the signature
+         input_signature = [tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype, name="input_0")]
+         qonnx_model, _ = keras_converter.from_keras(model, input_signature, output_path=qonnx_path)
+         qonnx_model = ModelWrapper(qonnx_model)
+         qonnx_model = cleanup_model(qonnx_model)
+         qonnx_model = qonnx_model.transform(ConvertToChannelsLastAndClean())
+         qonnx_model = qonnx_model.transform(GemmToMatMul())
+         cleaned_model = cleanup_model(qonnx_model)
+
+         return qonnx_path, cleaned_model
+
+
+ class NoOpConverter(ModelConverter):
+     """No-operation converter for models that don't need conversion"""
+
+     def convert(self, model, workspace: Path, **kwargs) -> None:
+         return None
+
+
+ class HLS4MLStrategy(CompilerStrategy):
+     """HLS4ML compilation strategy for Keras models"""
+
+     def compile(self, model, workspace: Path, config: Optional[Dict] = None, **kwargs) -> Any:
+         import hls4ml
+
+         firmware_dir = workspace / "firmware"
+         firmware_dir.mkdir(exist_ok=True)
+
+         cfg = config or hls4ml.utils.config_from_keras_model(model, granularity="name")
+
+         hls_kwargs = {
+             "hls_config": cfg,
+             "output_dir": str(firmware_dir),
+             "io_type": "io_stream",
+             "backend": "Vitis"
+         }
+         hls_kwargs.update(kwargs)
+
+         firmware_model = hls4ml.converters.convert_from_keras_model(
+             model,
+             **hls_kwargs
+         )
+
+         firmware_model.compile()
+         if shutil.which("vivado") is not None:
+             firmware_model.build()
+         else:
+             warnings.warn("Vivado not found in PATH; skipping firmware build.", UserWarning)
+         firmware_model.save(workspace / "firmware_model.fml")
+         return firmware_model
+
+     def load_compiled_model(self, workspace: Path) -> Any:
+         from hls4ml.converters import link_existing_project
+
+         firmware_model = link_existing_project(workspace / "firmware")
+         firmware_model.compile()
+         return firmware_model
+
+
+ class ConiferStrategy(CompilerStrategy):
+     """Conifer compilation strategy for XGBoost models"""
+
+     def compile(self, model, workspace: Path, config: Optional[Dict] = None, **kwargs) -> Any:
+         import conifer
+
+         firmware_dir = workspace / "firmware"
+         firmware_dir.mkdir(exist_ok=True)
+
+         cfg = config or conifer.backends.xilinxhls.auto_config()
+         firmware_model = conifer.converters.convert_from_xgboost(
+             model,
+             config=cfg,
+             output_dir=str(firmware_dir)
+         )
+         firmware_model.compile()
+         if shutil.which("vivado") is not None:
+             firmware_model.build()
+         else:
+             warnings.warn("Vivado not found in PATH; skipping firmware build.", UserWarning)
+         firmware_model.save(workspace / "firmware_model.fml")
+         return firmware_model
+
+     def load_compiled_model(self, workspace: Path) -> Any:
+         from conifer import load_model
+
+         firmware_model = load_model(workspace / "firmware_model.fml")
+         firmware_model.compile()
+         return firmware_model
+
+
+ class DA4MLStrategy(CompilerStrategy):
+     """DA4ML compilation strategy (placeholder)"""
+
+     def compile(self, model, workspace: Path, config: Optional[Dict] = None, **kwargs) -> Any:
+         raise NotImplementedError("DA4ML conversion without QONNX not yet implemented")
+
+     def load_compiled_model(self, workspace: Path) -> Any:
+         raise NotImplementedError("DA4ML loading not yet implemented")
+
+
+ class FINNStrategy(CompilerStrategy):
+     """FINN compilation strategy (placeholder)"""
+
+     def compile(self, model, workspace: Path, config: Optional[Dict] = None, **kwargs) -> Any:
+         raise NotImplementedError("FINN conversion without QONNX not yet implemented")
+
+     def load_compiled_model(self, workspace: Path) -> Any:
+         raise NotImplementedError("FINN loading not yet implemented")
+
+
+ class SoftwarePredictor(ModelPredictor):
+     """Software-based model predictor"""
+
+     def __init__(self, model, backend: str):
+         self.model = model
+         self.backend = backend.lower()
+
+     def predict(self, input_data: np.ndarray) -> np.ndarray:
+         if input_data.ndim == 1:
+             input_data = np.expand_dims(input_data, axis=0)
+         return self.model.predict(input_data)
+
+
+ class QONNXPredictor(ModelPredictor):
+     """QONNX-based model predictor"""
+
+     def __init__(self, qonnx_model, input_name: str):
+         self.qonnx_model = qonnx_model
+         self.input_name = input_name
+
+     def predict(self, input_data: np.ndarray) -> np.ndarray:
+         from qonnx.core.onnx_exec import execute_onnx
+
+         input_data = np.asarray(input_data)
+         if input_data.ndim == 1:
+             input_data = np.expand_dims(input_data, axis=0)
+
+         outputs = []
+         for i in range(input_data.shape[0]):
+             sample = input_data[i].astype("float32").reshape(1, -1)
+             output_dict = execute_onnx(self.qonnx_model, {self.input_name: sample})
+             outputs.append(output_dict["global_out"])
+
+         return np.vstack(outputs)
+
+
+ class FirmwarePredictor(ModelPredictor):
+     """Firmware-based model predictor"""
+
+     def __init__(self, firmware_model):
+         if firmware_model is None:
+             raise RuntimeError("Firmware model not built.")
+         self.firmware_model = firmware_model
+
+     def predict(self, input_data: np.ndarray) -> np.ndarray:
+         return self.firmware_model.predict(input_data)
+
+
+ class ConverterFactory:
+     """Factory for creating model converters"""
+
+     @staticmethod
+     def create_converter(ml_backend: str, compiler: str) -> ModelConverter:
+         if ml_backend.lower() == "keras" and compiler.lower() == "hls4ml":
+             return KerasToQONNXConverter()
+         else:
+             return NoOpConverter()
+
+
+ class CompilerFactory:
+     """Factory for creating compilation strategies"""
+
+     @staticmethod
+     def create_compiler(ml_backend: str, compiler: str) -> CompilerStrategy:
+         backend = ml_backend.lower()
+         comp = compiler.lower()
+
+         if backend == "keras" and comp == "hls4ml":
+             return HLS4MLStrategy()
+         elif backend == "xgboost" and comp == "conifer":
+             return ConiferStrategy()
+         elif comp == "da4ml":
+             return DA4MLStrategy()
+         elif comp == "finn":
+             return FINNStrategy()
+         else:
+             raise RuntimeError(f"Unsupported combination: ml_backend={backend}, compiler={comp}")
+
+
+ class WorkspaceManager:
+     """Manages workspace directories and metadata"""
+
+     BASE_WORKSPACE = Path.cwd() / "triggermodel"
+
+     def __init__(self):
+         self.workspace = self.BASE_WORKSPACE
+         self.artifacts = {"firmware": None}
+         self.metadata = {
+             "name": None,
+             "ml_backend": None,
+             "compiler": None,
+             "versions": []
+         }
+
+     def setup_workspace(self, name: str, ml_backend: str, compiler: str):
+         """Initialize workspace and metadata"""
+         self.workspace.mkdir(parents=True, exist_ok=True)
+         self.metadata.update({
+             "name": name,
+             "ml_backend": ml_backend,
+             "compiler": compiler,
+         })
+
+     def save_native_model(self, model, ml_backend: str):
+         """Save the native model to workspace"""
+         if ml_backend.lower() == "keras":
+             model.save(self.workspace / "keras_model")
+         elif ml_backend.lower() == "xgboost":
+             model.save_model(str(self.workspace / "xgb_model.json"))
+
+     def add_artifact(self, key: str, value: Any):
+         """Add artifact to tracking"""
+         self.artifacts[key] = value
+
+     def add_version(self, version_info: Dict):
+         """Add version information"""
+         self.metadata["versions"].append(version_info)
+
+     def save_metadata(self):
+         """Save metadata to file"""
+         with open(self.workspace / "metadata.json", "w") as f:
+             json.dump({
+                 "name": self.metadata["name"],
+                 "ml_backend": self.metadata["ml_backend"],
+                 "compiler": self.metadata["compiler"],
+             }, f, indent=2)
+
+
+ class ModelSerializer:
+     """Handles model serialization and deserialization"""
+
+     @staticmethod
+     def save(workspace: Path, path: str):
+         """Serialize the workspace into a tar.xz archive"""
+         path = Path(path)
+         path.parent.mkdir(parents=True, exist_ok=True)
+         with tarfile.open(path, mode="w:xz") as tar:
+             tar.add(workspace, arcname=workspace.name)
+         print(f"TriggerModel saved to {path}")
+
+     @staticmethod
+     def load(path: str) -> Optional[Dict[str, Any]]:
+         """Load workspace from tar.xz archive"""
+         path = Path(path)
+         if not path.exists():
+             raise FileNotFoundError(f"{path} does not exist")
+
+         workspace = Path.cwd() / "triggermodel"
+
+         if workspace.exists():
+             response = input(f"{workspace} already exists. Overwrite? [y/N]: ").strip().lower()
+             if response != "y":
+                 print("Load cancelled by user.")
+                 return None
+             shutil.rmtree(workspace)
+
+         with tarfile.open(path, mode="r:xz") as tar:
+             tar.extractall(path=Path.cwd())
+
+         # Load metadata
+         metadata_path = workspace / "metadata.json"
+         with open(metadata_path, "r") as f:
+             metadata = json.load(f)
+
+         return {
+             "workspace": workspace,
+             "metadata": metadata
+         }
+
+     @staticmethod
+     def load_native_model(workspace: Path, ml_backend: str):
+         """Load native model from workspace"""
+         if ml_backend in ("keras", "qkeras"):
+             try:
+                 tf_keras = importlib.import_module("keras.models")
+             except ModuleNotFoundError:
+                 tf_keras = importlib.import_module("tensorflow.keras.models")
+             return tf_keras.load_model(workspace / "keras_model")
+         elif ml_backend == "xgboost":
+             import xgboost as xgb
+             model = xgb.Booster()
+             model.load_model(str(workspace / "xgb_model.json"))
+             return model
+         else:
+             raise ValueError(f"Unsupported ml_backend: {ml_backend}")
+
+     @staticmethod
+     def load_qonnx_model(workspace: Path):
+         """Load QONNX model if it exists"""
+         qonnx_path = workspace / "model_qonnx.onnx"
+         if qonnx_path.exists():
+             from qonnx.core.modelwrapper import ModelWrapper
+             model = ModelWrapper(str(qonnx_path))
+             input_name = model.graph.input[0].name
+             return model, input_name
+         return None, None
+
+
+ class TriggerModel:
+     """Main facade class that orchestrates model conversion, compilation, and inference"""
+
+     def __init__(self, name: str, ml_backend: str, scales: dict, n_outputs: int, compiler: str,
+                  native_model: object, dataset_object: object, compiler_config: Optional[dict] = None):
+
+         if ml_backend.lower() not in ("keras", "xgboost"):
+             raise ValueError("Only Keras or XGBoost backends are currently supported.")
+
+         self.name = name
+         self.ml_backend = ml_backend.lower()
+         self.scales = scales
+         self.n_outputs = n_outputs
+         self.compiler = compiler.lower()
+         self.native_model = native_model
+         self.dataset_object = dataset_object
+         self.compiler_config = compiler_config
+
+         self.workspace_manager = WorkspaceManager()
+         self.converter = ConverterFactory.create_converter(ml_backend, compiler)
+         self.compiler_strategy = CompilerFactory.create_compiler(ml_backend, compiler)
+
+         self.firmware_model = None
+         self.model_qonnx = None
+         self.input_name = None
+
+         self.workspace_manager.setup_workspace(name, self.ml_backend, self.compiler)
+
+     @property
+     def workspace(self) -> Path:
+         """Get workspace path"""
+         return self.workspace_manager.workspace
+
+     @property
+     def artifacts(self) -> Dict[str, Any]:
+         """Get artifacts dictionary"""
+         return self.workspace_manager.artifacts
+
+     @property
+     def metadata(self) -> Dict[str, Any]:
+         """Get metadata dictionary"""
+         return self.workspace_manager.metadata
+
+     def __call__(self, **compiler_kwargs):
+         """Execute the full model conversion and compilation pipeline"""
+         self.parse_dataset_object()
+
+         # Save native model
+         self.workspace_manager.save_native_model(self.native_model, self.ml_backend)
+
+         # Convert model if needed
+         conversion_result = self.converter.convert(
+             self.native_model,
+             self.workspace_manager.workspace
+         )
+
+         if conversion_result is not None:
+             qonnx_path, self.model_qonnx = conversion_result
+             self.input_name = self.model_qonnx.graph.input[0].name
+             self.workspace_manager.add_artifact("qonnx", qonnx_path)
+             self.workspace_manager.add_version({"qonnx": str(qonnx_path)})
+
+         # Compile model
+         self.firmware_model = self.compiler_strategy.compile(
+             self.native_model,
+             self.workspace_manager.workspace,
+             self.compiler_config,
+             **compiler_kwargs
+         )
+
+         self.build_emulator(self.scales['shifts'], self.scales['offsets'], self.n_outputs)
+
+         self.workspace_manager.add_artifact("firmware", self.workspace_manager.workspace / "firmware")
+         self.workspace_manager.save_metadata()
+
+     @staticmethod
+     def parse_dataset_object():
+         """Parse dataset object (placeholder)"""
+         pass
+
+     @staticmethod
+     def _render_template(template_path: Path, out_path: Path, context: dict):
+         """Simple template substitution"""
+         with open(template_path) as f:
+             template = f.read()
+         for k, v in context.items():
+             template = template.replace("{{" + k + "}}", str(v))
+         with open(out_path, "w") as f:
+             f.write(template)
+
+     def software_predict(self, input_data: np.ndarray) -> np.ndarray:
+         """Make predictions using software model"""
+         predictor = SoftwarePredictor(self.native_model, self.ml_backend)
+         return predictor.predict(input_data)
+
+     def qonnx_predict(self, input_data: np.ndarray) -> np.ndarray:
+         """Make predictions using QONNX model"""
+         if self.model_qonnx is None:
+             raise RuntimeError("QONNX model not available")
+         predictor = QONNXPredictor(self.model_qonnx, self.input_name)
+         return predictor.predict(input_data)
+
+     def firmware_predict(self, input_data: np.ndarray) -> np.ndarray:
+         """Make predictions using firmware model"""
+         predictor = FirmwarePredictor(self.firmware_model)
+         return predictor.predict(input_data)
+
+     def build_emulator(self, ad_shift: list, ad_offsets: list, n_outputs: int):
+         """
+         Create an emulator directory for this model.
+         Copies HLS sources and generates emulator scaffolding.
+         """
+         emulator_dir = self.workspace / "emulator"
+         emulator_dir.mkdir(exist_ok=True)
+
+         model_dir = emulator_dir / self.name
+         model_dir.mkdir(exist_ok=True)
+
+         firmware_dir = self.workspace / "firmware" / "firmware"
+
+         shutil.copytree(firmware_dir, f"{model_dir}/NN", dirs_exist_ok=True)
+
+         # Access scales template from installed package
+         with pkg_resources.path(triggerflow.templates, "scales.h") as scales_template_path:
+             scales_out_path = model_dir / "scales.h"
+             context = {
+                 "MODEL_NAME": self.name,
+                 "N_INPUTS": len(ad_shift),
+                 "N_OUTPUTS": n_outputs,
+                 "AD_SHIFT": ", ".join(map(str, ad_shift)),
+                 "AD_OFFSETS": ", ".join(map(str, ad_offsets)),
+             }
+             self._render_template(scales_template_path, scales_out_path, context)
+
+         with pkg_resources.path(triggerflow.templates, "model_template.cpp") as emulator_template_path:
+             emulator_out_path = model_dir / "emulator.cpp"
+             self._render_template(emulator_template_path, emulator_out_path, context)
+
+         with pkg_resources.path(triggerflow.templates, "makefile_version") as makefile_template_path:
+             makefile_out_path = model_dir / "Makefile"
+             self._render_template(makefile_template_path, makefile_out_path, {"MODEL_NAME": self.name})
+
+         with pkg_resources.path(triggerflow.templates, "makefile") as makefile_template_path:
+             makefile_out_path = emulator_dir / "Makefile"
+             self._render_template(makefile_template_path, makefile_out_path, {"MODEL_NAME": self.name})
+
+
+     def save(self, path: str):
+         """Save the complete model to an archive"""
+         ModelSerializer.save(self.workspace_manager.workspace, path)
+
+     @classmethod
+     def load(cls, path: str) -> Optional['TriggerModel']:
+         """Load a model from an archive"""
+         load_result = ModelSerializer.load(path)
+         if load_result is None:
+             return None
+
+         workspace = load_result["workspace"]
+         metadata = load_result["metadata"]
+
+         obj = cls.__new__(cls)
+         obj.workspace_manager = WorkspaceManager()
+         obj.workspace_manager.workspace = workspace
+         obj.workspace_manager.metadata = metadata
+         obj.workspace_manager.artifacts = {"firmware": workspace / "firmware"}
+
+         obj.name = metadata.get("name", "")
+         obj.ml_backend = metadata.get("ml_backend")
+         obj.compiler = metadata.get("compiler")
+
+         obj.native_model = ModelSerializer.load_native_model(workspace, obj.ml_backend)
+
+         obj.model_qonnx, obj.input_name = ModelSerializer.load_qonnx_model(workspace)
+
+         if obj.compiler.lower() in ("hls4ml", "conifer"):
+             obj.compiler_strategy = CompilerFactory.create_compiler(obj.ml_backend, obj.compiler)
+             obj.firmware_model = obj.compiler_strategy.load_compiled_model(workspace)
+         else:
+             obj.firmware_model = None
+             obj.compiler_strategy = None
+
+         obj.converter = ConverterFactory.create_converter(obj.ml_backend, obj.compiler)
+         obj.dataset_object = None
+
+         return obj
triggerflow/mlflow_wrapper.py ADDED
@@ -0,0 +1,100 @@
+ # triggerflow/mlflow_wrapper.py
+ import mlflow
+ import mlflow.pyfunc
+ import tempfile
+ from pathlib import Path
+ from typing import Dict, Any
+ from mlflow.tracking import MlflowClient
+ from triggerflow.core import TriggerModel
+
+
+ class MLflowWrapper(mlflow.pyfunc.PythonModel):
+     """PyFunc wrapper for TriggerModel; backend can be set at runtime."""
+     def load_context(self, context):
+         archive_path = Path(context.artifacts["trigger_model"])
+         self.model = TriggerModel.load(archive_path)
+         self.backend = "software"
+
+     def predict(self, context, model_input):
+         if self.backend == "software":
+             return self.model.software_predict(model_input)
+         elif self.backend == "qonnx":
+             if self.model.model_qonnx is None:
+                 raise RuntimeError("QONNX model not available.")
+             return self.model.qonnx_predict(model_input)
+         elif self.backend == "firmware":
+             if self.model.firmware_model is None:
+                 raise RuntimeError("Firmware model not available.")
+             return self.model.firmware_predict(model_input)
+         else:
+             raise ValueError(f"Unsupported backend: {self.backend}")
+
+     def get_model_info(self):
+         if hasattr(self.model, "get_model_info"):
+             return self.model.get_model_info()
+         return {"error": "Model info not available"}
+
+
+ def _get_pip_requirements(trigger_model: TriggerModel) -> list:
+     requirements = ["numpy"]
+     if trigger_model.ml_backend == "keras":
+         requirements.extend(["tensorflow", "keras"])
+     elif trigger_model.ml_backend == "xgboost":
+         requirements.append("xgboost")
+     if trigger_model.compiler == "hls4ml":
+         requirements.append("hls4ml")
+     elif trigger_model.compiler == "conifer":
+         requirements.append("conifer")
+     if hasattr(trigger_model, "model_qonnx") and trigger_model.model_qonnx is not None:
+         requirements.append("qonnx")
+     return requirements
+
+
+ def log_model(trigger_model: TriggerModel, registered_model_name: str, artifact_path: str = "TriggerModel"):
+     """Log a TriggerModel as a PyFunc model and register it in the Model Registry."""
+     if not registered_model_name:
+         raise ValueError("registered_model_name must be provided and non-empty")
+
+     if mlflow.active_run() is None:
+         raise RuntimeError("No active MLflow run. Start a run before logging.")
+
+     run = mlflow.active_run()
+     with tempfile.TemporaryDirectory() as tmpdir:
+         archive_path = Path(tmpdir) / "triggermodel.tar.xz"
+         trigger_model.save(archive_path)
+
+         mlflow.pyfunc.log_model(
+             artifact_path=artifact_path,
+             python_model=MLflowWrapper(),
+             artifacts={"trigger_model": str(archive_path)},
+             pip_requirements=_get_pip_requirements(trigger_model)
+         )
+
+     # Register the model (always required)
+     client = MlflowClient()
+     model_uri = f"runs:/{run.info.run_id}/{artifact_path}"
+     try:
+         client.get_registered_model(registered_model_name)
+     except mlflow.exceptions.RestException:
+         client.create_registered_model(registered_model_name)
+     client.create_model_version(
+         name=registered_model_name,
+         source=model_uri,
+         run_id=run.info.run_id
+     )
+
+
+ def load_model(model_uri: str) -> mlflow.pyfunc.PyFuncModel:
+     return mlflow.pyfunc.load_model(model_uri)
+
+
+ def load_full_model(model_uri: str) -> TriggerModel:
+     local_path = mlflow.artifacts.download_artifacts(model_uri)
+     archive_path = Path(local_path) / "trigger_model" / "triggermodel.tar.xz"
+     return TriggerModel.load(archive_path)
+
+
+ def get_model_info(model_uri: str) -> Dict[str, Any]:
+     model = mlflow.pyfunc.load_model(model_uri)
+     if hasattr(model._model_impl, "get_model_info"):
+         return model._model_impl.get_model_info()
+     return {"error": "Model info not available"}
triggerflow/templates/makefile ADDED
@@ -0,0 +1,28 @@
+ CPP_STANDARD := c++17
+ CXXFLAGS := -O3 -fPIC -std=$(CPP_STANDARD)
+ PREFIX := .
+ EMULATOR_EXTRAS := ../../hls4mlEmulatorExtras
+ AP_TYPES := $(EMULATOR_EXTRAS)/include/ap_types
+ HLS_ROOT := ../../hls
+ HLS4ML_INCLUDE := $(EMULATOR_EXTRAS)/include/hls4ml
+ INCLUDES := -I$(HLS4ML_INCLUDE) -I$(AP_TYPES) -I$(HLS_ROOT)/include
+ LD_FLAGS := -L$(EMULATOR_EXTRAS)/lib64 -lemulator_interface
+ ALL_VERSIONS := {{MODEL_NAME}}/{{MODEL_NAME}}.so
+
+ .DEFAULT_GOAL := all
+ .PHONY: all clean install
+
+ all: $(ALL_VERSIONS)
+ 	@cp $(ALL_VERSIONS) ./
+ 	@echo All OK
+
+ install: all
+ 	@rm -rf $(PREFIX)/lib64
+ 	@mkdir -p $(PREFIX)/lib64
+ 	cp {{MODEL_NAME}}/{{MODEL_NAME}}.so $(PREFIX)/lib64
+
+ %.so:
+ 	$(MAKE) -C $(@D) INCLUDES="$(INCLUDES)" LD_FLAGS="$(LD_FLAGS)" CXXFLAGS="$(CXXFLAGS)"
+
+ clean:
+ 	rm -rf {{MODEL_NAME}}/{{MODEL_NAME}}.so $(ALL_VERSIONS)
triggerflow/templates/makefile_version ADDED
@@ -0,0 +1,15 @@
+ .PHONY: clean
+
+ MODEL_NAME = {{MODEL_NAME}}
+
+ $(MODEL_NAME).so: $(MODEL_NAME)_project.o $(MODEL_NAME).o
+ 	$(CXX) $(CXXFLAGS) $(LD_FLAGS) -shared $^ -o $@
+
+ %.o: NN/%.cpp
+ 	$(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@
+
+ %.o: %.cpp
+ 	$(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@
+
+ clean:
+ 	rm -f $(MODEL_NAME)_project.o $(MODEL_NAME).o $(MODEL_NAME).so
triggerflow/templates/model_template.cpp ADDED
@@ -0,0 +1,59 @@
+ #include "NN/{{MODEL_NAME}}.h"
+ #include "emulator.h"
+ #include "NN/nnet_utils/nnet_common.h"
+ #include <any>
+ #include <array>
+ #include <utility>
+ #include "ap_fixed.h"
+ #include "ap_int.h"
+ #include "scales.h"
+
+ using namespace hls4ml_{{MODEL_NAME}};
+
+ class {{MODEL_NAME}}_emulator : public hls4mlEmulator::Model {
+
+ private:
+     unscaled_t _unscaled_input[N_INPUT_1_1];
+     input_t _scaled_input[N_INPUT_1_1];
+     result_t _result[{{N_OUTPUTS}}];
+
+     // Apply the fixed-point input scaling: subtract the offset, then right-shift
+     virtual void _scaleNNInputs(unscaled_t unscaled[N_INPUT_1_1], input_t scaled[N_INPUT_1_1])
+     {
+         for (int i = 0; i < N_INPUT_1_1; i++)
+         {
+             unscaled_t tmp0 = unscaled[i] - hls4ml_{{MODEL_NAME}}::ad_offsets[i];
+             input_t tmp1 = tmp0 >> hls4ml_{{MODEL_NAME}}::ad_shift[i];
+             scaled[i] = tmp1;
+         }
+     }
+
+ public:
+     virtual void prepare_input(std::any input) {
+         unscaled_t *unscaled_input_p = std::any_cast<unscaled_t*>(input);
+
+         for (int i = 0; i < N_INPUT_1_1; i++) {
+             _unscaled_input[i] = unscaled_input_p[i];
+         }
+
+         _scaleNNInputs(_unscaled_input, _scaled_input);
+     }
+
+     virtual void predict() {
+         {{MODEL_NAME}}(_scaled_input, _result);
+     }
+
+     virtual void read_result(std::any result) {
+         result_t *result_p = std::any_cast<result_t*>(result);
+         for (int i = 0; i < {{N_OUTPUTS}}; i++) {
+             result_p[i] = _result[i];
+         }
+     }
+ };
+
+ extern "C" hls4mlEmulator::Model* create_model() {
+     return new {{MODEL_NAME}}_emulator;
+ }
+
+ extern "C" void destroy_model(hls4mlEmulator::Model* m) {
+     delete m;
+ }
triggerflow/templates/scales.h ADDED
@@ -0,0 +1,20 @@
+ #ifndef __ADT_SCALES_H
+ #define __ADT_SCALES_H
+
+ #include "NN/{{MODEL_NAME}}.h"
+
+ namespace hls4ml_{{MODEL_NAME}} {
+
+     typedef ap_fixed<5,5> ad_shift_t;
+     typedef ap_fixed<10,10> ad_offset_t;
+
+     const ad_shift_t ad_shift[{{N_INPUTS}}] = {
+         {{AD_SHIFT}}
+     };
+
+     const ad_offset_t ad_offsets[{{N_INPUTS}}] = {
+         {{AD_OFFSETS}}
+     };
+
+ } // namespace hls4ml_{{MODEL_NAME}}
+ #endif
triggerflow-0.1.4.dist-info/METADATA ADDED
@@ -0,0 +1,61 @@
+ Metadata-Version: 2.4
+ Name: triggerflow
+ Version: 0.1.4
+ Summary: Utilities for ML models targeting hardware triggers
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ Requires-Dist: mlflow>=2.0
+
+ # Machine Learning for Hardware Triggers
+
+ `triggerflow` provides a set of utilities for Machine Learning models targeting FPGA deployment.
+ The `TriggerModel` class consolidates several Machine Learning frontends and compiler backends to construct a "trigger model". MLflow utilities are provided for logging, versioning, and loading trigger models.
+
+ ## Installation
+
+ ```bash
+ pip install triggerflow
+ ```
+
+ ## Usage
+
+ ```python
+ from triggerflow.core import TriggerModel
+
+ trigger_model = TriggerModel(
+     name="my-trigger-model",
+     ml_backend="keras",
+     scales=scales,
+     n_outputs=n_outputs,
+     compiler="hls4ml",
+     native_model=model,
+     dataset_object=dataset,
+     compiler_config=None,
+ )
+ trigger_model()  # run the full conversion and compilation pipeline
+
+ # then:
+ output_software = trigger_model.software_predict(input_data)
+ output_firmware = trigger_model.firmware_predict(input_data)
+ output_qonnx = trigger_model.qonnx_predict(input_data)
+
+ # save and load trigger models:
+ trigger_model.save("trigger_model.tar.xz")
+
+ # in a separate session:
+ from triggerflow.core import TriggerModel
+ trigger_model = TriggerModel.load("trigger_model.tar.xz")
+ ```
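+
+ The `scales` argument drives the fixed-point input scaling baked into the generated emulator: each input is offset-subtracted, then right-shifted (see `templates/scales.h`). A minimal sketch of its expected shape, assuming four input features — the values are purely illustrative:
+
+ ```python
+ # One entry per input feature; shifts are power-of-two scalings,
+ # offsets are subtracted before shifting.
+ scales = {
+     "shifts": [2, 3, 2, 4],
+     "offsets": [64, 128, 0, 32],
+ }
+ n_outputs = 8  # number of model outputs exposed by the emulator
+ ```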
+
+ ## Logging with MLflow
+
+ ```python
+ import mlflow
+ from triggerflow.mlflow_wrapper import log_model
+
+ mlflow.set_tracking_uri("https://ngt.cern.ch/models")
+ experiment_id = mlflow.create_experiment("example-experiment")
+
+ with mlflow.start_run(run_name="trial-v1", experiment_id=experiment_id):
+     log_model(trigger_model, registered_model_name="TriggerModel")
+ ```
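+
+ A logged model can be retrieved later through the helpers in `mlflow_wrapper`; a short sketch, assuming the registered model above (the model URI and version are illustrative):
+
+ ```python
+ from triggerflow.mlflow_wrapper import load_model, load_full_model
+
+ # PyFunc flavor: predicts through the MLflow wrapper, which defaults
+ # to the "software" backend.
+ pyfunc_model = load_model("models:/TriggerModel/1")
+ outputs = pyfunc_model.predict(input_data)
+
+ # Full TriggerModel: restores the workspace, native model, and compiled
+ # firmware, so all three predict paths are available.
+ trigger_model = load_full_model("models:/TriggerModel/1")
+ outputs_qonnx = trigger_model.qonnx_predict(input_data)
+ outputs_firmware = trigger_model.firmware_predict(input_data)
+ ```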
+
+ ### Note: This package deliberately does not install its ML framework or compiler dependencies, so it won't disrupt existing training environments or custom compiler setups. For a reference environment, see `environment.yml`.
+
+
triggerflow-0.1.4.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
+ triggerflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ triggerflow/core.py,sha256=8yxV3xH3SjixyDx3OP0fHEtGjteMUA01MIGrdofsusg,20349
+ triggerflow/mlflow_wrapper.py,sha256=mtg2mQTRlYkv9ojO6aRZ5Oln9lbE6AYA220Zw4rdEzM,3888
+ triggerflow/templates/makefile,sha256=VL39isTUBewrs8zTSDzdP6LLln7zpGoCZnLadpMu7CA,808
+ triggerflow/templates/makefile_version,sha256=Tmu0tyAopJbiBQVMMOa6l2Cz5GkEn20mwgzIi0CfhyM,338
+ triggerflow/templates/model_template.cpp,sha256=eGwY5ca_HgjoIvqorOBPSJspP0wngpjJheq3meb48r4,1616
+ triggerflow/templates/scales.h,sha256=5bq6lVF36SRQKE2zg9RpBG6K5orpPlnJ8g125nbtFow,365
+ triggerflow-0.1.4.dist-info/METADATA,sha256=R5p0NDzGnDhROZLRAdWbY_JBE9EPS1dma1UWaMoQ5Ac,1942
+ triggerflow-0.1.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ triggerflow-0.1.4.dist-info/top_level.txt,sha256=g4M0nqpVPFZcmVmsoLExDtJFLDBK4fzobCIBqo13BEw,12
+ triggerflow-0.1.4.dist-info/RECORD,,
triggerflow-0.1.4.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.9.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
triggerflow-0.1.4.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ triggerflow