triggerflow 0.1.6__tar.gz → 0.1.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: triggerflow
3
- Version: 0.1.6
3
+ Version: 0.1.8
4
4
  Summary: Utilities for ML models targeting hardware triggers
5
5
  Classifier: Programming Language :: Python :: 3
6
6
  Classifier: License :: OSI Approved :: MIT License
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "triggerflow"
7
- version = "0.1.6"
7
+ version = "0.1.8"
8
8
  description = "Utilities for ML models targeting hardware triggers"
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.10"
@@ -0,0 +1,215 @@
1
+ # trigger_mlflow.py
2
+ import mlflow
3
+ import os
4
+ import mlflow.pyfunc
5
+ import tempfile
6
+ from pathlib import Path
7
+ from typing import Dict, Any
8
+ from mlflow.tracking import MlflowClient
9
+ from .core import TriggerModel
10
+
11
+
12
def setup_mlflow(mlflow_uri: str | None = None,
                 model_name: str | None = None,
                 experiment_name: str | None = None,
                 run_name: str | None = None,
                 experiment_id: str | None = None,
                 run_id: str | None = None,
                 save_env_file: bool = True,
                 auto_configure: bool = False
                 ):
    """Configure MLflow tracking for local or CI use.

    Each setting is resolved as: explicit argument > environment variable >
    CI variable / default.  Every resolved value is exported back into
    ``os.environ`` so later steps in the same process see it, and (in CI)
    appended to an env file for later pipeline jobs.

    Returns:
        tuple: (experiment_name, run_name, experiment_id, run_id)

    Raises:
        ValueError: if a provided ``experiment_id`` does not match
            ``experiment_name``, or a provided ``run_id`` does not belong
            to ``experiment_id``.
    """
    # Resolve the tracking URI: argument > MLFLOW_URI > hard-coded default.
    if mlflow_uri is None:
        mlflow_uri = os.getenv('MLFLOW_URI', 'https://ngt.cern.ch/models')
    mlflow.set_tracking_uri(mlflow_uri)
    os.environ["MLFLOW_URI"] = mlflow_uri
    print(f"Using MLflow tracking URI: {mlflow_uri}")

    # Resolve the model name: argument > MLFLOW_MODEL_NAME > CI branch > default.
    if model_name is None:
        if os.getenv('MLFLOW_MODEL_NAME'):
            model_name = os.getenv('MLFLOW_MODEL_NAME')
        else:
            model_name = os.getenv('CI_COMMIT_BRANCH', 'Test-Model')
    os.environ["MLFLOW_MODEL_NAME"] = model_name
    print(f"Using model name: {model_name}")

    # Resolve the experiment name: argument > MLFLOW_EXPERIMENT_NAME > CI branch > default.
    if experiment_name is None:
        if os.getenv('MLFLOW_EXPERIMENT_NAME'):
            experiment_name = os.getenv('MLFLOW_EXPERIMENT_NAME')
        else:
            experiment_name = os.getenv('CI_COMMIT_BRANCH', 'Test-Training-Torso')
    os.environ["MLFLOW_EXPERIMENT_NAME"] = experiment_name
    print(f"Using experiment name: {experiment_name}")

    # Resolve the run name: in CI, derive it from pipeline IDs so parent/child
    # pipelines stay distinguishable; locally, fall back to a timestamp.
    if run_name is None:
        if os.getenv('CI') == 'true':
            if os.getenv('CI_PARENT_PIPELINE_ID'):
                run_name = f"{os.getenv('CI_PARENT_PIPELINE_ID')}-{os.getenv('CI_PIPELINE_ID')}"
            else:
                run_name = f"{os.getenv('CI_PIPELINE_ID')}"
        else:
            import datetime
            run_name = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    os.environ["MLFLOW_RUN_NAME"] = run_name
    print(f"Using run name: {run_name}")

    # Resolve the experiment ID: argument > MLFLOW_EXPERIMENT_ID > create-or-lookup.
    if experiment_id is None:
        if os.getenv("MLFLOW_EXPERIMENT_ID"):
            experiment_id = os.getenv("MLFLOW_EXPERIMENT_ID")
        else:
            try:
                experiment_id = mlflow.create_experiment(experiment_name)
            except mlflow.exceptions.MlflowException:
                # Experiment already exists — reuse its ID.
                experiment_id = mlflow.get_experiment_by_name(experiment_name).experiment_id

    # Guard against a provided/cached ID that does not belong to experiment_name.
    check_experiment_id = mlflow.get_experiment_by_name(experiment_name).experiment_id
    if str(check_experiment_id) != str(experiment_id):
        raise ValueError(f"Provided experiment_id {experiment_id} does not match the ID of experiment_name {experiment_name} ({check_experiment_id})")

    mlflow.set_experiment(experiment_id=experiment_id)
    # str(): create_experiment returns str, but a caller-provided ID may not be.
    os.environ["MLFLOW_EXPERIMENT_ID"] = str(experiment_id)
    print(f"Using experiment ID: {experiment_id}")

    # Resolve the run ID: argument > MLFLOW_RUN_ID > start a fresh run.
    if run_id is None:
        if os.getenv("MLFLOW_RUN_ID"):
            run_id = os.getenv("MLFLOW_RUN_ID")
        else:
            with mlflow.start_run(experiment_id=experiment_id, run_name=run_name) as run:
                run_id = run.info.run_id

    # Guard against a provided/cached run that belongs to another experiment.
    check_run_info = mlflow.get_run(run_id)
    if str(check_run_info.info.experiment_id) != str(experiment_id):
        raise ValueError(f"Provided run_id {run_id} does not belong to experiment_id {experiment_id} (found {check_run_info.info.experiment_id})")

    os.environ["MLFLOW_RUN_ID"] = run_id
    print(f"Started run with ID: {run_id}")

    # Save environment variables to a file for later steps in CI/CD pipelines.
    if save_env_file and os.getenv("CI") == "true":
        env_file = os.getenv('CI_ENV_FILE', 'mlflow.env')
        print(f"Saving MLflow environment variables to {env_file}")
        with open(env_file, 'a') as f:
            f.write(f"MLFLOW_URI={mlflow_uri}\n")
            f.write(f"MLFLOW_MODEL_NAME={model_name}\n")
            f.write(f"MLFLOW_EXPERIMENT_NAME={experiment_name}\n")
            f.write(f"MLFLOW_RUN_NAME={run_name}\n")
            f.write(f"MLFLOW_EXPERIMENT_ID={experiment_id}\n")
            f.write(f"MLFLOW_RUN_ID={run_id}\n")
            # BUGFIX: this write previously sat outside the `with` block, so
            # `f` was already closed (or undefined) when auto_configure was
            # set — it must happen while the env file is still open.
            if auto_configure:
                print("Auto_configure is set to true. Exporting AUTO_CONFIGURE=true")
                f.write("AUTO_CONFIGURE=true\n")

    return experiment_name, run_name, experiment_id, run_id
119
+
120
# Import-time hook: when a previous CI step exported AUTO_CONFIGURE=true,
# configure MLflow immediately so downstream code finds a ready run.
if os.getenv("AUTO_CONFIGURE") != "true":
    print("AUTO_CONFIGURE is not set. Skipping mlflow run setup")
else:
    print("AUTO_CONFIGURE is true and running in CI environment. Setting up mlflow...")
    setup_mlflow()
125
+
126
class MLflowWrapper(mlflow.pyfunc.PythonModel):
    """PyFunc wrapper for TriggerModel; backend can be set at runtime."""

    def load_context(self, context):
        # Rebuild the TriggerModel from the archived "trigger_model" artifact.
        self.model = TriggerModel.load(Path(context.artifacts["trigger_model"]))
        # Default backend; callers may reassign `backend` on the loaded wrapper.
        self.backend = "software"

    def predict(self, context, model_input):
        """Dispatch prediction to whichever backend is currently selected."""
        selected = self.backend
        if selected == "software":
            return self.model.software_predict(model_input)
        if selected == "qonnx":
            if self.model.model_qonnx is None:
                raise RuntimeError("QONNX model not available.")
            return self.model.qonnx_predict(model_input)
        if selected == "firmware":
            if self.model.firmware_model is None:
                raise RuntimeError("Firmware model not available.")
            return self.model.firmware_predict(model_input)
        raise ValueError(f"Unsupported backend: {self.backend}")

    def get_model_info(self):
        """Forward to the wrapped model's metadata accessor, if it has one."""
        getter = getattr(self.model, "get_model_info", None)
        if getter is not None:
            return getter()
        return {"error": "Model info not available"}
151
+
152
+
153
def _get_pip_requirements(trigger_model: TriggerModel) -> list:
    """Derive the pip requirements for serving this TriggerModel.

    Always includes numpy, then adds ML-backend, compiler, and QONNX
    dependencies based on the model's attributes.
    """
    backend_deps = {"keras": ["tensorflow", "keras"], "xgboost": ["xgboost"]}
    compiler_deps = {"hls4ml": ["hls4ml"], "conifer": ["conifer"]}

    deps = ["numpy"]
    deps.extend(backend_deps.get(trigger_model.ml_backend, []))
    deps.extend(compiler_deps.get(trigger_model.compiler, []))
    # getattr guards older model objects that predate the model_qonnx field.
    if getattr(trigger_model, "model_qonnx", None) is not None:
        deps.append("qonnx")
    return deps
166
+
167
+
168
def log_model(trigger_model: TriggerModel, registered_model_name: str, artifact_path: str = "TriggerModel"):
    """Log a TriggerModel as a PyFunc model and register it in the Model Registry."""
    if not registered_model_name:
        raise ValueError("registered_model_name must be provided and non-empty")

    active = mlflow.active_run()
    if active is None:
        raise RuntimeError("No active MLflow run. Start a run before logging.")

    with tempfile.TemporaryDirectory() as workdir:
        # Serialize the full model into a temporary archive to ship as an artifact.
        bundle = Path(workdir) / "triggermodel.tar.xz"
        trigger_model.save(bundle)

        mlflow.pyfunc.log_model(
            artifact_path=artifact_path,
            python_model=MLflowWrapper(),
            artifacts={"trigger_model": str(bundle)},
            pip_requirements=_get_pip_requirements(trigger_model)
        )

        # register model (always required): ensure the registered model exists,
        # then attach this run's logged artifact as a new version.
        registry = MlflowClient()
        source_uri = f"runs:/{active.info.run_id}/{artifact_path}"
        try:
            registry.get_registered_model(registered_model_name)
        except mlflow.exceptions.RestException:
            registry.create_registered_model(registered_model_name)
        registry.create_model_version(
            name=registered_model_name,
            source=source_uri,
            run_id=active.info.run_id
        )
200
+
201
def load_model(model_uri: str) -> mlflow.pyfunc.PyFuncModel:
    """Load the PyFunc flavor of a logged TriggerModel."""
    loaded = mlflow.pyfunc.load_model(model_uri)
    return loaded
203
+
204
+
205
def load_full_model(model_uri: str) -> TriggerModel:
    """Download the logged artifact bundle and rebuild the full TriggerModel."""
    artifact_dir = Path(mlflow.artifacts.download_artifacts(model_uri))
    # "trigger_model" matches the artifacts key used when the model was logged.
    return TriggerModel.load(artifact_dir / "trigger_model" / "triggermodel.tar.xz")
209
+
210
+
211
def get_model_info(model_uri: str) -> Dict[str, Any]:
    """Return metadata from a logged model, if its wrapper exposes any."""
    loaded = mlflow.pyfunc.load_model(model_uri)
    # NOTE(review): _model_impl is a private mlflow attribute and may change
    # between mlflow versions — confirm before upgrading mlflow.
    impl = loaded._model_impl
    if hasattr(impl, "get_model_info"):
        return impl.get_model_info()
    return {"error": "Model info not available"}
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: triggerflow
3
- Version: 0.1.6
3
+ Version: 0.1.8
4
4
  Summary: Utilities for ML models targeting hardware triggers
5
5
  Classifier: Programming Language :: Python :: 3
6
6
  Classifier: License :: OSI Approved :: MIT License
@@ -1,100 +0,0 @@
1
- # trigger_mlflow.py
2
- import mlflow
3
- import mlflow.pyfunc
4
- import tempfile
5
- from pathlib import Path
6
- from typing import Dict, Any
7
- from mlflow.tracking import MlflowClient
8
- from .core import TriggerModel
9
-
10
-
11
- class MLflowWrapper(mlflow.pyfunc.PythonModel):
12
- """PyFunc wrapper for TriggerModel; backend can be set at runtime."""
13
- def load_context(self, context):
14
- archive_path = Path(context.artifacts["trigger_model"])
15
- self.model = TriggerModel.load(archive_path)
16
- self.backend = "software"
17
-
18
- def predict(self, context, model_input):
19
- if self.backend == "software":
20
- return self.model.software_predict(model_input)
21
- elif self.backend == "qonnx":
22
- if self.model.model_qonnx is None:
23
- raise RuntimeError("QONNX model not available.")
24
- return self.model.qonnx_predict(model_input)
25
- elif self.backend == "firmware":
26
- if self.model.firmware_model is None:
27
- raise RuntimeError("Firmware model not available.")
28
- return self.model.firmware_predict(model_input)
29
- else:
30
- raise ValueError(f"Unsupported backend: {self.backend}")
31
-
32
- def get_model_info(self):
33
- if hasattr(self.model, "get_model_info"):
34
- return self.model.get_model_info()
35
- return {"error": "Model info not available"}
36
-
37
-
38
- def _get_pip_requirements(trigger_model: TriggerModel) -> list:
39
- requirements = ["numpy"]
40
- if trigger_model.ml_backend == "keras":
41
- requirements.extend(["tensorflow", "keras"])
42
- elif trigger_model.ml_backend == "xgboost":
43
- requirements.append("xgboost")
44
- if trigger_model.compiler == "hls4ml":
45
- requirements.append("hls4ml")
46
- elif trigger_model.compiler == "conifer":
47
- requirements.append("conifer")
48
- if hasattr(trigger_model, "model_qonnx") and trigger_model.model_qonnx is not None:
49
- requirements.append("qonnx")
50
- return requirements
51
-
52
-
53
- def log_model(trigger_model: TriggerModel, registered_model_name: str, artifact_path: str = "TriggerModel"):
54
- """Log a TriggerModel as a PyFunc model and register it in the Model Registry."""
55
- if not registered_model_name:
56
- raise ValueError("registered_model_name must be provided and non-empty")
57
-
58
- if mlflow.active_run() is None:
59
- raise RuntimeError("No active MLflow run. Start a run before logging.")
60
-
61
- run = mlflow.active_run()
62
- with tempfile.TemporaryDirectory() as tmpdir:
63
- archive_path = Path(tmpdir) / "triggermodel.tar.xz"
64
- trigger_model.save(archive_path)
65
-
66
- mlflow.pyfunc.log_model(
67
- artifact_path=artifact_path,
68
- python_model=MLflowWrapper(),
69
- artifacts={"trigger_model": str(archive_path)},
70
- pip_requirements=_get_pip_requirements(trigger_model)
71
- )
72
-
73
- # register model (always required)
74
- client = MlflowClient()
75
- model_uri = f"runs:/{run.info.run_id}/{artifact_path}"
76
- try:
77
- client.get_registered_model(registered_model_name)
78
- except mlflow.exceptions.RestException:
79
- client.create_registered_model(registered_model_name)
80
- client.create_model_version(
81
- name=registered_model_name,
82
- source=model_uri,
83
- run_id=run.info.run_id
84
- )
85
-
86
- def load_model(model_uri: str) -> mlflow.pyfunc.PyFuncModel:
87
- return mlflow.pyfunc.load_model(model_uri)
88
-
89
-
90
- def load_full_model(model_uri: str) -> TriggerModel:
91
- local_path = mlflow.artifacts.download_artifacts(model_uri)
92
- archive_path = Path(local_path) / "trigger_model" / "triggermodel.tar.xz"
93
- return TriggerModel.load(archive_path)
94
-
95
-
96
- def get_model_info(model_uri: str) -> Dict[str, Any]:
97
- model = mlflow.pyfunc.load_model(model_uri)
98
- if hasattr(model._model_impl, "get_model_info"):
99
- return model._model_impl.get_model_info()
100
- return {"error": "Model info not available"}
File without changes
File without changes
File without changes