triggerflow 0.1.6__tar.gz → 0.1.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: triggerflow
3
- Version: 0.1.6
3
+ Version: 0.1.7
4
4
  Summary: Utilities for ML models targeting hardware triggers
5
5
  Classifier: Programming Language :: Python :: 3
6
6
  Classifier: License :: OSI Approved :: MIT License
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "triggerflow"
7
- version = "0.1.6"
7
+ version = "0.1.7"
8
8
  description = "Utilities for ML models targeting hardware triggers"
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.10"
@@ -0,0 +1,210 @@
1
+ # trigger_mlflow.py
2
+ import mlflow
3
+ import os
4
+ import mlflow.pyfunc
5
+ import tempfile
6
+ from pathlib import Path
7
+ from typing import Dict, Any
8
+ from mlflow.tracking import MlflowClient
9
+ from .core import TriggerModel
10
+
11
+
12
def setup_mlflow(mlflow_uri: str = None,
                 model_name: str = None,
                 experiment_name: str = None,
                 run_name: str = None,
                 experiment_id: str = None,
                 run_id: str = None,
                 save_env_file=True
                 ):
    """Resolve and configure the MLflow tracking context.

    Each value is resolved with the precedence: explicit argument >
    environment variable > CI variable > static default.  Every resolved
    value is exported back into ``os.environ`` so later steps of a CI/CD
    pipeline can reuse the same tracking context.

    Args:
        mlflow_uri: Tracking server URI; falls back to ``MLFLOW_URI``.
        model_name: Model name; falls back to ``MLFLOW_MODEL_NAME`` then
            ``CI_COMMIT_BRANCH``.
        experiment_name: Experiment name; falls back to
            ``MLFLOW_EXPERIMENT_NAME`` then ``CI_COMMIT_BRANCH``.
        run_name: Run name; derived from CI pipeline IDs in CI, otherwise
            a local timestamp.
        experiment_id: Existing experiment ID; when omitted the experiment
            is created (or looked up) by name.
        run_id: Existing run ID; when omitted a new run is started and
            immediately closed so only its ID is kept.
        save_env_file: When True and running in CI, append the resolved
            values to the file named by ``CI_ENV_FILE`` (default
            ``mlflow.env``).

    Returns:
        Tuple ``(experiment_name, run_name, experiment_id, run_id)``.

    Raises:
        ValueError: If the resolved experiment does not exist, or a
            provided ``experiment_id``/``run_id`` does not match the
            resolved experiment.
    """
    # Tracking URI: argument > MLFLOW_URI env var > hard-coded default.
    if mlflow_uri is None:
        mlflow_uri = os.getenv('MLFLOW_URI', 'https://ngt.cern.ch/models')
    mlflow.set_tracking_uri(mlflow_uri)
    os.environ["MLFLOW_URI"] = mlflow_uri
    print(f"Using MLflow tracking URI: {mlflow_uri}")

    # Model name: argument > env var > CI branch name > static default.
    if model_name is None:
        model_name = os.getenv('MLFLOW_MODEL_NAME') or os.getenv('CI_COMMIT_BRANCH', 'Test-Model')
    os.environ["MLFLOW_MODEL_NAME"] = model_name
    print(f"Using model name: {model_name}")

    # Experiment name: same precedence chain as the model name.
    if experiment_name is None:
        experiment_name = os.getenv('MLFLOW_EXPERIMENT_NAME') or os.getenv('CI_COMMIT_BRANCH', 'Test-Training-Torso')
    os.environ["MLFLOW_EXPERIMENT_NAME"] = experiment_name
    print(f"Using experiment name: {experiment_name}")

    # Run name: CI pipeline IDs when in CI, otherwise a local timestamp.
    if run_name is None:
        if os.getenv('CI') == 'true':
            if os.getenv('CI_PARENT_PIPELINE_ID'):
                run_name = f"{os.getenv('CI_PARENT_PIPELINE_ID')}-{os.getenv('CI_PIPELINE_ID')}"
            else:
                run_name = f"{os.getenv('CI_PIPELINE_ID')}"
        else:
            import datetime
            run_name = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    os.environ["MLFLOW_RUN_NAME"] = run_name
    print(f"Using run name: {run_name}")

    # Experiment ID: argument > env var > create-or-lookup by name.
    if experiment_id is None:
        if os.getenv("MLFLOW_EXPERIMENT_ID"):
            experiment_id = os.getenv("MLFLOW_EXPERIMENT_ID")
        else:
            try:
                experiment_id = mlflow.create_experiment(experiment_name)
            except mlflow.exceptions.MlflowException:
                # Experiment already exists on the server; reuse its ID.
                experiment_id = mlflow.get_experiment_by_name(experiment_name).experiment_id

    # Cross-check that the resolved ID really belongs to experiment_name.
    experiment = mlflow.get_experiment_by_name(experiment_name)
    if experiment is None:
        # Fix: the original dereferenced `.experiment_id` on None, which
        # raised an opaque AttributeError when the experiment was missing.
        raise ValueError(f"Experiment {experiment_name!r} does not exist on the tracking server")
    if str(experiment.experiment_id) != str(experiment_id):
        raise ValueError(f"Provided experiment_id {experiment_id} does not match the ID of experiment_name {experiment_name} ({experiment.experiment_id})")

    mlflow.set_experiment(experiment_id=experiment_id)
    # os.environ values must be str; a caller may pass a numeric ID.
    os.environ["MLFLOW_EXPERIMENT_ID"] = str(experiment_id)
    print(f"Using experiment ID: {experiment_id}")

    # Run ID: argument > env var > start (and immediately close) a new run.
    if run_id is None:
        if os.getenv("MLFLOW_RUN_ID"):
            run_id = os.getenv("MLFLOW_RUN_ID")
        else:
            with mlflow.start_run(experiment_id=experiment_id, run_name=run_name) as run:
                run_id = run.info.run_id

    # Cross-check that the run belongs to the resolved experiment.
    check_run_info = mlflow.get_run(run_id)
    if str(check_run_info.info.experiment_id) != str(experiment_id):
        raise ValueError(f"Provided run_id {run_id} does not belong to experiment_id {experiment_id} (found {check_run_info.info.experiment_id})")

    os.environ["MLFLOW_RUN_ID"] = str(run_id)
    print(f"Started run with ID: {run_id}")

    # Persist the resolved values so later CI jobs can source them.
    if save_env_file and os.getenv("CI") == "true":
        env_file = os.getenv('CI_ENV_FILE', 'mlflow.env')
        print(f"Saving MLflow environment variables to {env_file}")
        with open(env_file, 'a') as f:
            f.write(f"MLFLOW_URI={mlflow_uri}\n")
            f.write(f"MLFLOW_MODEL_NAME={model_name}\n")
            f.write(f"MLFLOW_EXPERIMENT_NAME={experiment_name}\n")
            f.write(f"MLFLOW_RUN_NAME={run_name}\n")
            f.write(f"MLFLOW_EXPERIMENT_ID={experiment_id}\n")
            f.write(f"MLFLOW_RUN_ID={run_id}\n")

    return experiment_name, run_name, experiment_id, run_id
114
+
115
# Import-time side effect: in CI, with tracking credentials present,
# configure the MLflow context as soon as this module is imported so that
# every later step in the pipeline shares the same experiment/run.
# NOTE(review): the log message mentions only the credential variables,
# but the condition also requires CI == 'true' — local imports with
# credentials set still skip the setup.
if os.getenv("MLFLOW_TRACKING_PASSWORD") is not None and os.getenv("MLFLOW_TRACKING_USERNAME") is not None and os.getenv('CI') == 'true':
    print("Setup mlflow run")
    setup_mlflow()
else:
    print("MLFLOW_TRACKING_PASSWORD and MLFLOW_TRACKING_USERNAME not set. Skipping mlflow run setup")
120
+
121
class MLflowWrapper(mlflow.pyfunc.PythonModel):
    """PyFunc adapter around a serialized TriggerModel.

    After ``load_context`` the inference backend defaults to ``"software"``;
    it can be switched at runtime by assigning ``self.backend``.
    """

    def load_context(self, context):
        """Restore the TriggerModel from the logged artifact archive."""
        self.model = TriggerModel.load(Path(context.artifacts["trigger_model"]))
        self.backend = "software"

    def predict(self, context, model_input):
        """Run inference with the currently selected backend."""
        if self.backend == "software":
            return self.model.software_predict(model_input)
        if self.backend == "qonnx":
            if self.model.model_qonnx is None:
                raise RuntimeError("QONNX model not available.")
            return self.model.qonnx_predict(model_input)
        if self.backend == "firmware":
            if self.model.firmware_model is None:
                raise RuntimeError("Firmware model not available.")
            return self.model.firmware_predict(model_input)
        raise ValueError(f"Unsupported backend: {self.backend}")

    def get_model_info(self):
        """Return the wrapped model's metadata, or an error marker."""
        if hasattr(self.model, "get_model_info"):
            return self.model.get_model_info()
        return {"error": "Model info not available"}
146
+
147
+
148
def _get_pip_requirements(trigger_model: TriggerModel) -> list:
    """Build the pip requirements list matching the model's stack.

    Always includes numpy, then adds framework, compiler and QONNX
    packages according to the TriggerModel's configuration.
    """
    framework_deps = {"keras": ["tensorflow", "keras"], "xgboost": ["xgboost"]}
    compiler_deps = {"hls4ml": ["hls4ml"], "conifer": ["conifer"]}

    requirements = ["numpy"]
    requirements += framework_deps.get(trigger_model.ml_backend, [])
    requirements += compiler_deps.get(trigger_model.compiler, [])
    if getattr(trigger_model, "model_qonnx", None) is not None:
        requirements.append("qonnx")
    return requirements
161
+
162
+
163
def log_model(trigger_model: TriggerModel, registered_model_name: str, artifact_path: str = "TriggerModel"):
    """Log a TriggerModel as a PyFunc model and register it in the Model Registry.

    Args:
        trigger_model: Model bundle to serialize and attach as an artifact.
        registered_model_name: Registry entry to create or extend (required).
        artifact_path: Artifact sub-path inside the active run.

    Raises:
        ValueError: If ``registered_model_name`` is empty.
        RuntimeError: If no MLflow run is active.
    """
    if not registered_model_name:
        raise ValueError("registered_model_name must be provided and non-empty")

    run = mlflow.active_run()
    if run is None:
        raise RuntimeError("No active MLflow run. Start a run before logging.")

    with tempfile.TemporaryDirectory() as tmpdir:
        # Serialize the whole TriggerModel into a temporary archive that is
        # attached to the PyFunc model as an artifact.
        archive = Path(tmpdir) / "triggermodel.tar.xz"
        trigger_model.save(archive)

        mlflow.pyfunc.log_model(
            artifact_path=artifact_path,
            python_model=MLflowWrapper(),
            artifacts={"trigger_model": str(archive)},
            pip_requirements=_get_pip_requirements(trigger_model)
        )

        # Ensure the registered model exists, then attach this run's
        # artifact as a new version (registration is always required).
        client = MlflowClient()
        model_uri = f"runs:/{run.info.run_id}/{artifact_path}"
        try:
            client.get_registered_model(registered_model_name)
        except mlflow.exceptions.RestException:
            client.create_registered_model(registered_model_name)
        client.create_model_version(
            name=registered_model_name,
            source=model_uri,
            run_id=run.info.run_id
        )
195
+
196
def load_model(model_uri: str) -> mlflow.pyfunc.PyFuncModel:
    """Load the logged PyFunc wrapper for generic inference."""
    loaded = mlflow.pyfunc.load_model(model_uri)
    return loaded
198
+
199
+
200
def load_full_model(model_uri: str) -> TriggerModel:
    """Download the logged artifacts and rebuild the full TriggerModel.

    NOTE(review): assumes the archive lives at
    ``<artifacts>/trigger_model/triggermodel.tar.xz``, matching the
    artifact key used by ``log_model`` — confirm if that layout changes.
    """
    artifacts_dir = Path(mlflow.artifacts.download_artifacts(model_uri))
    return TriggerModel.load(artifacts_dir / "trigger_model" / "triggermodel.tar.xz")
204
+
205
+
206
def get_model_info(model_uri: str) -> Dict[str, Any]:
    """Load a logged model and return its metadata dictionary.

    Falls back to an error marker when the wrapper does not expose
    ``get_model_info``.  NOTE(review): relies on the private
    ``_model_impl`` attribute of the loaded PyFunc model.
    """
    wrapper = mlflow.pyfunc.load_model(model_uri)
    impl = wrapper._model_impl
    if hasattr(impl, "get_model_info"):
        return impl.get_model_info()
    return {"error": "Model info not available"}
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: triggerflow
3
- Version: 0.1.6
3
+ Version: 0.1.7
4
4
  Summary: Utilities for ML models targeting hardware triggers
5
5
  Classifier: Programming Language :: Python :: 3
6
6
  Classifier: License :: OSI Approved :: MIT License
@@ -1,100 +0,0 @@
1
- # trigger_mlflow.py
2
- import mlflow
3
- import mlflow.pyfunc
4
- import tempfile
5
- from pathlib import Path
6
- from typing import Dict, Any
7
- from mlflow.tracking import MlflowClient
8
- from .core import TriggerModel
9
-
10
-
11
- class MLflowWrapper(mlflow.pyfunc.PythonModel):
12
- """PyFunc wrapper for TriggerModel; backend can be set at runtime."""
13
- def load_context(self, context):
14
- archive_path = Path(context.artifacts["trigger_model"])
15
- self.model = TriggerModel.load(archive_path)
16
- self.backend = "software"
17
-
18
- def predict(self, context, model_input):
19
- if self.backend == "software":
20
- return self.model.software_predict(model_input)
21
- elif self.backend == "qonnx":
22
- if self.model.model_qonnx is None:
23
- raise RuntimeError("QONNX model not available.")
24
- return self.model.qonnx_predict(model_input)
25
- elif self.backend == "firmware":
26
- if self.model.firmware_model is None:
27
- raise RuntimeError("Firmware model not available.")
28
- return self.model.firmware_predict(model_input)
29
- else:
30
- raise ValueError(f"Unsupported backend: {self.backend}")
31
-
32
- def get_model_info(self):
33
- if hasattr(self.model, "get_model_info"):
34
- return self.model.get_model_info()
35
- return {"error": "Model info not available"}
36
-
37
-
38
- def _get_pip_requirements(trigger_model: TriggerModel) -> list:
39
- requirements = ["numpy"]
40
- if trigger_model.ml_backend == "keras":
41
- requirements.extend(["tensorflow", "keras"])
42
- elif trigger_model.ml_backend == "xgboost":
43
- requirements.append("xgboost")
44
- if trigger_model.compiler == "hls4ml":
45
- requirements.append("hls4ml")
46
- elif trigger_model.compiler == "conifer":
47
- requirements.append("conifer")
48
- if hasattr(trigger_model, "model_qonnx") and trigger_model.model_qonnx is not None:
49
- requirements.append("qonnx")
50
- return requirements
51
-
52
-
53
- def log_model(trigger_model: TriggerModel, registered_model_name: str, artifact_path: str = "TriggerModel"):
54
- """Log a TriggerModel as a PyFunc model and register it in the Model Registry."""
55
- if not registered_model_name:
56
- raise ValueError("registered_model_name must be provided and non-empty")
57
-
58
- if mlflow.active_run() is None:
59
- raise RuntimeError("No active MLflow run. Start a run before logging.")
60
-
61
- run = mlflow.active_run()
62
- with tempfile.TemporaryDirectory() as tmpdir:
63
- archive_path = Path(tmpdir) / "triggermodel.tar.xz"
64
- trigger_model.save(archive_path)
65
-
66
- mlflow.pyfunc.log_model(
67
- artifact_path=artifact_path,
68
- python_model=MLflowWrapper(),
69
- artifacts={"trigger_model": str(archive_path)},
70
- pip_requirements=_get_pip_requirements(trigger_model)
71
- )
72
-
73
- # register model (always required)
74
- client = MlflowClient()
75
- model_uri = f"runs:/{run.info.run_id}/{artifact_path}"
76
- try:
77
- client.get_registered_model(registered_model_name)
78
- except mlflow.exceptions.RestException:
79
- client.create_registered_model(registered_model_name)
80
- client.create_model_version(
81
- name=registered_model_name,
82
- source=model_uri,
83
- run_id=run.info.run_id
84
- )
85
-
86
- def load_model(model_uri: str) -> mlflow.pyfunc.PyFuncModel:
87
- return mlflow.pyfunc.load_model(model_uri)
88
-
89
-
90
- def load_full_model(model_uri: str) -> TriggerModel:
91
- local_path = mlflow.artifacts.download_artifacts(model_uri)
92
- archive_path = Path(local_path) / "trigger_model" / "triggermodel.tar.xz"
93
- return TriggerModel.load(archive_path)
94
-
95
-
96
- def get_model_info(model_uri: str) -> Dict[str, Any]:
97
- model = mlflow.pyfunc.load_model(model_uri)
98
- if hasattr(model._model_impl, "get_model_info"):
99
- return model._model_impl.get_model_info()
100
- return {"error": "Model info not available"}
File without changes
File without changes
File without changes