aissemble_inference_common_test-1.5.0rc3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,42 @@
+ ###
+ # #%L
+ # aiSSEMBLE::Open Inference Protocol::Common Test Utilities
+ # %%
+ # Copyright (C) 2024 Booz Allen Hamilton Inc.
+ # %%
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # #L%
+ ###
+ """aiSSEMBLE OIP Common Test Utilities.
+
+ Reusable MLServer test fixtures and utilities for aiSSEMBLE OIP modules and examples.
+ """
+
+ from .behave_helpers import (
+     setup_mlserver_dynamic,
+     setup_mlserver_simple,
+     start_mlserver_with_model,
+     teardown_mlserver,
+ )
+ from .config_builder import create_model_settings, create_settings
+ from .mlserver_fixture import MLServerFixture
+
+ __all__ = [
+     "MLServerFixture",
+     "create_settings",
+     "create_model_settings",
+     "setup_mlserver_simple",
+     "setup_mlserver_dynamic",
+     "start_mlserver_with_model",
+     "teardown_mlserver",
+ ]
@@ -0,0 +1,147 @@
+ ###
+ # #%L
+ # aiSSEMBLE::Open Inference Protocol::Common Test Utilities
+ # %%
+ # Copyright (C) 2024 Booz Allen Hamilton Inc.
+ # %%
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # #L%
+ ###
+ """Behave integration helpers for MLServer testing.
+
+ Provides drop-in replacement functions for environment.py hooks,
+ maintaining backward compatibility with existing test code.
+ """
+
+ from pathlib import Path
+ from typing import Any, Optional
+
+ from .mlserver_fixture import MLServerFixture
+
+
+ def setup_mlserver_simple(context: Any, models_dir: Path, port: int = 8080) -> None:
+     """Set up the MLServer fixture for a static model directory (examples).
+
+     Sets context attributes for backward compatibility:
+     - context.mlserver_fixture
+     - context.mlserver_url
+     - context.mlserver_port
+
+     Usage in before_all():
+         setup_mlserver_simple(context, models_dir=Path("models"), port=8080)
+         context.mlserver_fixture.start()
+
+     Args:
+         context: Behave context object
+         models_dir: Path to existing models directory
+         port: HTTP port (default: 8080)
+     """
+     context.mlserver_fixture = MLServerFixture.simple(port=port, models_dir=models_dir)
+     context.mlserver_url = None  # Will be set after start()
+     context.mlserver_port = None  # Will be set after start()
+     context.mlserver_process = None  # Backward compatibility
+
+
+ def setup_mlserver_dynamic(context: Any) -> None:
+     """Set up the MLServer fixture for dynamic config generation (module tests).
+
+     Sets context attributes for backward compatibility:
+     - context.mlserver_fixture
+     - context.mlserver_url
+     - context.mlserver_port
+     - context.mlserver_process
+     - context.temp_dir
+
+     Usage in before_all():
+         setup_mlserver_dynamic(context)
+
+     Args:
+         context: Behave context object
+     """
+     context.mlserver_fixture = MLServerFixture.dynamic()
+     context.mlserver_url = None  # Will be set after start_with_model()
+     context.mlserver_port = None  # Will be set after start_with_model()
+     context.mlserver_process = None  # Backward compatibility
+     context.temp_dir = None  # Will be set after start_with_model()
+
+
+ def teardown_mlserver(context: Any) -> None:
+     """Tear down the MLServer fixture and clean up resources.
+
+     Usage in after_all():
+         teardown_mlserver(context)
+
+     Args:
+         context: Behave context object
+     """
+     if hasattr(context, "mlserver_fixture"):
+         fixture = context.mlserver_fixture
+         if fixture.process:
+             exit_code = fixture.process.poll()
+             if exit_code is not None:
+                 stdout, stderr = fixture.process.communicate()
+                 print(f"\nMLServer exited with code {exit_code}")
+                 print(f"stdout: {stdout.decode()}")
+                 print(f"stderr: {stderr.decode()}")
+             else:
+                 fixture.stop()
+
+         fixture.cleanup()
+
+
+ def start_mlserver_with_model(
+     context: Any,
+     model_name: str,
+     runtime: str,
+     global_settings: Optional[dict] = None,
+     **parameters,
+ ) -> None:
+     """Start MLServer with dynamically generated model configuration.
+
+     Updates context attributes for backward compatibility:
+     - context.mlserver_url
+     - context.mlserver_port
+     - context.mlserver_process
+     - context.temp_dir
+
+     Usage in test steps:
+         start_mlserver_with_model(
+             context,
+             model_name="yolo",
+             runtime="aissemble_inference_yolo.YOLORuntime",
+             model="yolov8n.pt"
+         )
+
+     Args:
+         context: Behave context object
+         model_name: Model name (e.g., "yolo", "sumy")
+         runtime: Runtime implementation class path
+         global_settings: Optional global settings.json content
+         **parameters: Model parameters for model-settings.json
+     """
+     if not hasattr(context, "mlserver_fixture"):
+         raise RuntimeError("Call setup_mlserver_dynamic() in before_all() first")
+
+     fixture = context.mlserver_fixture
+     fixture.start_with_model(
+         model_name=model_name,
+         runtime=runtime,
+         global_settings=global_settings,
+         **parameters,
+     )
+
+     # Update context attributes for backward compatibility
+     context.mlserver_url = fixture.url
+     context.mlserver_port = fixture.port
+     context.mlserver_process = fixture.process
+     context.temp_dir = fixture.temp_dir
@@ -0,0 +1,52 @@
+ ###
+ # #%L
+ # aiSSEMBLE::Open Inference Protocol::Common Test Utilities
+ # %%
+ # Copyright (C) 2024 Booz Allen Hamilton Inc.
+ # %%
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # #L%
+ ###
+ """MLServer configuration file builders.
+
+ Utilities for generating settings.json and model-settings.json files
+ for dynamic MLServer configuration during testing.
+ """
+
+ import json
+ from pathlib import Path
+ from typing import Any, Dict
+
+
+ def create_settings(models_dir: Path, settings: Dict[str, Any]) -> None:
+     """Create MLServer global settings.json file.
+
+     Args:
+         models_dir: Directory where settings.json will be written
+         settings: Settings dictionary (parallel_workers, host, etc.)
+     """
+     settings_path = models_dir / "settings.json"
+     with open(settings_path, "w") as f:
+         json.dump(settings, f, indent=2)
+
+
+ def create_model_settings(model_dir: Path, model_settings: Dict[str, Any]) -> None:
+     """Create model-settings.json file for a specific model.
+
+     Args:
+         model_dir: Model directory where model-settings.json will be written
+         model_settings: Model configuration (name, implementation, parameters)
+     """
+     settings_path = model_dir / "model-settings.json"
+     with open(settings_path, "w") as f:
+         json.dump(model_settings, f, indent=2)
@@ -0,0 +1,363 @@
+ ###
+ # #%L
+ # aiSSEMBLE::Open Inference Protocol::Common Test Utilities
+ # %%
+ # Copyright (C) 2024 Booz Allen Hamilton Inc.
+ # %%
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # #L%
+ ###
+ """MLServer fixture for test environments.
+
+ Provides a reusable abstraction for managing MLServer lifecycle during testing.
+ Consolidates process management, health checking, and graceful shutdown logic.
+ """
+
+ import logging
+ import os
+ import socket
+ import subprocess
+ import sys
+ import tempfile
+ import time
+ from pathlib import Path
+ from typing import Optional
+
+ import requests
+
+ logger = logging.getLogger(__name__)
+
+
+ class MLServerFixture:
+     """Manages MLServer lifecycle for testing.
+
+     Provides two factory methods:
+     - simple(): For static model directories (examples)
+     - dynamic(): For temporary config generation (module tests)
+
+     Attributes:
+         url: MLServer base URL (e.g., "http://127.0.0.1:8080")
+         port: HTTP port number
+         process: Subprocess handle (for backward compatibility)
+         temp_dir: Temporary directory path (None for simple mode)
+     """
+
+     def __init__(
+         self,
+         port: Optional[int] = None,
+         models_dir: Optional[Path] = None,
+         use_temp_dir: bool = False,
+     ):
+         """Initialize MLServer fixture.
+
+         Args:
+             port: HTTP port (None for dynamic allocation)
+             models_dir: Path to models directory (for simple mode)
+             use_temp_dir: Whether to use temporary directory (for dynamic mode)
+         """
+         self._port = port
+         self._models_dir = models_dir
+         self._use_temp_dir = use_temp_dir
+         self._process: Optional[subprocess.Popen] = None
+         self._temp_dir: Optional[str] = None
+
+     def __enter__(self) -> "MLServerFixture":
+         """Enter context manager - returns self for use in 'with' statement.
+
+         Returns:
+             Self for context manager protocol
+         """
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+         """Exit context manager - ensures cleanup even if exceptions occur.
+
+         Args:
+             exc_type: Exception type if an exception occurred
+             exc_val: Exception value if an exception occurred
+             exc_tb: Exception traceback if an exception occurred
+         """
+         self.stop()
+         self.cleanup()
+
+     @classmethod
+     def simple(cls, port: int, models_dir: Path) -> "MLServerFixture":
+         """Create fixture for static model directory (examples).
+
+         Args:
+             port: Fixed HTTP port
+             models_dir: Path to existing models directory
+
+         Returns:
+             Configured MLServerFixture
+         """
+         return cls(port=port, models_dir=models_dir, use_temp_dir=False)
+
+     @classmethod
+     def dynamic(cls) -> "MLServerFixture":
+         """Create fixture for dynamic config generation (module tests).
+
+         Uses temporary directory and dynamic port allocation.
+
+         Returns:
+             Configured MLServerFixture
+         """
+         return cls(port=None, models_dir=None, use_temp_dir=True)
+
+     @property
+     def url(self) -> str:
+         """Get MLServer base URL."""
+         if self._port is None:
+             raise RuntimeError("MLServer not started - call start() first")
+         return f"http://127.0.0.1:{self._port}"
+
+     @property
+     def port(self) -> int:
+         """Get MLServer HTTP port."""
+         if self._port is None:
+             raise RuntimeError("MLServer not started - call start() first")
+         return self._port
+
+     @property
+     def process(self) -> Optional[subprocess.Popen]:
+         """Get MLServer subprocess (backward compatibility)."""
+         return self._process
+
+     @property
+     def temp_dir(self) -> Optional[str]:
+         """Get temporary directory path (None for simple mode)."""
+         return self._temp_dir
+
+     def start(self, verbose: bool = False) -> None:
+         """Start MLServer with static models directory.
+
+         For simple mode only (examples with pre-configured models).
+
+         Args:
+             verbose: Enable verbose health check logging
+
+         Raises:
+             RuntimeError: If called in dynamic mode or already started
+         """
+         if self._use_temp_dir:
+             raise RuntimeError("Use start_with_model() for dynamic mode")
+         if self._process:
+             raise RuntimeError("MLServer already started")
+         if not self._models_dir:
+             raise RuntimeError("No models directory configured")
+
+         self._start_process(self._models_dir, verbose=verbose)
+
+     def start_with_model(
+         self,
+         model_name: str,
+         runtime: str,
+         global_settings: Optional[dict] = None,
+         **parameters,
+     ) -> None:
+         """Start MLServer with dynamically generated model configuration.
+
+         For dynamic mode only (module tests with temporary configs).
+
+         Args:
+             model_name: Model name (e.g., "yolo", "sumy")
+             runtime: Runtime implementation class
+             global_settings: Optional global settings.json content
+             **parameters: Model parameters for model-settings.json
+
+         Raises:
+             RuntimeError: If called in simple mode or already started
+         """
+         if not self._use_temp_dir:
+             raise RuntimeError("Use start() for simple mode")
+         if self._process:
+             self.stop()
+
+         from .config_builder import create_model_settings, create_settings
+
+         self._temp_dir = tempfile.mkdtemp(prefix=f"{model_name}_test_")
+         models_dir = Path(self._temp_dir) / "models"
+         model_dir = models_dir / model_name
+         model_dir.mkdir(parents=True)
+
+         settings = global_settings or {
+             "parallel_workers": 0,
+             "host": "127.0.0.1",
+         }
+         create_settings(models_dir, settings)
+
+         model_settings = {"name": model_name, "implementation": runtime}
+         if parameters:
+             model_settings["parameters"] = parameters
+
+         create_model_settings(model_dir, model_settings)
+
+         self._start_process(models_dir, verbose=True)
+
+     def stop(self) -> None:
+         """Stop MLServer gracefully.
+
+         Attempts to terminate, waits up to 10s, then kills if needed.
+         """
+         if not self._process:
+             return
+
+         # Only terminate if process is still running
+         if self._process.poll() is None:
+             self._process.terminate()
+             try:
+                 self._process.wait(timeout=10)
+             except subprocess.TimeoutExpired:
+                 logger.warning("MLServer did not terminate gracefully, killing process")
+                 self._process.kill()
+                 # Add timeout to second wait to prevent indefinite hang
+                 try:
+                     self._process.wait(timeout=5)
+                 except subprocess.TimeoutExpired:
+                     logger.error(
+                         "MLServer did not respond to kill signal after 5s, "
+                         "process may be in zombie state"
+                     )
+
+         self._process = None
+
+     def cleanup(self) -> None:
+         """Clean up temporary directory (dynamic mode only).
+
+         Logs warnings if cleanup fails but does not raise exceptions.
+         """
+         if self._temp_dir and os.path.exists(self._temp_dir):
+             import shutil
+
+             try:
+                 shutil.rmtree(self._temp_dir)
+                 logger.debug(f"Cleaned up temp directory: {self._temp_dir}")
+             except (OSError, PermissionError) as e:
+                 logger.warning(
+                     f"Failed to clean up temp directory {self._temp_dir}: {e}. "
+                     f"Manual cleanup may be required."
+                 )
+             finally:
+                 # Always null out the temp_dir to prevent reuse attempts
+                 self._temp_dir = None
+
+     def _start_process(self, models_dir: Path, verbose: bool = False) -> None:
+         """Start MLServer process and wait for readiness.
+
+         Args:
+             models_dir: Path to models directory
+             verbose: Enable verbose health check logging
+         """
+         if self._port is None:
+             self._port = self._find_free_port()
+
+         venv_bin = os.path.dirname(sys.executable)
+         mlserver_cmd = os.path.join(venv_bin, "mlserver")
+
+         env = os.environ.copy()
+         env["MLSERVER_HTTP_PORT"] = str(self._port)
+         env["MLSERVER_GRPC_PORT"] = str(self._port + 1)
+         env["MLSERVER_METRICS_PORT"] = str(self._port + 2)
+
+         self._process = subprocess.Popen(
+             [mlserver_cmd, "start", str(models_dir)],
+             env=env,
+             stdout=subprocess.PIPE,
+             stderr=subprocess.PIPE,
+         )
+
+         self._wait_for_server(verbose=verbose)
+
+     def _wait_for_server(self, timeout: int = 120, verbose: bool = False) -> None:
+         """Wait for MLServer to become ready.
+
+         Args:
+             timeout: Maximum seconds to wait
+             verbose: Log health check attempts every 10 tries
+
+         Raises:
+             RuntimeError: If MLServer exits prematurely
+             TimeoutError: If server doesn't become ready in time
+         """
+         if not self._process:
+             raise RuntimeError("No process to wait for")
+
+         health_url = f"{self.url}/v2/health/ready"
+         start_time = time.time()
+         attempt = 0
+
+         while time.time() - start_time < timeout:
+             exit_code = self._process.poll()
+             if exit_code is not None:
+                 stdout, stderr = self._process.communicate()
+                 raise RuntimeError(
+                     f"MLServer exited with code {exit_code}.\n"
+                     f"stdout: {stdout.decode()}\n"
+                     f"stderr: {stderr.decode()}"
+                 )
+
+             try:
+                 response = requests.get(health_url, timeout=2)
+                 if response.status_code == 200:
+                     return
+                 if verbose and attempt % 10 == 0:
+                     print(
+                         f"Health check returned {response.status_code}: {response.text}"
+                     )
+             except requests.exceptions.ConnectionError as e:
+                 if verbose and attempt % 10 == 0:
+                     print(f"Health check connection failed: {e}")
+             except requests.exceptions.Timeout:
+                 if verbose and attempt % 10 == 0:
+                     print("Health check timed out")
+             except Exception as e:
+                 if verbose and attempt % 10 == 0:
+                     print(f"Health check error: {e}")
+
+             attempt += 1
+             time.sleep(1)
+
+         self._process.terminate()
+         stdout, stderr = self._process.communicate(timeout=5)
+         raise TimeoutError(
+             f"MLServer did not become ready within {timeout} seconds.\n"
+             f"stdout: {stdout.decode()}\n"
+             f"stderr: {stderr.decode()}"
+         )
+
+     @staticmethod
+     def _find_free_port() -> int:
+         """Find a free port for MLServer.
+
+         WARNING: Race condition exists between port allocation and usage.
+         The port is freed when this method returns, and another process could
+         theoretically bind to it before MLServer starts. This is generally safe
+         for test environments but can cause flaky tests in high-concurrency
+         scenarios (e.g., parallel CI builds).
+
+         The socket is bound to 127.0.0.1 (localhost) to avoid conflicts with
+         system services. The OS assigns an ephemeral port from its available pool.
+
+         Returns:
+             Available port number from OS ephemeral port range
+
+         Note:
+             If MLServer fails to start with "Address already in use", this race
+             condition may be the cause. Consider adding retry logic or using
+             fixed port ranges for parallel test execution.
+         """
+         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+             s.bind(("127.0.0.1", 0))
+             s.listen(1)
+             port = s.getsockname()[1]
+             return port
@@ -0,0 +1,172 @@
+ Metadata-Version: 2.4
+ Name: aissemble-inference-common-test
+ Version: 1.5.0rc3
+ Summary: Reusable MLServer test utilities for aiSSEMBLE Inference
+ Author-email: aiSSEMBLE Team <aissemble@bah.com>
+ License-File: LICENSE.txt
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Python: >=3.11
+ Requires-Dist: requests>=2.31.0
+ Description-Content-Type: text/markdown
+
+ # aiSSEMBLE OIP Common Test Utilities
+
+ Reusable MLServer test utilities for aiSSEMBLE Inference modules and examples.
+
+ ## Overview
+
+ This module provides consolidated MLServer lifecycle management utilities to eliminate code duplication across test suites. It offers two primary usage patterns:
+
+ 1. **Simple mode**: For examples with static model directories
+ 2. **Dynamic mode**: For module tests with temporary config generation
+
+ ## Installation
+
+ ```toml
+ # As a test dependency in pyproject.toml
+ [dependency-groups]
+ test = [
+     "aissemble-inference-common-test",
+ ]
+
+ [tool.uv.sources]
+ aissemble-inference-common-test = { path = "../aissemble-inference-common-test", editable = true }
+ ```
+
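+ Since the wheel is published to a public registry, installing it directly should also work (depending on which index your environment uses):
+
+ ```bash
+ pip install aissemble-inference-common-test
+ ```
+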
+ ## Usage
+
+ ### Simple Mode (Examples)
+
+ For tests that use pre-configured model directories:
+
+ ```python
+ # tests/features/environment.py
+ from pathlib import Path
+ from aissemble_inference_common_test.behave_helpers import (
+     setup_mlserver_simple,
+     teardown_mlserver,
+ )
+
+ def before_all(context):
+     example_dir = Path(__file__).parent.parent.parent
+     models_dir = example_dir / "models"
+
+     setup_mlserver_simple(context, models_dir=models_dir, port=8080)
+     context.mlserver_fixture.start()
+
+ def after_all(context):
+     teardown_mlserver(context)
+ ```
+
+ ### Dynamic Mode (Module Tests)
+
+ For tests that generate model configurations dynamically:
+
+ ```python
+ # tests/features/environment.py
+ from aissemble_inference_common_test.behave_helpers import (
+     setup_mlserver_dynamic,
+     teardown_mlserver,
+     start_mlserver_with_model,
+ )
+
+ def before_all(context):
+     setup_mlserver_dynamic(context)
+
+ def after_scenario(context, scenario):
+     if hasattr(context, "mlserver_fixture") and context.mlserver_fixture.process:
+         context.mlserver_fixture.stop()
+
+ def after_all(context):
+     teardown_mlserver(context)
+
+ # In your step definitions:
+ start_mlserver_with_model(
+     context,
+     model_name="yolo",
+     runtime="aissemble_inference_yolo.YOLORuntime",
+     model="yolov8n.pt"
+ )
+ ```
+
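+ Keyword arguments passed to `start_mlserver_with_model` become the `parameters` block of the generated `model-settings.json`. For the YOLO call above, the generated file would look roughly like this (a sketch of what the config builder writes):
+
+ ```json
+ {
+   "name": "yolo",
+   "implementation": "aissemble_inference_yolo.YOLORuntime",
+   "parameters": {
+     "model": "yolov8n.pt"
+   }
+ }
+ ```
+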
+ ## Features
+
+ - **Process Management**: Robust MLServer subprocess handling with zombie process detection
+ - **Health Checking**: Automatic polling of `/v2/health/ready` endpoint
+ - **Graceful Shutdown**: Terminate with timeout, then kill if needed (with safety timeouts)
+ - **Dynamic Port Allocation**: Automatic free port discovery
+ - **Config Generation**: JSON settings and model-settings creation (see the sketch below)
+ - **Context Manager Support**: Automatic cleanup with Python `with` statement
+ - **Error Logging**: Detailed warnings for cleanup and shutdown failures
+ - **Backward Compatible**: Drop-in replacement for existing test code
+
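+ The config generation helpers can also be called directly; a minimal sketch (the paths are illustrative and `my_pkg.EchoRuntime` is a hypothetical runtime class):
+
+ ```python
+ from pathlib import Path
+
+ from aissemble_inference_common_test import create_model_settings, create_settings
+
+ models_dir = Path("/tmp/models")  # illustrative location
+ model_dir = models_dir / "echo"
+ model_dir.mkdir(parents=True, exist_ok=True)
+
+ # Writes models/settings.json (global MLServer settings)
+ create_settings(models_dir, {"parallel_workers": 0, "host": "127.0.0.1"})
+
+ # Writes models/echo/model-settings.json (per-model configuration)
+ create_model_settings(
+     model_dir,
+     {"name": "echo", "implementation": "my_pkg.EchoRuntime"},  # hypothetical runtime
+ )
+ ```
+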
+ ## API Reference
+
+ ### MLServerFixture
+
+ Main fixture class for MLServer lifecycle management.
+
+ #### Basic Usage
+
+ ```python
+ from pathlib import Path
+
+ from aissemble_inference_common_test import MLServerFixture
+
+ # Simple mode
+ fixture = MLServerFixture.simple(port=8080, models_dir=Path("models"))
+ fixture.start()
+
+ # Dynamic mode
+ fixture = MLServerFixture.dynamic()
+ fixture.start_with_model(
+     model_name="yolo",
+     runtime="aissemble_inference_yolo.YOLORuntime",
+     model="yolov8n.pt"
+ )
+
+ # Cleanup
+ fixture.stop()
+ fixture.cleanup()
+ ```
+
+ #### Context Manager Usage (Recommended)
+
+ For automatic cleanup even when tests fail:
+
+ ```python
+ from pathlib import Path
+
+ from aissemble_inference_common_test import MLServerFixture
+
+ # Simple mode with context manager
+ with MLServerFixture.simple(port=8080, models_dir=Path("models")) as fixture:
+     fixture.start()
+     # Run tests...
+     # Automatic cleanup on exit
+
+ # Dynamic mode with context manager
+ with MLServerFixture.dynamic() as fixture:
+     fixture.start_with_model(
+         model_name="yolo",
+         runtime="aissemble_inference_yolo.YOLORuntime",
+         model="yolov8n.pt"
+     )
+     # Run tests...
+     # Automatic stop() and cleanup() on exit, even if an exception occurs
+ ```
+
+ ### Behave Helpers
+
+ - `setup_mlserver_simple(context, models_dir, port)` - Initialize simple fixture
+ - `setup_mlserver_dynamic(context)` - Initialize dynamic fixture
+ - `start_mlserver_with_model(context, model_name, runtime, **params)` - Start with config (see the step sketch below)
+ - `teardown_mlserver(context)` - Cleanup and shutdown
+
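+ Once a model is up, step definitions can reach it through `context.mlserver_url`. A rough sketch of a step that posts an Open Inference Protocol request (the payload shape and the "sumy" model name are illustrative; the exact tensors depend on your runtime):
+
+ ```python
+ import requests
+ from behave import when
+
+ @when("the client sends text to the model")
+ def step_send_text(context):
+     payload = {
+         "inputs": [
+             {"name": "text", "shape": [1], "datatype": "BYTES", "data": ["hello world"]}
+         ]
+     }
+     context.response = requests.post(
+         f"{context.mlserver_url}/v2/models/sumy/infer", json=payload, timeout=30
+     )
+ ```
+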
+ ## License
+
+ Apache 2.0
@@ -0,0 +1,8 @@
+ aissemble_inference_common_test/__init__.py,sha256=iklVYA0fd1khc8bALe9Yn8Dzg_VApATm8f18MoGjevE,1274
+ aissemble_inference_common_test/behave_helpers.py,sha256=Fv6d09L7ZpMbZP33uCbXKUyu3UDn4kVatnrzQrpam0E,4813
+ aissemble_inference_common_test/config_builder.py,sha256=bnjjXy4IZvQ2S40H8pgiB187fFLJmV8uoroBcvSA6dk,1786
+ aissemble_inference_common_test/mlserver_fixture.py,sha256=F4Fyu81wz2ujdxdSbHsou6gBvQbmHl-ZHwEy1po5uNg,12486
+ aissemble_inference_common_test-1.5.0rc3.dist-info/METADATA,sha256=L7HBPyAnDJ20sAP6UuDKN1cNKSOeEWZskUaJatAxK0A,4849
+ aissemble_inference_common_test-1.5.0rc3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ aissemble_inference_common_test-1.5.0rc3.dist-info/licenses/LICENSE.txt,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ aissemble_inference_common_test-1.5.0rc3.dist-info/RECORD,,
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.28.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.