lumen-app 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. lumen_app/__init__.py +7 -0
  2. lumen_app/core/__init__.py +0 -0
  3. lumen_app/core/config.py +661 -0
  4. lumen_app/core/installer.py +274 -0
  5. lumen_app/core/loader.py +45 -0
  6. lumen_app/core/router.py +87 -0
  7. lumen_app/core/server.py +389 -0
  8. lumen_app/core/service.py +49 -0
  9. lumen_app/core/tests/__init__.py +1 -0
  10. lumen_app/core/tests/test_core_integration.py +561 -0
  11. lumen_app/core/tests/test_env_checker.py +487 -0
  12. lumen_app/proto/README.md +12 -0
  13. lumen_app/proto/ml_service.proto +88 -0
  14. lumen_app/proto/ml_service_pb2.py +66 -0
  15. lumen_app/proto/ml_service_pb2.pyi +136 -0
  16. lumen_app/proto/ml_service_pb2_grpc.py +251 -0
  17. lumen_app/server.py +362 -0
  18. lumen_app/utils/env_checker.py +752 -0
  19. lumen_app/utils/installation/__init__.py +25 -0
  20. lumen_app/utils/installation/env_manager.py +152 -0
  21. lumen_app/utils/installation/micromamba_installer.py +459 -0
  22. lumen_app/utils/installation/package_installer.py +149 -0
  23. lumen_app/utils/installation/verifier.py +95 -0
  24. lumen_app/utils/logger.py +181 -0
  25. lumen_app/utils/mamba/cuda.yaml +12 -0
  26. lumen_app/utils/mamba/default.yaml +6 -0
  27. lumen_app/utils/mamba/openvino.yaml +7 -0
  28. lumen_app/utils/mamba/tensorrt.yaml +13 -0
  29. lumen_app/utils/package_resolver.py +309 -0
  30. lumen_app/utils/preset_registry.py +219 -0
  31. lumen_app/web/__init__.py +3 -0
  32. lumen_app/web/api/__init__.py +1 -0
  33. lumen_app/web/api/config.py +229 -0
  34. lumen_app/web/api/hardware.py +201 -0
  35. lumen_app/web/api/install.py +608 -0
  36. lumen_app/web/api/server.py +253 -0
  37. lumen_app/web/core/__init__.py +1 -0
  38. lumen_app/web/core/server_manager.py +348 -0
  39. lumen_app/web/core/state.py +264 -0
  40. lumen_app/web/main.py +145 -0
  41. lumen_app/web/models/__init__.py +28 -0
  42. lumen_app/web/models/config.py +63 -0
  43. lumen_app/web/models/hardware.py +64 -0
  44. lumen_app/web/models/install.py +134 -0
  45. lumen_app/web/models/server.py +95 -0
  46. lumen_app/web/static/assets/index-CGuhGHC9.css +1 -0
  47. lumen_app/web/static/assets/index-DN6HmxWS.js +56 -0
  48. lumen_app/web/static/index.html +14 -0
  49. lumen_app/web/static/vite.svg +1 -0
  50. lumen_app/web/websockets/__init__.py +1 -0
  51. lumen_app/web/websockets/logs.py +159 -0
  52. lumen_app-0.4.2.dist-info/METADATA +23 -0
  53. lumen_app-0.4.2.dist-info/RECORD +56 -0
  54. lumen_app-0.4.2.dist-info/WHEEL +5 -0
  55. lumen_app-0.4.2.dist-info/entry_points.txt +3 -0
  56. lumen_app-0.4.2.dist-info/top_level.txt +1 -0
lumen_app/core/tests/test_env_checker.py
@@ -0,0 +1,487 @@
+ """
+ Tests for EnvironmentChecker utility.
+ """
+
+ from pathlib import Path
+ from unittest.mock import MagicMock, patch
+
+ import pytest
+
+ from lumen_app.core.config import DeviceConfig
+ from lumen_app.utils.env_checker import (
+     DependencyInstaller,
+     DriverChecker,
+     DriverStatus,
+     EnvironmentChecker,
+     EnvironmentReport,
+     MicromambaChecker,
+ )
+
+ # =============================================================================
+ # DriverChecker Tests
+ # =============================================================================
+
+
+ def test_check_nvidia_gpu_available():
+     """Test NVIDIA GPU check when nvidia-smi succeeds."""
+     with patch("subprocess.run") as mock_run:
+         mock_result = MagicMock()
+         mock_result.returncode = 0
+         mock_result.stdout = "CUDA Version: 12.6"
+         mock_run.return_value = mock_result
+
+         result = DriverChecker.check_nvidia_gpu()
+
+         assert result.status == DriverStatus.AVAILABLE
+         assert result.name == "CUDA"
+         assert result.installable_via_mamba is True
+
+
+ def test_check_nvidia_gpu_missing():
+     """Test NVIDIA GPU check when nvidia-smi fails."""
+     with patch("subprocess.run") as mock_run:
+         mock_run.side_effect = FileNotFoundError()
+
+         result = DriverChecker.check_nvidia_gpu()
+
+         assert result.status == DriverStatus.MISSING
+         assert result.name == "CUDA"
+
+
+ def test_check_amd_ryzen_ai_npu_not_windows():
+     """Test AMD Ryzen AI NPU check on non-Windows."""
+     with patch("platform.system", return_value="Linux"):
+         result = DriverChecker.check_amd_ryzen_ai_npu()
+
+         assert result.status == DriverStatus.MISSING
+         assert "Only available on Windows" in result.details
+
+
+ def test_check_amd_ryzen_ai_npu_dll_missing():
+     """Test AMD Ryzen AI NPU check when DLL is missing."""
+     with patch("platform.system", return_value="Windows"):
+         with patch("pathlib.Path.exists", return_value=False):
+             result = DriverChecker.check_amd_ryzen_ai_npu()
+
+             assert result.status == DriverStatus.MISSING
+             assert "amdipu.dll not found" in result.details
+
+
+ def test_check_amd_ryzen_ai_npu_service_running():
+     """Test AMD Ryzen AI NPU check when service is running."""
+     with patch("platform.system", return_value="Windows"):
+         with patch("pathlib.Path.exists", return_value=True):
+             with patch("subprocess.run") as mock_run:
+                 mock_result = MagicMock()
+                 mock_result.returncode = 0
+                 mock_result.stdout = "STATE: RUNNING"
+                 mock_run.return_value = mock_result
+
+                 result = DriverChecker.check_amd_ryzen_ai_npu()
+
+                 assert result.status == DriverStatus.AVAILABLE
+                 assert "amdipu service is running" in result.details
+
+
+ # =============================================================================
+ # MicromambaChecker Tests
+ # =============================================================================
+
+
+ def test_check_micromamba_available():
+     """Test micromamba check when installed."""
+     with patch("subprocess.run") as mock_run:
+         mock_result = MagicMock()
+         mock_result.returncode = 0
+         mock_result.stdout = "micromamba 1.5.3\n"
+         mock_run.return_value = mock_result
+
+         result = MicromambaChecker.check_micromamba()
+
+         assert result.status == DriverStatus.AVAILABLE
+         assert result.name == "micromamba"
+         assert "1.5.3" in result.details
+
+
+ def test_check_micromamba_missing():
+     """Test micromamba check when not installed."""
+     with patch("subprocess.run") as mock_run:
+         mock_run.side_effect = FileNotFoundError()
+
+         result = MicromambaChecker.check_micromamba()
+
+         assert result.status == DriverStatus.MISSING
+         assert result.name == "micromamba"
+         assert "not found" in result.details
+
+
+ def test_micromamba_install_to_cache_dir():
+     """Test micromamba installation to cache directory."""
+     with patch("subprocess.run") as mock_run:
+         # Mock successful installation
+         mock_result = MagicMock()
+         mock_result.returncode = 0
+         mock_run.return_value = mock_result
+
+         with patch("pathlib.Path.exists", return_value=False):
+             with patch("pathlib.Path.mkdir"):  # Mock mkdir to avoid creating dirs
+                 success, message = MicromambaChecker.install_micromamba(
+                     cache_dir="/test/cache", dry_run=True
+                 )
+
+                 assert success is True
+                 assert "Would run" in message
+
+
+ def test_micromamba_get_executable_path():
+     """Test getting micromamba executable path."""
+     with patch("platform.system", return_value="Darwin"):
+         path = MicromambaChecker.get_executable_path("/test/cache")
+         assert "bin/micromamba" in path
+
+     with patch("platform.system", return_value="Windows"):
+         path = MicromambaChecker.get_executable_path("/test/cache")
+         assert "bin/micromamba.exe" in path
+
+
+ # =============================================================================
+ # DriverChecker Tests (continued)
+ # =============================================================================
+
+
+ def test_check_intel_gpu_openvino_genuine_intel():
+     """Test Intel GPU/OpenVINO check."""
+     result = DriverChecker.check_intel_gpu_openvino()
+
+     # Should return OpenVINO check result
+     assert result.name == "OpenVINO"
+     # Status depends on platform (INCOMPATIBLE on Apple Silicon, MISSING on Intel without package, etc.)
+     assert isinstance(result.status, DriverStatus)
+
+
+ def test_check_intel_gpu_openvino_not_intel():
+     """Test Intel GPU/OpenVINO with non-Intel CPU."""
+     with patch("platform.system", return_value="Linux"):
+         with patch("builtins.open", create=True) as mock_open:
+             mock_open.return_value.__enter__ = MagicMock()
+             mock_open.return_value.__exit__ = MagicMock()
+             mock_open.return_value.read.return_value = "vendor_id\t: AuthenticAMD\n"
+
+             result = DriverChecker.check_intel_gpu_openvino()
+
+             assert result.status == DriverStatus.INCOMPATIBLE
+             assert "not Intel" in result.details
+
+
+ def test_check_apple_silicon_not_macos():
+     """Test Apple Silicon check on non-macOS."""
+     with patch("platform.system", return_value="Linux"):
+         result = DriverChecker.check_apple_silicon()
+
+         assert result.status == DriverStatus.INCOMPATIBLE
+         assert "Only available on macOS" in result.details
+
+
+ def test_check_apple_silicon_not_arm64():
+     """Test Apple Silicon check on x86_64 macOS."""
+     with patch("platform.system", return_value="Darwin"):
+         with patch("platform.machine", return_value="x86_64"):
+             result = DriverChecker.check_apple_silicon()
+
+             assert result.status == DriverStatus.INCOMPATIBLE
+             assert "not arm64" in result.details
+
+
+ def test_check_apple_silicon_available():
+     """Test Apple Silicon check on M1/M2/M3."""
+     with patch("platform.system", return_value="Darwin"):
+         with patch("platform.machine", return_value="arm64"):
+             with patch("subprocess.run") as mock_run:
+                 mock_result = MagicMock()
+                 mock_result.returncode = 0
+                 mock_result.stdout = "Apple M2"
+                 mock_run.return_value = mock_result
+
+                 result = DriverChecker.check_apple_silicon()
+
+                 assert result.status == DriverStatus.AVAILABLE
+                 assert "Apple" in result.details
+
+
+ def test_check_rockchip_rknn_not_linux():
+     """Test RKNN check on non-Linux."""
+     with patch("platform.system", return_value="Windows"):
+         result = DriverChecker.check_rockchip_rknn()
+
+         assert result.status == DriverStatus.INCOMPATIBLE
+         assert "Only available on Linux" in result.details
+
+
+ def test_check_rockchip_rknn_device_found():
+     """Test RKNN check when device node exists."""
+     with patch("platform.system", return_value="Linux"):
+         # Mock Path.exists to return True for /dev/rknpu
+         original_exists = Path.exists
+
+         def mock_exists(self):
+             return str(self) == "/dev/rknpu" or original_exists(self)
+
+         with patch.object(Path, "exists", mock_exists):
+             result = DriverChecker.check_rockchip_rknn()
+
+             assert result.status == DriverStatus.AVAILABLE
+             assert "Device found" in result.details
+
+
+ def test_check_rockchip_rknn_missing():
+     """Test RKNN check when device not found."""
+     with patch("platform.system", return_value="Linux"):
+         with patch("pathlib.Path.exists", return_value=False):
+             with patch("builtins.open", create=True) as mock_open:
+                 mock_open.return_value.__enter__ = MagicMock()
+                 mock_open.return_value.__exit__ = MagicMock()
+                 mock_open.return_value.read.return_value = "vendor_id\t: GenuineIntel\n"
+
+                 result = DriverChecker.check_rockchip_rknn()
+
+                 assert result.status == DriverStatus.MISSING
+
+
+ def test_check_amd_gpu_directml_not_windows():
+     """Test AMD GPU DirectML check on non-Windows."""
+     with patch("platform.system", return_value="Linux"):
+         result = DriverChecker.check_amd_gpu_directml()
+
+         assert result.status == DriverStatus.INCOMPATIBLE
+         assert "Only available on Windows" in result.details
+
+
+ def test_check_amd_gpu_directml_radeon_found():
+     """Test AMD GPU DirectML check when Radeon GPU found."""
+     with patch("platform.system", return_value="Windows"):
+         with patch("subprocess.run") as mock_run:
+             mock_result = MagicMock()
+             mock_result.returncode = 0
+             mock_result.stdout = "AMD Radeon RX 7900 XTX"
+             mock_run.return_value = mock_result
+
+             result = DriverChecker.check_amd_gpu_directml()
+
+             assert result.status == DriverStatus.AVAILABLE
+             assert "Radeon" in result.details
+
+
+ def test_check_for_preset_nvidia():
+     """Test checking drivers for NVIDIA GPU preset."""
+     with patch("subprocess.run") as mock_run:
+         mock_run.side_effect = FileNotFoundError()
+
+         results = DriverChecker.check_for_preset("nvidia_gpu")
+
+         assert len(results) == 1
+         assert results[0].name == "CUDA"
+
+
+ def test_check_for_preset_cpu():
+     """Test checking drivers for CPU preset (no special drivers)."""
+     results = DriverChecker.check_for_preset("cpu")
+
+     assert len(results) == 0
+
+
+ def test_check_for_preset_apple_silicon():
+     """Test checking drivers for Apple Silicon preset."""
+     with patch("platform.system", return_value="Darwin"):
+         with patch("platform.machine", return_value="arm64"):
+             with patch("subprocess.run") as mock_run:
+                 mock_result = MagicMock()
+                 mock_result.returncode = 0
+                 mock_result.stdout = "Apple M2"
+                 mock_run.return_value = mock_result
+
+                 results = DriverChecker.check_for_preset("apple_silicon")
+
+                 assert len(results) == 1
+                 assert results[0].name == "CoreML"
+
+
+ def test_check_for_device_config_cpu():
+     """Test checking drivers for CPU DeviceConfig."""
+     config = DeviceConfig.cpu()
+     results = DriverChecker.check_for_device_config(config)
+
+     assert len(results) == 0
+
+
+ def test_check_for_device_config_nvidia():
+     """Test checking drivers for NVIDIA DeviceConfig."""
+     config = DeviceConfig.nvidia_gpu()
+
+     with patch("subprocess.run") as mock_run:
+         mock_run.side_effect = FileNotFoundError()
+
+         results = DriverChecker.check_for_device_config(config)
+
+         assert len(results) >= 1
+         assert any(r.name == "CUDA" for r in results)
+
+
+ # =============================================================================
+ # DependencyInstaller Tests
+ # =============================================================================
+
+
+ def test_installer_init_default_path():
+     """Test installer initialization with default path."""
+     installer = DependencyInstaller()
+
+     assert installer.configs_dir is not None
+     assert installer.configs_dir.name == "mamba"
+
+
+ def test_installer_get_install_command_cuda():
+     """Test getting install command for CUDA."""
+     with patch("pathlib.Path.exists", return_value=True):
+         installer = DependencyInstaller(mamba_configs_dir="/fake/path")
+
+         cmd = installer.get_install_command("cuda")
+
+         assert "micromamba" in cmd
+         assert "cuda" in cmd.lower()
+
+
+ def test_installer_install_dry_run():
+     """Test installer dry run mode."""
+     with patch("pathlib.Path.exists", return_value=True):
+         installer = DependencyInstaller(mamba_configs_dir="/fake/path")
+
+         success, message = installer.install_driver("cuda", dry_run=True)
+
+         assert success is True
+         assert "Would run" in message
+
+
+ def test_installer_install_unsupported_driver():
+     """Test installer with unsupported driver."""
+     installer = DependencyInstaller()
+
+     success, message = installer.install_driver("unsupported_driver")
+
+     assert success is False
+     assert "No mamba config available" in message
+
+
+ # =============================================================================
+ # EnvironmentChecker Tests
+ # =============================================================================
+
+
+ def test_check_preset_nvidia_missing():
+     """Test environment check for NVIDIA preset with missing driver."""
+     with patch("subprocess.run") as mock_run:
+         mock_run.side_effect = FileNotFoundError()
+
+         report = EnvironmentChecker.check_preset("nvidia_gpu")
+
+         assert isinstance(report, EnvironmentReport)
+         assert report.preset_name == "nvidia_gpu"
+         assert report.ready is False
+         # missing_installable now contains lowercase names
+         assert "cuda" in report.missing_installable
+
+
+ def test_check_preset_cpu_ready():
+     """Test environment check for CPU preset (always ready)."""
+     report = EnvironmentChecker.check_preset("cpu")
+
+     assert isinstance(report, EnvironmentReport)
+     assert report.preset_name == "cpu"
+     assert report.ready is True
+     assert len(report.drivers) == 0
+
+
+ def test_check_device_config_cpu():
+     """Test environment check for CPU DeviceConfig."""
+     config = DeviceConfig.cpu()
+     report = EnvironmentChecker.check_device_config(config)
+
+     assert isinstance(report, EnvironmentReport)
+     assert report.ready is True
+     assert len(report.drivers) == 0
+
+
+ def test_check_device_config_apple_silicon():
+     """Test environment check for Apple Silicon DeviceConfig."""
+     config = DeviceConfig.apple_silicon()
+
+     with patch("platform.system", return_value="Darwin"):
+         with patch("platform.machine", return_value="arm64"):
+             with patch("subprocess.run") as mock_run:
+                 mock_result = MagicMock()
+                 mock_result.returncode = 0
+                 mock_result.stdout = "Apple M2"
+                 mock_run.return_value = mock_result
+
+                 report = EnvironmentChecker.check_device_config(config)
+
+                 assert isinstance(report, EnvironmentReport)
+                 assert report.ready is True
+
+
+ # =============================================================================
+ # Integration Tests
+ # =============================================================================
+
+
+ def test_full_workflow_check_and_install():
+     """Test full workflow: check preset, get install commands."""
+     with patch("subprocess.run") as mock_run:
+         mock_run.side_effect = FileNotFoundError()
+
+         # 1. Check preset
+         report = EnvironmentChecker.check_preset("nvidia_gpu")
+
+         assert report.ready is False
+         assert len(report.missing_installable) > 0
+
+         # 2. Get install command (driver names are uppercase in result)
+         installer = DependencyInstaller()
+         for driver_name in report.missing_installable:
+             # Convert to lowercase for installer
+             cmd = installer.get_install_command(driver_name.lower())
+             # Should have valid command or error message
+             assert isinstance(cmd, str)
+             assert len(cmd) > 0
+
+
+ def test_environment_report_all_available():
+     """Test EnvironmentReport when all drivers available."""
+     report = EnvironmentReport(preset_name="cpu", drivers=[], ready=True)
+
+     assert report.ready is True
+
+
+ def test_environment_report_missing_drivers():
+     """Test EnvironmentReport with missing drivers."""
+     from lumen_app.utils.env_checker import DriverCheckResult
+
+     report = EnvironmentReport(
+         preset_name="nvidia_gpu",
+         drivers=[
+             DriverCheckResult(
+                 name="CUDA",
+                 status=DriverStatus.MISSING,
+                 installable_via_mamba=True,
+                 mamba_config_path="cuda.yaml",
+             )
+         ],
+         ready=False,
+         missing_installable=["CUDA"],
+     )
+
+     assert report.ready is False
+     assert "CUDA" in report.missing_installable
+
+
+ if __name__ == "__main__":
+     pytest.main([__file__, "-v"])
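These tests pin down a check-then-install workflow. As a minimal usage sketch, assuming only the names the tests exercise (`EnvironmentChecker.check_preset`, `EnvironmentReport.missing_installable`, `DependencyInstaller.get_install_command`; the `.lower()` conversion mirrors the integration test above):

```python
# Sketch of the workflow the tests exercise; not part of the package diff itself.
from lumen_app.utils.env_checker import DependencyInstaller, EnvironmentChecker

report = EnvironmentChecker.check_preset("nvidia_gpu")
if not report.ready:
    installer = DependencyInstaller()
    for driver_name in report.missing_installable:
        # Tests indicate the installer expects lowercase driver names.
        print(installer.get_install_command(driver_name.lower()))
```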
lumen_app/proto/README.md
@@ -0,0 +1,12 @@
+ ## Proto Code Gen
+
+ Before running the command, make sure you are in the `Lumen/lumen-*/` directory.
+
+ ```bash
+ python -m grpc_tools.protoc \
+     -I src/lumen_clip \
+     --python_out=src/lumen_clip \
+     --grpc_python_out=src/lumen_clip \
+     --pyi_out=src/lumen_clip \
+     src/lumen_clip/proto/ml_service.proto
+ ```
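A quick smoke test of the generated modules might look like the sketch below. It assumes the layout implied by the command above: with `-I src/lumen_clip`, protoc emits `src/lumen_clip/proto/ml_service_pb2*.py`, importable as the `proto` package from that directory.

```python
# Hedged smoke test: verify the freshly generated stubs import cleanly.
import sys
sys.path.insert(0, "src/lumen_clip")  # assumed output directory from the command above

from proto import ml_service_pb2, ml_service_pb2_grpc  # noqa: F401
print(ml_service_pb2.DESCRIPTOR.name)  # expect "proto/ml_service.proto"
```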
lumen_app/proto/ml_service.proto
@@ -0,0 +1,88 @@
+ syntax = "proto3";
+
+ package home_native.v1;
+
+ option go_package = "server/proto";
+
+ import "google/protobuf/empty.proto";
+
+ // ---- Unified error codes (concise) ----
+ enum ErrorCode {
+   ERROR_CODE_UNSPECIFIED = 0;
+   ERROR_CODE_INVALID_ARGUMENT = 1;
+   ERROR_CODE_UNAVAILABLE = 2; // Downstream/hardware/resource unavailable
+   ERROR_CODE_DEADLINE_EXCEEDED = 3;
+   ERROR_CODE_INTERNAL = 4;
+ }
+
+ // ---- Standard error payload (used with gRPC status; fatal errors may terminate the stream) ----
+ message Error {
+   ErrorCode code = 1;
+   string message = 2;
+   string detail = 3; // For logging/troubleshooting (stack, node ID, etc.)
+ }
+
+ // ---- Structured task I/O description (for central routing and client negotiation) ----
+ message IOTask {
+   string name = 1; // "embed","detect","ocr","asr","generate","tts",...
+   repeated string input_mimes = 2; // Allow multiple input types: "image/jpeg","audio/pcm;rate=16000","application/json"
+   repeated string output_mimes = 3; // Typical outputs: "application/json;schema=bbox_v1","audio/wav"
+   map<string, string> limits = 4; // e.g., max_hw=1024, max_batch=8, max_length=4096
+ }
+
+ // ---- Capability declaration (retrieve at startup or on demand) ----
+ message Capability {
+   string service_name = 1; // "clip-embedder","ocr","llm","tts",...
+   repeated string model_ids = 2; // Supported model IDs/versions
+   string runtime = 3; // "onnxrt-cuda","tensorrt","coreml","rknn","qnn","cpu"
+   uint32 max_concurrency = 4; // Suggested max concurrency
+   repeated string precisions = 5; // ["fp32","fp16","int8"]
+   map<string, string> extra = 6; // Resolution limits, ANE/NPU features, etc.
+   repeated IOTask tasks = 7; // Structured task capabilities (recommended)
+   string protocol_version = 8; // Version of the current protocol; should follow semantic versioning, e.g., 1.0.0
+ }
+
+ // ---- Inference request (inbound message of bidi stream) ----
+ message InferRequest {
+   string correlation_id = 1; // Trace/correlation
+   string task = 2; // "embed","classify","detect","ocr","asr","generate","tts",...
+   bytes payload = 3; // Raw payload (binary or UTF-8 text)
+   map<string, string> meta = 4; // Task-specific parameters: model_id, conf_thres, stop, etc.
+
+   // --- Added: input content type and chunking control ---
+   string payload_mime = 5; // e.g. "image/jpeg","audio/pcm;rate=16000","application/json"
+   uint64 seq = 6; // Chunk index (starting at 0)
+   uint64 total = 7; // Optional: total number of chunks (omit if unknown)
+   uint64 offset = 8; // Optional: byte offset of payload in the overall stream
+ }
+
+ // ---- Inference response (outbound message of bidi stream) ----
+ message InferResponse {
+   string correlation_id = 1;
+   bool is_final = 2; // Streaming partials or final result
+   bytes result = 3; // Result bytes; if JSON, specify via result_mime
+   map<string, string> meta = 4; // e.g., lat_ms, tokens, bboxes_count
+   Error error = 5; // Populated only on failure (fatal errors may also terminate the stream)
+
+   // --- Added: output content type and chunking control ---
+   uint64 seq = 6; // Chunk index (starting at 0), used for incremental/audio/video chunks
+   uint64 total = 7; // Optional: total number of chunks
+   uint64 offset = 8; // Optional: byte offset of result in the overall output
+   string result_mime = 9; // e.g. "application/json;schema=embedding_v1","audio/wav","image/png"
+   string result_schema = 10; // Optional: explicit schema name (e.g., "bbox_v1","mask_rle_v1")
+ }
+
+ // ---- Service contract ----
+ service Inference {
+   // Bidirectional stream: client sends chunks; server returns incremental/final results; ordered but non-blocking
+   rpc Infer(stream InferRequest) returns (stream InferResponse);
+
+   // Capability declaration (backward compatibility: single capability; use StreamCapabilities for multiple)
+   rpc GetCapabilities(google.protobuf.Empty) returns (Capability);
+
+   // Recommended: server stream returns all capabilities (call at startup or after hot-reload)
+   rpc StreamCapabilities(google.protobuf.Empty) returns (stream Capability);
+
+   // Health probe
+   rpc Health(google.protobuf.Empty) returns (google.protobuf.Empty);
+ }
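For orientation, here is a hedged client-side sketch of the contract above, using the stubs shipped in this wheel (`lumen_app.proto.ml_service_pb2*`; the import path is assumed from the wheel layout). The endpoint address, task name, and payload are illustrative assumptions; only the message fields, enum values, and RPC names come from the .proto:

```python
# Hedged client sketch for the Inference service; not part of the package diff itself.
import grpc
from google.protobuf import empty_pb2

from lumen_app.proto import ml_service_pb2, ml_service_pb2_grpc

CHUNK = 64 * 1024  # chunk size is an arbitrary choice for this sketch


def chunked_requests(payload: bytes, mime: str):
    # One logical input split into ordered chunks, per the seq/total/offset fields.
    chunks = [payload[i:i + CHUNK] for i in range(0, len(payload), CHUNK)]
    for seq, chunk in enumerate(chunks):
        yield ml_service_pb2.InferRequest(
            correlation_id="req-1",      # trace/correlation id (assumed value)
            task="embed",                # one of the task names the proto comments list
            payload=chunk,
            payload_mime=mime,
            seq=seq,
            total=len(chunks),
            offset=seq * CHUNK,
        )


with grpc.insecure_channel("localhost:50051") as channel:  # address is an assumption
    stub = ml_service_pb2_grpc.InferenceStub(channel)

    # Capability negotiation at startup.
    cap = stub.GetCapabilities(empty_pb2.Empty())
    print(cap.service_name, cap.runtime, cap.protocol_version)

    # Bidirectional stream: responses may arrive as partials until is_final is set.
    for resp in stub.Infer(chunked_requests(b"\x00" * 200_000, "application/octet-stream")):
        if resp.error.code != ml_service_pb2.ERROR_CODE_UNSPECIFIED:
            raise RuntimeError(f"{resp.error.code}: {resp.error.message}")
        if resp.is_final:
            print("final result:", resp.result_mime, len(resp.result), "bytes")
```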
lumen_app/proto/ml_service_pb2.py
@@ -0,0 +1,66 @@
+ # -*- coding: utf-8 -*-
+ # Generated by the protocol buffer compiler. DO NOT EDIT!
+ # NO CHECKED-IN PROTOBUF GENCODE
+ # source: proto/ml_service.proto
+ # Protobuf Python Version: 6.31.1
+ """Generated protocol buffer code."""
+ from google.protobuf import descriptor as _descriptor
+ from google.protobuf import descriptor_pool as _descriptor_pool
+ from google.protobuf import runtime_version as _runtime_version
+ from google.protobuf import symbol_database as _symbol_database
+ from google.protobuf.internal import builder as _builder
+ _runtime_version.ValidateProtobufRuntimeVersion(
+     _runtime_version.Domain.PUBLIC,
+     6,
+     31,
+     1,
+     '',
+     'proto/ml_service.proto'
+ )
+ # @@protoc_insertion_point(imports)
+
+ _sym_db = _symbol_database.Default()
+
+
+ from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+
+
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16proto/ml_service.proto\x12\x0ehome_native.v1\x1a\x1bgoogle/protobuf/empty.proto\"Q\n\x05\x45rror\x12\'\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x19.home_native.v1.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x0e\n\x06\x64\x65tail\x18\x03 \x01(\t\"\xa4\x01\n\x06IOTask\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0binput_mimes\x18\x02 \x03(\t\x12\x14\n\x0coutput_mimes\x18\x03 \x03(\t\x12\x32\n\x06limits\x18\x04 \x03(\x0b\x32\".home_native.v1.IOTask.LimitsEntry\x1a-\n\x0bLimitsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x98\x02\n\nCapability\x12\x14\n\x0cservice_name\x18\x01 \x01(\t\x12\x11\n\tmodel_ids\x18\x02 \x03(\t\x12\x0f\n\x07runtime\x18\x03 \x01(\t\x12\x17\n\x0fmax_concurrency\x18\x04 \x01(\r\x12\x12\n\nprecisions\x18\x05 \x03(\t\x12\x34\n\x05\x65xtra\x18\x06 \x03(\x0b\x32%.home_native.v1.Capability.ExtraEntry\x12%\n\x05tasks\x18\x07 \x03(\x0b\x32\x16.home_native.v1.IOTask\x12\x18\n\x10protocol_version\x18\x08 \x01(\t\x1a,\n\nExtraEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xea\x01\n\x0cInferRequest\x12\x16\n\x0e\x63orrelation_id\x18\x01 \x01(\t\x12\x0c\n\x04task\x18\x02 \x01(\t\x12\x0f\n\x07payload\x18\x03 \x01(\x0c\x12\x34\n\x04meta\x18\x04 \x03(\x0b\x32&.home_native.v1.InferRequest.MetaEntry\x12\x14\n\x0cpayload_mime\x18\x05 \x01(\t\x12\x0b\n\x03seq\x18\x06 \x01(\x04\x12\r\n\x05total\x18\x07 \x01(\x04\x12\x0e\n\x06offset\x18\x08 \x01(\x04\x1a+\n\tMetaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xab\x02\n\rInferResponse\x12\x16\n\x0e\x63orrelation_id\x18\x01 \x01(\t\x12\x10\n\x08is_final\x18\x02 \x01(\x08\x12\x0e\n\x06result\x18\x03 \x01(\x0c\x12\x35\n\x04meta\x18\x04 \x03(\x0b\x32\'.home_native.v1.InferResponse.MetaEntry\x12$\n\x05\x65rror\x18\x05 \x01(\x0b\x32\x15.home_native.v1.Error\x12\x0b\n\x03seq\x18\x06 \x01(\x04\x12\r\n\x05total\x18\x07 \x01(\x04\x12\x0e\n\x06offset\x18\x08 \x01(\x04\x12\x13\n\x0bresult_mime\x18\t \x01(\t\x12\x15\n\rresult_schema\x18\n \x01(\t\x1a+\n\tMetaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01*\x9f\x01\n\tErrorCode\x12\x1a\n\x16\x45RROR_CODE_UNSPECIFIED\x10\x00\x12\x1f\n\x1b\x45RROR_CODE_INVALID_ARGUMENT\x10\x01\x12\x1a\n\x16\x45RROR_CODE_UNAVAILABLE\x10\x02\x12 \n\x1c\x45RROR_CODE_DEADLINE_EXCEEDED\x10\x03\x12\x17\n\x13\x45RROR_CODE_INTERNAL\x10\x04\x32\xa2\x02\n\tInference\x12H\n\x05Infer\x12\x1c.home_native.v1.InferRequest\x1a\x1d.home_native.v1.InferResponse(\x01\x30\x01\x12\x45\n\x0fGetCapabilities\x12\x16.google.protobuf.Empty\x1a\x1a.home_native.v1.Capability\x12J\n\x12StreamCapabilities\x12\x16.google.protobuf.Empty\x1a\x1a.home_native.v1.Capability0\x01\x12\x38\n\x06Health\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.EmptyB\x0eZ\x0cserver/protob\x06proto3')
+
+ _globals = globals()
+ _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.ml_service_pb2', _globals)
+ if not _descriptor._USE_C_DESCRIPTORS:
+   _globals['DESCRIPTOR']._loaded_options = None
+   _globals['DESCRIPTOR']._serialized_options = b'Z\014server/proto'
+   _globals['_IOTASK_LIMITSENTRY']._loaded_options = None
+   _globals['_IOTASK_LIMITSENTRY']._serialized_options = b'8\001'
+   _globals['_CAPABILITY_EXTRAENTRY']._loaded_options = None
+   _globals['_CAPABILITY_EXTRAENTRY']._serialized_options = b'8\001'
+   _globals['_INFERREQUEST_METAENTRY']._loaded_options = None
+   _globals['_INFERREQUEST_METAENTRY']._serialized_options = b'8\001'
+   _globals['_INFERRESPONSE_METAENTRY']._loaded_options = None
+   _globals['_INFERRESPONSE_METAENTRY']._serialized_options = b'8\001'
+   _globals['_ERRORCODE']._serialized_start=1144
+   _globals['_ERRORCODE']._serialized_end=1303
+   _globals['_ERROR']._serialized_start=71
+   _globals['_ERROR']._serialized_end=152
+   _globals['_IOTASK']._serialized_start=155
+   _globals['_IOTASK']._serialized_end=319
+   _globals['_IOTASK_LIMITSENTRY']._serialized_start=274
+   _globals['_IOTASK_LIMITSENTRY']._serialized_end=319
+   _globals['_CAPABILITY']._serialized_start=322
+   _globals['_CAPABILITY']._serialized_end=602
+   _globals['_CAPABILITY_EXTRAENTRY']._serialized_start=558
+   _globals['_CAPABILITY_EXTRAENTRY']._serialized_end=602
+   _globals['_INFERREQUEST']._serialized_start=605
+   _globals['_INFERREQUEST']._serialized_end=839
+   _globals['_INFERREQUEST_METAENTRY']._serialized_start=796
+   _globals['_INFERREQUEST_METAENTRY']._serialized_end=839
+   _globals['_INFERRESPONSE']._serialized_start=842
+   _globals['_INFERRESPONSE']._serialized_end=1141
+   _globals['_INFERRESPONSE_METAENTRY']._serialized_start=796
+   _globals['_INFERRESPONSE_METAENTRY']._serialized_end=839
+   _globals['_INFERENCE']._serialized_start=1306
+   _globals['_INFERENCE']._serialized_end=1596
+ # @@protoc_insertion_point(module_scope)