lumen-app 0.4.2 (lumen_app-0.4.2-py3-none-any.whl)

This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (56)
  1. lumen_app/__init__.py +7 -0
  2. lumen_app/core/__init__.py +0 -0
  3. lumen_app/core/config.py +661 -0
  4. lumen_app/core/installer.py +274 -0
  5. lumen_app/core/loader.py +45 -0
  6. lumen_app/core/router.py +87 -0
  7. lumen_app/core/server.py +389 -0
  8. lumen_app/core/service.py +49 -0
  9. lumen_app/core/tests/__init__.py +1 -0
  10. lumen_app/core/tests/test_core_integration.py +561 -0
  11. lumen_app/core/tests/test_env_checker.py +487 -0
  12. lumen_app/proto/README.md +12 -0
  13. lumen_app/proto/ml_service.proto +88 -0
  14. lumen_app/proto/ml_service_pb2.py +66 -0
  15. lumen_app/proto/ml_service_pb2.pyi +136 -0
  16. lumen_app/proto/ml_service_pb2_grpc.py +251 -0
  17. lumen_app/server.py +362 -0
  18. lumen_app/utils/env_checker.py +752 -0
  19. lumen_app/utils/installation/__init__.py +25 -0
  20. lumen_app/utils/installation/env_manager.py +152 -0
  21. lumen_app/utils/installation/micromamba_installer.py +459 -0
  22. lumen_app/utils/installation/package_installer.py +149 -0
  23. lumen_app/utils/installation/verifier.py +95 -0
  24. lumen_app/utils/logger.py +181 -0
  25. lumen_app/utils/mamba/cuda.yaml +12 -0
  26. lumen_app/utils/mamba/default.yaml +6 -0
  27. lumen_app/utils/mamba/openvino.yaml +7 -0
  28. lumen_app/utils/mamba/tensorrt.yaml +13 -0
  29. lumen_app/utils/package_resolver.py +309 -0
  30. lumen_app/utils/preset_registry.py +219 -0
  31. lumen_app/web/__init__.py +3 -0
  32. lumen_app/web/api/__init__.py +1 -0
  33. lumen_app/web/api/config.py +229 -0
  34. lumen_app/web/api/hardware.py +201 -0
  35. lumen_app/web/api/install.py +608 -0
  36. lumen_app/web/api/server.py +253 -0
  37. lumen_app/web/core/__init__.py +1 -0
  38. lumen_app/web/core/server_manager.py +348 -0
  39. lumen_app/web/core/state.py +264 -0
  40. lumen_app/web/main.py +145 -0
  41. lumen_app/web/models/__init__.py +28 -0
  42. lumen_app/web/models/config.py +63 -0
  43. lumen_app/web/models/hardware.py +64 -0
  44. lumen_app/web/models/install.py +134 -0
  45. lumen_app/web/models/server.py +95 -0
  46. lumen_app/web/static/assets/index-CGuhGHC9.css +1 -0
  47. lumen_app/web/static/assets/index-DN6HmxWS.js +56 -0
  48. lumen_app/web/static/index.html +14 -0
  49. lumen_app/web/static/vite.svg +1 -0
  50. lumen_app/web/websockets/__init__.py +1 -0
  51. lumen_app/web/websockets/logs.py +159 -0
  52. lumen_app-0.4.2.dist-info/METADATA +23 -0
  53. lumen_app-0.4.2.dist-info/RECORD +56 -0
  54. lumen_app-0.4.2.dist-info/WHEEL +5 -0
  55. lumen_app-0.4.2.dist-info/entry_points.txt +3 -0
  56. lumen_app-0.4.2.dist-info/top_level.txt +1 -0
lumen_app/core/tests/test_core_integration.py
@@ -0,0 +1,561 @@
+ """
+ Integration tests for Lumen App Core framework.
+
+ Tests the Config, ServiceLoader, HubRouter, and AppService components
+ using a minimal CPU preset configuration.
+ """
+
+ import tempfile
+ from pathlib import Path
+ from unittest.mock import MagicMock, patch
+
+ import pytest
+ from lumen_resources.lumen_config import Region, Runtime
+
+ import lumen_app.proto.ml_service_pb2 as pb
+ from lumen_app.core.config import Config, DeviceConfig
+ from lumen_app.core.loader import ServiceLoader
+ from lumen_app.core.router import HubRouter
+ from lumen_app.core.service import AppService
+
+ # =============================================================================
+ # Mock Service Classes
+ # =============================================================================
+
+
+ class MockService:
+     """Mock ML service implementing the gRPC Inference servicer interface."""
+
+     def __init__(self, name: str, supported_tasks: list[str]):
+         self.name = name
+         self._supported_tasks = supported_tasks
+
+     @classmethod
+     def from_config(cls, service_config, cache_dir: Path):
+         """Mock factory method matching real service interface."""
+         # Extract service name from package
+         package_name = service_config.package
+         return cls(name=package_name, supported_tasks=["embed", "classify"])
+
+     def get_supported_tasks(self) -> list[str]:
+         """Return list of task keys this service handles."""
+         return self._supported_tasks
+
+     async def Infer(self, request_iterator, context):
+         """Mock inference handler."""
+         requests = []
+         async for req in request_iterator:
+             requests.append(req)
+
+         # Return mock response
+         response = pb.InferResponse()
+         response.correlation_id = requests[0].correlation_id if requests else "test"
+         response.is_final = True
+         response.result = b'{"status": "ok"}'
+         response.result_mime = "application/json"
+         response.meta["service"] = self.name
+         yield response
+
+     async def GetCapabilities(self, request, context):
+         """Mock capability query."""
+         return pb.Capability(
+             service_name=self.name,
+             model_ids=["mock-model-v1"],
+             runtime="mock-runtime",
+             max_concurrency=1,
+             precisions=["fp32"],
+             protocol_version="1.0",
+         )
+
+
+ class MockRegistry:
+     """Mock registry class that returns MockService instances."""
+
+     @staticmethod
+     def from_config(service_config, cache_dir: Path):
+         return MockService(
+             name=service_config.package,
+             supported_tasks=["embed", "classify"],
+         )
+
+
+ # =============================================================================
+ # Config Tests
+ # =============================================================================
+
+
+ def test_device_config_cpu_preset():
+     """Test CPU preset device configuration."""
+     config = DeviceConfig.cpu()
+
+     assert config.runtime == Runtime.onnx
+     assert config.onnx_providers == ["CPUExecutionProvider"]
+     assert config.batch_size == 1
+     assert config.description == "Preset General CPUs"
+
+
+ def test_device_config_apple_silicon_preset():
+     """Test Apple Silicon preset configuration."""
+     config = DeviceConfig.apple_silicon()
+
+     assert config.runtime == Runtime.onnx
+     assert len(config.onnx_providers) == 2
+     # onnx_providers[0] is a tuple (provider_name, options)
+     assert config.onnx_providers[0][0] == "CoreMLExecutionProvider"
+     assert config.batch_size == 1
+
+
+ @pytest.mark.parametrize(
+     "preset_method,expected_runtime,expected_batch",
+     [
+         ("cpu", Runtime.onnx, 1),
+         ("apple_silicon", Runtime.onnx, 1),
+         ("nvidia_gpu", Runtime.onnx, 4),
+         ("nvidia_gpu_high", Runtime.onnx, None),
+         ("intel_gpu", Runtime.onnx, None),
+     ],
+ )
+ def test_device_config_presets(preset_method, expected_runtime, expected_batch):
+     """Test various device presets."""
+     method = getattr(DeviceConfig, preset_method)
+     config = method()
+
+     assert config.runtime == expected_runtime
+     assert config.batch_size == expected_batch
+
+
+ def test_config_minimal_preset():
+     """Test minimal configuration preset (OCR only)."""
+     with tempfile.TemporaryDirectory() as tmpdir:
+         device_config = DeviceConfig.cpu()
+         config = Config(
+             cache_dir=tmpdir,
+             device_config=device_config,
+             region=Region.cn,
+             service_name="lumen-test",
+             port=50051,
+         )
+
+         lumen_config = config.minimal()
+
+         assert lumen_config.metadata.region == Region.cn
+         assert lumen_config.server.port == 50051
+         assert lumen_config.server.mdns.enabled is True
+         assert lumen_config.deployment.mode == "hub"
+         assert len(lumen_config.deployment.services) == 1
+         assert lumen_config.deployment.services[0].root == "ocr"
+
+
+ def test_config_light_weight_preset():
+     """Test lightweight configuration preset (OCR + CLIP + Face)."""
+     with tempfile.TemporaryDirectory() as tmpdir:
+         device_config = DeviceConfig.cpu()
+         config = Config(
+             cache_dir=tmpdir,
+             device_config=device_config,
+             region=Region.other,  # Region has 'cn' and 'other', not 'us'
+             service_name="lumen-test",
+             port=None,
+         )
+
+         lumen_config = config.light_weight(clip_model="MobileCLIP2-S2")
+
+         assert len(lumen_config.deployment.services) == 3
+         service_roots = {s.root for s in lumen_config.deployment.services}
+         assert service_roots == {"ocr", "clip", "face"}
+
+
+ # =============================================================================
+ # ServiceLoader Tests
+ # =============================================================================
+
+
+ def test_service_loader_invalid_path():
+     """Test ServiceLoader with invalid class path."""
+     loader = ServiceLoader()
+
+     with pytest.raises(ValueError, match="Invalid class path"):
+         loader.get_class("")
+
+     with pytest.raises(ValueError, match="Invalid class path"):
+         loader.get_class("NoDotsInPath")
+
+
+ @patch("lumen_app.core.loader.importlib.import_module")
+ def test_service_loader_import_error(mock_import):
+     """Test ServiceLoader with module import failure."""
+     mock_import.side_effect = ImportError("Module not found")
+     loader = ServiceLoader()
+
+     # The loader wraps ImportError in its own error handling
+     with pytest.raises(ImportError):  # Just check it raises ImportError
+         loader.get_class("nonexistent.module.ClassName")
+
+
+ @patch("lumen_app.core.loader.importlib.import_module")
+ def test_service_loader_attribute_error(mock_import):
+     """Test ServiceLoader when class not found in module."""
+     mock_module = MagicMock(spec=[])  # No attributes, will cause getattr to fail
+     mock_import.return_value = mock_module
+
+     loader = ServiceLoader()
+
+     # Should raise AttributeError when getattr fails
+     with pytest.raises(AttributeError):
+         loader.get_class("valid.module.DoesNotExist")
+
+
+ def test_service_loader_success_with_real_module():
+     """Test ServiceLoader with a real Python module."""
+     loader = ServiceLoader()
+
+     # Test with a real built-in class
+     cls = loader.get_class("builtins.str")
+     assert cls is str
+
+
+ # =============================================================================
+ # HubRouter Tests
+ # =============================================================================
+
+
+ @pytest.mark.asyncio
+ async def test_router_route_table_building():
+     """Test HubRouter builds correct route table from services."""
+     service1 = MockService("service1", ["task_a", "task_b"])
+     service2 = MockService("service2", ["task_c"])
+
+     router = HubRouter(services=[service1, service2])
+
+     assert router._route_table["task_a"] is service1
+     assert router._route_table["task_b"] is service1
+     assert router._route_table["task_c"] is service2
+
+
+ @pytest.mark.asyncio
+ async def test_router_infer_dispatch():
+     """Test HubRouter dispatches inference requests correctly."""
+     service = MockService("test-service", ["embed"])
+     router = HubRouter(services=[service])
+
+     # Create mock request
+     request = pb.InferRequest()
+     request.task = "embed"
+     request.correlation_id = "test-123"
+     request.payload = b'{"text": "hello"}'
+
+     async def request_generator():
+         yield request
+
+     # Mock context
+     context = MagicMock()
+
+     # Collect responses
+     responses = []
+     async for resp in router.Infer(request_generator(), context):
+         responses.append(resp)
+
+     assert len(responses) == 1
+     assert responses[0].correlation_id == "test-123"
+     assert responses[0].meta["service"] == "test-service"
+
+
+ @pytest.mark.asyncio
+ async def test_router_task_not_found():
+     """Test HubRouter returns NOT_FOUND for unsupported tasks."""
+     service = MockService("test-service", ["embed"])
+     router = HubRouter(services=[service])
+
+     request = pb.InferRequest()
+     request.task = "unsupported_task"
+
+     async def request_generator():
+         yield request
+
+     # Mock context with abort method
+     context = MagicMock()
+     context.abort = MagicMock(side_effect=Exception("Aborted with NOT_FOUND"))
+
+     with pytest.raises(Exception, match="Aborted with NOT_FOUND"):
+         async for _ in router.Infer(request_generator(), context):
+             pass
+
+
+ @pytest.mark.asyncio
+ async def test_router_get_capabilities_aggregation():
+     """Test HubRouter aggregates capabilities from all services."""
+     service1 = MockService("service1", ["task_a"])
+     service2 = MockService("service2", ["task_b"])
+
+     router = HubRouter(services=[service1, service2])
+
+     # Import Empty from google.protobuf.empty
+     from google.protobuf import empty_pb2
+
+     request = empty_pb2.Empty()
+     context = MagicMock()
+
+     caps = await router.GetCapabilities(request, context)
+
+     # Should aggregate tasks from both services
+     assert len(caps.tasks) >= 0  # Mock returns empty, real would have tasks
+
+
+ # =============================================================================
+ # AppService Integration Tests
+ # =============================================================================
+
+
+ @pytest.mark.asyncio
+ async def test_app_service_from_config_with_mocks():
+     """Test AppService initialization from config using mocked services."""
+     with tempfile.TemporaryDirectory() as tmpdir:
+         device_config = DeviceConfig.cpu()
+         config_obj = Config(
+             cache_dir=tmpdir,
+             device_config=device_config,
+             region=Region.cn,
+             service_name="lumen-test",
+             port=50051,
+         )
+         lumen_config = config_obj.minimal()
+
+         # Patch the loader to return our mock service
+         with patch.object(ServiceLoader, "get_class", return_value=MockRegistry):
+             app_service = AppService.from_app_config(lumen_config)
+
+         assert len(app_service.services) == 1
+         assert app_service.config is lumen_config
+         assert isinstance(app_service.services[0], MockService)
+
+
+ @pytest.mark.asyncio
+ async def test_app_service_router_forwarding():
+     """Test end-to-end request flow through AppService."""
+     with tempfile.TemporaryDirectory() as tmpdir:
+         device_config = DeviceConfig.cpu()
+         config_obj = Config(
+             cache_dir=tmpdir,
+             device_config=device_config,
+             region=Region.cn,
+             service_name="lumen-test",
+             port=50051,
+         )
+         lumen_config = config_obj.minimal()
+
+         with patch.object(ServiceLoader, "get_class", return_value=MockRegistry):
+             app_service = AppService.from_app_config(lumen_config)
+
+         # Create request
+         request = pb.InferRequest()
+         request.task = "embed"
+         request.correlation_id = "integration-test"
+         request.payload = b"test data"
+
+         async def request_generator():
+             yield request
+
+         context = MagicMock()
+
+         # Send through router
+         responses = []
+         async for resp in app_service.router.Infer(request_generator(), context):
+             responses.append(resp)
+
+         assert len(responses) == 1
+         assert responses[0].correlation_id == "integration-test"
+
+
+ # =============================================================================
+ # Edge Cases and Error Handling
+ # =============================================================================
+
+
+ @pytest.mark.asyncio
+ async def test_router_empty_request_stream():
+     """Test HubRouter handles empty request stream."""
+     service = MockService("test", ["embed"])
+     router = HubRouter(services=[service])
+
+     async def empty_generator():
+         return
+         yield  # Never reached
+
+     context = MagicMock()
+
+     # Should handle gracefully (returns immediately)
+     result = []
+     async for resp in router.Infer(empty_generator(), context):
+         result.append(resp)
+
+
+ # =============================================================================
+ # Cache Path Resolution Tests
+ # =============================================================================
+
+
+ def test_cache_path_resolution_intel_gpu():
+     """Test that Intel GPU preset accepts cache_dir and builds absolute paths."""
+     with tempfile.TemporaryDirectory() as tmpdir:
+         # Pass cache_dir directly to the preset method
+         device_config = DeviceConfig.intel_gpu(cache_dir=tmpdir)
+         config_obj = Config(
+             cache_dir=tmpdir,
+             device_config=device_config,
+             region=Region.other,
+             service_name="lumen-test",
+             port=50051,
+         )
+
+         # Check onnx_providers has absolute paths
+         providers = config_obj.device_config.onnx_providers
+         assert providers is not None
+
+         # Find OpenVINO provider
+         openvino_provider = None
+         for p in providers:
+             if isinstance(p, tuple) and p[0] == "OpenVINOExecutionProvider":
+                 openvino_provider = p
+                 break
+
+         assert openvino_provider is not None
+         cache_dir = openvino_provider[1]["cache_dir"]
+
+         # Should be absolute path under tmpdir, not "./cache/ov"
+         assert cache_dir.startswith(tmpdir)
+         assert cache_dir.endswith("cache/ov")
+
+
+ def test_cache_path_resolution_nvidia_high():
+     """Test that NVIDIA high-ram preset accepts cache_dir and builds absolute paths."""
+     with tempfile.TemporaryDirectory() as tmpdir:
+         # Pass cache_dir directly to the preset method
+         device_config = DeviceConfig.nvidia_gpu_high(cache_dir=tmpdir)
+         config_obj = Config(
+             cache_dir=tmpdir,
+             device_config=device_config,
+             region=Region.other,
+             service_name="lumen-test",
+             port=50051,
+         )
+
+         # Check onnx_providers has absolute paths
+         providers = config_obj.device_config.onnx_providers
+         assert providers is not None
+
+         # Find TensorRT provider
+         trt_provider = None
+         for p in providers:
+             if isinstance(p, tuple) and p[0] == "TensorRTExecutionProvider":
+                 trt_provider = p
+                 break
+
+         assert trt_provider is not None
+         cache_path = trt_provider[1]["trt_engine_cache_path"]
+
+         # Should be absolute path under tmpdir, not "./cache/trt"
+         assert cache_path.startswith(tmpdir)
+         assert cache_path.endswith("cache/trt")
+
+
+ def test_cache_path_resolution_apple_silicon():
+     """Test that Apple Silicon preset accepts cache_dir and builds absolute paths."""
+     with tempfile.TemporaryDirectory() as tmpdir:
+         # Pass cache_dir directly to the preset method
+         device_config = DeviceConfig.apple_silicon(cache_dir=tmpdir)
+         config_obj = Config(
+             cache_dir=tmpdir,
+             device_config=device_config,
+             region=Region.other,
+             service_name="lumen-test",
+             port=50051,
+         )
+
+         # Check onnx_providers has absolute paths
+         providers = config_obj.device_config.onnx_providers
+         assert providers is not None
+
+         # Find CoreML provider
+         coreml_provider = None
+         for p in providers:
+             if isinstance(p, tuple) and p[0] == "CoreMLExecutionProvider":
+                 coreml_provider = p
+                 break
+
+         assert coreml_provider is not None
+         model_cache_dir = coreml_provider[1]["ModelCacheDirectory"]
+
+         # Should be absolute path under tmpdir, not "./cache/coreml"
+         assert model_cache_dir.startswith(tmpdir)
+         assert model_cache_dir.endswith("cache/coreml")
+
+
+ def test_cache_path_resolution_cpu_no_relative_paths():
+     """Test that CPU preset (no cache paths) is handled correctly."""
+     with tempfile.TemporaryDirectory() as tmpdir:
+         device_config = DeviceConfig.cpu()
+         config_obj = Config(
+             cache_dir=tmpdir,
+             device_config=device_config,
+             region=Region.other,
+             service_name="lumen-test",
+             port=50051,
+         )
+
+         # CPU provider has no cache configs, should pass through unchanged
+         providers = config_obj.device_config.onnx_providers
+         assert providers == ["CPUExecutionProvider"]
+
+
+ def test_cache_path_resolution_applied_to_config():
+     """Test that cache paths from preset are applied to generated LumenConfig."""
+     with tempfile.TemporaryDirectory() as tmpdir:
+         # Pass cache_dir directly to the preset method
+         device_config = DeviceConfig.intel_gpu(cache_dir=tmpdir)
+         config_obj = Config(
+             cache_dir=tmpdir,
+             device_config=device_config,
+             region=Region.other,
+             service_name="lumen-test",
+             port=50051,
+         )
+
+         lumen_config = config_obj.minimal()
+
+         # Check that the service config has the preset providers
+         ocr_service = lumen_config.services["ocr"]
+         backend_settings = ocr_service.backend_settings
+
+         assert backend_settings.onnx_providers is not None
+
+         # Verify OpenVINO provider has absolute cache path
+         openvino_provider = None
+         for p in backend_settings.onnx_providers:
+             if isinstance(p, tuple) and p[0] == "OpenVINOExecutionProvider":
+                 openvino_provider = p
+                 break
+
+         assert openvino_provider is not None
+         cache_dir = openvino_provider[1]["cache_dir"]
+         assert cache_dir.startswith(tmpdir)
+         assert cache_dir.endswith("cache/ov")
+
+
+ def test_multiple_device_presets_independence():
+     """Test that multiple device configs don't share state."""
+     cpu_config = DeviceConfig.cpu()
+     gpu_config = DeviceConfig.nvidia_gpu()
+
+     assert cpu_config.runtime == Runtime.onnx
+     assert gpu_config.runtime == Runtime.onnx
+     assert cpu_config.batch_size == 1
+     assert gpu_config.batch_size == 4
+
+     # Modifying one shouldn't affect the other
+     cpu_config.batch_size = 999
+     assert cpu_config.batch_size == 999
+     assert gpu_config.batch_size == 4
+
+
+ if __name__ == "__main__":
+     pytest.main([__file__, "-v"])
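
Read together, these tests document the intended wiring of the core framework: a DeviceConfig preset plus a Config produce a LumenConfig via minimal() or light_weight(), AppService.from_app_config() loads the configured services, and the hub's router.Infer() streams InferRequest/InferResponse messages keyed by task. The sketch below assembles those same calls outside pytest as a rough usage outline; it is an assumption-laden sketch, not documented API. It presumes the real OCR service package can be loaded (the tests patch ServiceLoader instead), the "ocr" task key and payload bytes are placeholders, and the MagicMock context stands in for whatever ServicerContext the package's gRPC server (presumably lumen_app/core/server.py) would normally supply.

import asyncio
import tempfile
from unittest.mock import MagicMock

import lumen_app.proto.ml_service_pb2 as pb
from lumen_app.core.config import Config, DeviceConfig
from lumen_app.core.service import AppService
from lumen_resources.lumen_config import Region


async def main() -> None:
    # Same construction path as test_app_service_router_forwarding above,
    # but without patching ServiceLoader, so the real service must resolve.
    with tempfile.TemporaryDirectory() as cache_dir:
        config = Config(
            cache_dir=cache_dir,
            device_config=DeviceConfig.cpu(),
            region=Region.other,
            service_name="lumen-demo",
            port=50051,
        )
        app_service = AppService.from_app_config(config.minimal())

        request = pb.InferRequest()
        request.task = "ocr"  # placeholder task key; the tests only exercise mocked tasks
        request.correlation_id = "demo-1"
        request.payload = b"..."  # payload format is service-specific

        async def request_stream():
            yield request

        # A MagicMock context mirrors the tests; a running gRPC server would
        # pass a real ServicerContext here.
        async for response in app_service.router.Infer(request_stream(), MagicMock()):
            print(response.correlation_id, response.result_mime)


if __name__ == "__main__":
    asyncio.run(main())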