lumen-resources 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35) hide show
  1. lumen_resources-0.2.0/.gitignore +64 -0
  2. lumen_resources-0.2.0/PKG-INFO +133 -0
  3. lumen_resources-0.2.0/README.md +109 -0
  4. lumen_resources-0.2.0/docs/examples/clip_torch_cn.yaml +37 -0
  5. lumen_resources-0.2.0/docs/examples/hub-service.yaml +69 -0
  6. lumen_resources-0.2.0/docs/examples/model_info_template.json +48 -0
  7. lumen_resources-0.2.0/docs/examples/single-service.yaml +40 -0
  8. lumen_resources-0.2.0/pyproject.toml +57 -0
  9. lumen_resources-0.2.0/setup.cfg +4 -0
  10. lumen_resources-0.2.0/src/lumen_resources/__init__.py +89 -0
  11. lumen_resources-0.2.0/src/lumen_resources/cli.py +402 -0
  12. lumen_resources-0.2.0/src/lumen_resources/downloader.py +449 -0
  13. lumen_resources-0.2.0/src/lumen_resources/exceptions.py +110 -0
  14. lumen_resources-0.2.0/src/lumen_resources/lumen_config.py +459 -0
  15. lumen_resources-0.2.0/src/lumen_resources/lumen_config_validator.py +270 -0
  16. lumen_resources-0.2.0/src/lumen_resources/model_info.py +233 -0
  17. lumen_resources-0.2.0/src/lumen_resources/model_info_validator.py +257 -0
  18. lumen_resources-0.2.0/src/lumen_resources/platform.py +270 -0
  19. lumen_resources-0.2.0/src/lumen_resources/result_schemas/README.md +14 -0
  20. lumen_resources-0.2.0/src/lumen_resources/result_schemas/__init__.py +14 -0
  21. lumen_resources-0.2.0/src/lumen_resources/result_schemas/embedding_v1.py +29 -0
  22. lumen_resources-0.2.0/src/lumen_resources/result_schemas/face_v1.py +55 -0
  23. lumen_resources-0.2.0/src/lumen_resources/result_schemas/labels_v1.py +39 -0
  24. lumen_resources-0.2.0/src/lumen_resources/schemas/config-schema.yaml +249 -0
  25. lumen_resources-0.2.0/src/lumen_resources/schemas/model_info-schema.json +166 -0
  26. lumen_resources-0.2.0/src/lumen_resources/schemas/result_schemas/embedding_v1.json +35 -0
  27. lumen_resources-0.2.0/src/lumen_resources/schemas/result_schemas/face_v1.json +61 -0
  28. lumen_resources-0.2.0/src/lumen_resources/schemas/result_schemas/labels_v1.json +49 -0
  29. lumen_resources-0.2.0/src/lumen_resources.egg-info/PKG-INFO +133 -0
  30. lumen_resources-0.2.0/src/lumen_resources.egg-info/SOURCES.txt +33 -0
  31. lumen_resources-0.2.0/src/lumen_resources.egg-info/dependency_links.txt +1 -0
  32. lumen_resources-0.2.0/src/lumen_resources.egg-info/entry_points.txt +2 -0
  33. lumen_resources-0.2.0/src/lumen_resources.egg-info/requires.txt +15 -0
  34. lumen_resources-0.2.0/src/lumen_resources.egg-info/top_level.txt +1 -0
  35. lumen_resources-0.2.0/uv.lock +1320 -0
@@ -0,0 +1,64 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ share/python-wheels/
20
+ *.egg-info/
21
+ .installed.cfg
22
+ *.egg
23
+ Pipfile.lock
24
+
25
+ # Virtual environments
26
+ .env
27
+ .venv
28
+ env/
29
+ venv/
30
+ ENV/
31
+ env.bak/
32
+ venv.bak/
33
+
34
+ # Testing
35
+ .pytest_cache/
36
+ .coverage
37
+ .coverage.*
38
+ htmlcov/
39
+ .tox/
40
+ .nox/
41
+
42
+ # IDEs
43
+ .idea/
44
+ *.swp
45
+ *.swo
46
+ *~
47
+ .DS_Store
48
+ .vscode/
49
+
50
+ # Project specific
51
+ .cache/
52
+ models/
53
+ *.npz
54
+ *.onnx
55
+ *.rknn
56
+ *.pt
57
+
58
+ # Temporary files
59
+ *.tmp
60
+ *.bak
61
+ *.log
62
+
63
+ # UV - keep lock file for reproducible builds
64
+ .uv/
@@ -0,0 +1,133 @@
1
+ Metadata-Version: 2.4
2
+ Name: lumen-resources
3
+ Version: 0.2.0
4
+ Summary: Unified model resource management for Lumen ML services
5
+ Author-email: EdwinZhanCN <support@lumilio.org>
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/EdwinZhanCN/Lumen
8
+ Project-URL: Issues, https://github.com/EdwinZhanCN/Lumen/issues
9
+ Requires-Python: >=3.10
10
+ Description-Content-Type: text/markdown
11
+ Requires-Dist: huggingface_hub>=0.20.0
12
+ Requires-Dist: modelscope>=1.11.0
13
+ Requires-Dist: pydantic>=2.0.0
14
+ Requires-Dist: jsonschema>=4.0.0
15
+ Provides-Extra: dev
16
+ Requires-Dist: pytest>=7.0.0; extra == "dev"
17
+ Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
18
+ Requires-Dist: black>=23.0.0; extra == "dev"
19
+ Requires-Dist: ruff>=0.1.0; extra == "dev"
20
+ Requires-Dist: datamodel-code-generator[http]>=0.25.0; extra == "dev"
21
+ Requires-Dist: pre-commit>=3.0.0; extra == "dev"
22
+ Provides-Extra: config
23
+ Requires-Dist: pyyaml>=6.0.0; extra == "config"
24
+
25
+ # Lumen Resources
26
+
27
+ Lightweight tooling for shipping Lumen ML services. This package centralizes how models are described, validated, downloaded, and cached so every service (CLIP, face, etc.) follows the same playbook—whether weights live on Hugging Face, ModelScope, or a private registry.
28
+
29
+ ## Why use it?
30
+
31
+ - **Single source of truth** – YAML configs describing deployments, devices, runtimes, and model aliases.
32
+ - **Schema-backed validation** – JSON Schema plus Pydantic to catch errors before runtime.
33
+ - **Cross-platform downloads** – Intelligent routing between Hugging Face and ModelScope with caching/resume support.
34
+ - **CLI + Python API** – Automate in CI or embed in service bootstraps.
35
+ - **Result schemas** – Typed response validators (`EmbeddingV1`, `FaceV1`, `LabelsV1`) for downstream services.
36
+
37
+ ## Installation
38
+
39
+ ```bash
40
+ # project install
41
+ pip install "lumen-resources @ git+https://github.com/EdwinZhanCN/Lumen.git@main#subdirectory=lumen-resources"
42
+
43
+ # dev install
44
+ git clone https://github.com/EdwinZhanCN/Lumen.git
45
+ cd Lumen/lumen-resources
46
+ pip install -e ".[dev,config]"
47
+ ```
48
+
49
+ Optional extras depending on your targets:
50
+
51
+ ```bash
52
+ pip install huggingface_hub
53
+ pip install modelscope
54
+ pip install torch torchvision
55
+ pip install onnxruntime
56
+ ```
57
+
58
+ ## Usage
59
+
60
+ ### CLI
61
+
62
+ ```bash
63
+ # download everything defined in config.yaml
64
+ lumen-resources download config.yaml
65
+
66
+ # strict config validation
67
+ lumen-resources validate config.yaml
68
+
69
+ # validate a model_info.json
70
+ lumen-resources validate-model-info path/to/model_info.json
71
+
72
+ # inspect cache contents (defaults to ~/.lumen/)
73
+ lumen-resources list ~/.lumen/
74
+ ```
75
+
76
+ ### Python API
77
+
78
+ ```python
79
+ from lumen_resources import (
80
+ load_and_validate_config,
81
+ Downloader,
82
+ load_and_validate_model_info,
83
+ EmbeddingV1,
84
+ )
85
+
86
+ config = load_and_validate_config("config.yaml")
87
+ downloader = Downloader(config, verbose=True)
88
+ results = downloader.download_all(force=False)
89
+
90
+ model_info = load_and_validate_model_info("model_info.json")
91
+ ```
92
+
93
+ ## Configuration essentials
94
+
95
+ ```yaml
96
+ metadata:
97
+ region: "other" # or "cn" to prefer ModelScope
98
+ cache_dir: "~/.lumen/models"
99
+
100
+ deployment:
101
+ mode: "single" # or "hub"
102
+ service: "clip"
103
+
104
+ services:
105
+ clip:
106
+ enabled: true
107
+ package: "lumen_clip"
108
+ backend_settings:
109
+ device: "cuda"
110
+ batch_size: 16
111
+ onnx_providers: ["CUDAExecutionProvider", "CPUExecutionProvider"]
112
+ models:
113
+ default:
114
+ model: "ViT-B-32"
115
+ runtime: "torch"
116
+ fp16:
117
+ model: "ViT-B-32"
118
+ runtime: "onnx"
119
+ ```
120
+
121
+ - `metadata.region` decides whether downloads prefer ModelScope or Hugging Face.
122
+ - `backend_settings` lets you declare execution providers, batch sizes, devices, etc.
123
+ - Each entry in `models` becomes a cache namespace (`clip/default`, `clip/fp16`, …).
124
+
125
+ ## Reference
126
+
127
+ - Source: `src/lumen_resources/`
128
+ - `lumen_config.py` – Typed config models
129
+ - `downloader.py` – Platform abstraction + caching
130
+ - `cli.py` – Command entrypoint
131
+ - `result_schemas/` – Response validators
132
+ - Docs: https://doc.lumilio.org
133
+ - Issues & support: open a ticket in the main Lumen monorepo.
@@ -0,0 +1,109 @@
1
+ # Lumen Resources
2
+
3
+ Lightweight tooling for shipping Lumen ML services. This package centralizes how models are described, validated, downloaded, and cached so every service (CLIP, face, etc.) follows the same playbook—whether weights live on Hugging Face, ModelScope, or a private registry.
4
+
5
+ ## Why use it?
6
+
7
+ - **Single source of truth** – YAML configs describing deployments, devices, runtimes, and model aliases.
8
+ - **Schema-backed validation** – JSON Schema plus Pydantic to catch errors before runtime.
9
+ - **Cross-platform downloads** – Intelligent routing between Hugging Face and ModelScope with caching/resume support.
10
+ - **CLI + Python API** – Automate in CI or embed in service bootstraps.
11
+ - **Result schemas** – Typed response validators (`EmbeddingV1`, `FaceV1`, `LabelsV1`) for downstream services.
12
+
13
+ ## Installation
14
+
15
+ ```bash
16
+ # project install
17
+ pip install "lumen-resources @ git+https://github.com/EdwinZhanCN/Lumen.git@main#subdirectory=lumen-resources"
18
+
19
+ # dev install
20
+ git clone https://github.com/EdwinZhanCN/Lumen.git
21
+ cd Lumen/lumen-resources
22
+ pip install -e ".[dev,config]"
23
+ ```
24
+
25
+ Optional extras depending on your targets:
26
+
27
+ ```bash
28
+ pip install huggingface_hub
29
+ pip install modelscope
30
+ pip install torch torchvision
31
+ pip install onnxruntime
32
+ ```
33
+
34
+ ## Usage
35
+
36
+ ### CLI
37
+
38
+ ```bash
39
+ # download everything defined in config.yaml
40
+ lumen-resources download config.yaml
41
+
42
+ # strict config validation
43
+ lumen-resources validate config.yaml
44
+
45
+ # validate a model_info.json
46
+ lumen-resources validate-model-info path/to/model_info.json
47
+
48
+ # inspect cache contents (defaults to ~/.lumen/)
49
+ lumen-resources list ~/.lumen/
50
+ ```
51
+
52
+ ### Python API
53
+
54
+ ```python
55
+ from lumen_resources import (
56
+ load_and_validate_config,
57
+ Downloader,
58
+ load_and_validate_model_info,
59
+ EmbeddingV1,
60
+ )
61
+
62
+ config = load_and_validate_config("config.yaml")
63
+ downloader = Downloader(config, verbose=True)
64
+ results = downloader.download_all(force=False)
65
+
66
+ model_info = load_and_validate_model_info("model_info.json")
67
+ ```
68
+
69
+ ## Configuration essentials
70
+
71
+ ```yaml
72
+ metadata:
73
+ region: "other" # or "cn" to prefer ModelScope
74
+ cache_dir: "~/.lumen/models"
75
+
76
+ deployment:
77
+ mode: "single" # or "hub"
78
+ service: "clip"
79
+
80
+ services:
81
+ clip:
82
+ enabled: true
83
+ package: "lumen_clip"
84
+ backend_settings:
85
+ device: "cuda"
86
+ batch_size: 16
87
+ onnx_providers: ["CUDAExecutionProvider", "CPUExecutionProvider"]
88
+ models:
89
+ default:
90
+ model: "ViT-B-32"
91
+ runtime: "torch"
92
+ fp16:
93
+ model: "ViT-B-32"
94
+ runtime: "onnx"
95
+ ```
96
+
97
+ - `metadata.region` decides whether downloads prefer ModelScope or Hugging Face.
98
+ - `backend_settings` lets you declare execution providers, batch sizes, devices, etc.
99
+ - Each entry in `models` becomes a cache namespace (`clip/default`, `clip/fp16`, …).
100
+
101
+ ## Reference
102
+
103
+ - Source: `src/lumen_resources/`
104
+ - `lumen_config.py` – Typed config models
105
+ - `downloader.py` – Platform abstraction + caching
106
+ - `cli.py` – Command entrypoint
107
+ - `result_schemas/` – Response validators
108
+ - Docs: https://doc.lumilio.org
109
+ - Issues & support: open a ticket in the main Lumen monorepo.
@@ -0,0 +1,37 @@
1
+ # Lumen Services Configuration - Single Service Mode
2
+ # yaml-language-server: $schema=https://doc.lumilio.org/schema/config-schema.yaml
3
+
4
+ metadata:
5
+ version: "1.0.0"
6
+ region: "cn" # "cn" for ModelScope, "other" for HuggingFace
7
+ cache_dir: "~/Lumen-Resources"
8
+
9
+ deployment:
10
+ mode: "single"
11
+ service: "clip"
12
+
13
+ server:
14
+ port: 50051
15
+ host: "0.0.0.0"
16
+ mdns:
17
+ enabled: true
18
+ service_name: "lumen-clip"
19
+
20
+ services:
21
+ # CLIP Service
22
+ clip:
23
+ enabled: true
24
+ package: "lumen_clip"
25
+ import:
26
+ registry_class: "lumen_clip.service_registry.CLIPService" # NOTE(review): other example configs use "ClipService" — confirm the actual class name casing
27
+ add_to_server: "lumen_clip.proto.ml_service_pb2_grpc.add_InferenceServicer_to_server"
28
+ backend_settings:
29
+ device: "mps"
30
+ onnx_providers:
31
+ - "CoreMLExecutionProvider"
32
+ batch_size: 8
33
+ models:
34
+ general:
35
+ model: "MobileCLIP2-S2" # CN-CLIP_ViT-L-14
36
+ runtime: torch
37
+ dataset: ImageNet_1k
@@ -0,0 +1,69 @@
1
+ # Lumen Services Configuration - Hub Mode
2
+ # yaml-language-server: $schema=https://doc.lumilio.org/schema/config-schema.yaml
3
+
4
+ metadata:
5
+ version: "1.0.0"
6
+ region: "other" # "cn" for ModelScope, "other" for HuggingFace
7
+ cache_dir: "/opt/lumen/models"
8
+
9
+ deployment:
10
+ mode: "hub"
11
+ services: ["clip", "face"]
12
+
13
+ server:
14
+ port: 50051
15
+ host: "0.0.0.0"
16
+ mdns:
17
+ enabled: true
18
+ service_name: "lumen-hub"
19
+
20
+ services:
21
+ # CLIP Service
22
+ clip:
23
+ enabled: true
24
+ package: "lumen_clip"
25
+ import:
26
+ registry_class: "lumen_clip.service_registry.ClipService"
27
+ add_to_server: "lumen_clip.proto.ml_service_pb2_grpc.add_InferenceServicer_to_server"
28
+
29
+ models:
30
+ # Default general-purpose CLIP model
31
+ default:
32
+ model: "ViT-B-32"
33
+ runtime: "torch"
34
+ dataset: "ImageNet_1k"
35
+
36
+ # Mobile-optimized model
37
+ mobile:
38
+ model: "MobileCLIP-S2"
39
+ runtime: "onnx"
40
+
41
+ # Face Recognition Service
42
+ face:
43
+ enabled: true
44
+ package: "lumen_face"
45
+ import:
46
+ registry_class: "lumen_face.service_registry.FaceService"
47
+ add_to_server: "lumen_face.proto.ml_service_pb2_grpc.add_FaceServicer_to_server"
48
+
49
+ models:
50
+ default:
51
+ model: "FaceNet-512"
52
+ runtime: "onnx"
53
+
54
+ mobile:
55
+ model: "MobileFaceNet"
56
+ runtime: "onnx"
57
+
58
+ # OCR Service (disabled, but configured)
59
+ ocr:
60
+ enabled: false
61
+ package: "lumen_ocr"
62
+ import:
63
+ registry_class: "lumen_ocr.service_registry.OcrService"
64
+ add_to_server: "lumen_ocr.proto.ml_service_pb2_grpc.add_OcrServicer_to_server"
65
+
66
+ models:
67
+ default:
68
+ model: "PaddleOCR-v4"
69
+ runtime: "onnx"
@@ -0,0 +1,48 @@
1
+ {
2
+ "name": "CN-CLIP_ViT-L-14",
3
+ "version": "1.0.0",
4
+ "description": "OFA-Sys/chinese-clip-vit-large-patch14 Chinese CLIP model for chinese-specific tasks",
5
+ "model_type": "chinese_clip",
6
+ "embedding_dim": 768,
7
+ "source": {
8
+ "format": "huggingface",
9
+ "repo_id": "OFA-Sys/chinese-clip-vit-large-patch14"
10
+ },
11
+ "runtimes": {
12
+ "torch": {
13
+ "available": true,
14
+ "files": ["torch/model.pt"]
15
+ },
16
+ "onnx": {
17
+ "available": true,
18
+ "files": [
19
+ "onnx/text.fp32.onnx",
20
+ "onnx/text.fp16.onnx",
21
+ "onnx/vision.fp32.onnx",
22
+ "onnx/vision.fp16.onnx"
23
+ ]
24
+ },
25
+ "rknn": {
26
+ "available": true,
27
+ "devices": ["rk3566", "rk3588"],
28
+ "files": {
29
+ "rk3566": [
30
+ "rknn/rk3566/text.fp16.rknn",
31
+ "rknn/rk3566/vision.fp16.rknn"
32
+ ],
33
+ "rk3588": [
34
+ "rknn/rk3588/text.fp16.rknn",
35
+ "rknn/rk3588/vision.fp16.rknn"
36
+ ]
37
+ }
38
+ }
39
+ },
40
+ "datasets": {
41
+ "ImageNet_1k": "ImageNet_1k.npy"
42
+ },
43
+ "metadata": {
44
+ "license": "Apache License 2.0",
45
+ "author": "Lumilio Photos",
46
+ "created_at": "2025-10-16"
47
+ }
48
+ }
@@ -0,0 +1,40 @@
1
+ # Lumen Services Configuration - Single Service Mode
2
+ # yaml-language-server: $schema=https://doc.lumilio.org/schema/config-schema.yaml
3
+
4
+ metadata:
5
+ version: "1.0.0"
6
+ region: "other" # "cn" for ModelScope, "other" for HuggingFace
7
+ cache_dir: "~/.lumen/models"
8
+
9
+ deployment:
10
+ mode: "single"
11
+ service: "clip"
12
+
13
+ server:
14
+ port: 50051
15
+ host: "0.0.0.0"
16
+ mdns:
17
+ enabled: true
18
+ service_name: "lumen-clip"
19
+
20
+ services:
21
+ # CLIP Service
22
+ clip:
23
+ enabled: true
24
+ package: "lumen_clip"
25
+ import:
26
+ registry_class: "lumen_clip.service_registry.ClipService"
27
+ add_to_server: "lumen_clip.proto.ml_service_pb2_grpc.add_InferenceServicer_to_server"
28
+
29
+ models:
30
+ # Default general-purpose CLIP model
31
+ default:
32
+ model: "ViT-B-32"
33
+ runtime: "torch"
34
+ dataset: "ImageNet_1k"
35
+
36
+ # Chinese CLIP model
37
+ cn_clip:
38
+ model: "CN-CLIP-ViT-B-16"
39
+ runtime: "torch"
40
+ dataset: "ImageNet_1k"
@@ -0,0 +1,57 @@
1
+ [project]
2
+ name = "lumen-resources"
3
+ dynamic = ["version"]
4
+ description = "Unified model resource management for Lumen ML services"
5
+ readme = "README.md"
6
+ requires-python = ">=3.10"
7
+ license = { text = "MIT" }
8
+ authors = [
9
+ { name = "EdwinZhanCN", email = "support@lumilio.org" }
10
+ ]
11
+ dependencies = [
12
+ "huggingface_hub>=0.20.0",
13
+ "modelscope>=1.11.0",
14
+ "pydantic>=2.0.0",
15
+ "jsonschema>=4.0.0",
16
+ ]
17
+
18
+ [project.urls]
19
+ Homepage = "https://github.com/EdwinZhanCN/Lumen"
20
+ Issues = "https://github.com/EdwinZhanCN/Lumen/issues"
21
+
22
+ [project.optional-dependencies]
23
+ dev = [
24
+ "pytest>=7.0.0",
25
+ "pytest-cov>=4.0.0",
26
+ "black>=23.0.0",
27
+ "ruff>=0.1.0",
28
+ "datamodel-code-generator[http]>=0.25.0",
29
+ "pre-commit>=3.0.0",
30
+ ]
31
+ config = ["pyyaml>=6.0.0"]
32
+
33
+ [project.scripts]
34
+ lumen-resources = "lumen_resources.cli:main"
35
+
36
+ [tool.ruff.lint]
37
+ select = ["E", "F", "W", "I", "N"]
38
+ ignore = []
39
+
40
+ [tool.pytest.ini_options]
41
+ testpaths = ["tests"]
42
+ python_files = ["test_*.py"]
43
+ python_functions = ["test_*"]
44
+
45
+ [build-system]
46
+ requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2"]
47
+ build-backend = "setuptools.build_meta"
48
+
49
+ [tool.setuptools_scm]
50
+ fallback_version = "0.0.0"
51
+ search_parent_directories = true
52
+
53
+ [tool.setuptools.packages.find]
54
+ where = ["src"]
55
+
56
+ [tool.setuptools.package-data]
57
+ "lumen_resources" = ["schemas/*.yaml", "schemas/*.json"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,89 @@
"""Lumen Resources - Unified Model Resource Management.

Configuration-driven tool for managing ML model resources with production-grade
YAML configuration, JSON Schema validation, and Pydantic models. Provides a
unified interface for downloading models from multiple platforms including
HuggingFace Hub and ModelScope.

This package offers:
- Configuration-driven YAML setup for ML model resources
- Multi-platform support (HuggingFace Hub, ModelScope)
- Runtime flexibility (ONNX, PyTorch, TensorFlow, RKNN)
- Production-grade validation using JSON Schema and Pydantic
- CLI interface for command-line operations
- Programmatic API for integration into other applications

Example:
    >>> from lumen_resources import load_and_validate_config, Downloader
    >>>
    >>> # Load and validate configuration
    >>> config = load_and_validate_config("config.yaml")
    >>>
    >>> # Download models
    >>> downloader = Downloader(config, verbose=True)
    >>> results = downloader.download_all()
    >>>
    >>> # Check results
    >>> for model_type, result in results.items():
    ...     if result.success:
    ...         print(f"Downloaded: {model_type} to {result.model_path}")
    ...     else:
    ...         print(f"Failed: {model_type} - {result.error}")

The package follows a layered architecture:
- Configuration layer: Pydantic models for type-safe config handling
- Validation layer: JSON Schema and Pydantic validation
- Platform layer: Unified interface for different model repositories
- Download layer: Efficient model downloading with validation
- CLI layer: User-friendly command-line interface
"""

from .downloader import Downloader, DownloadResult
from .exceptions import (
    ConfigError,
    DownloadError,
    ModelInfoError,
    PlatformUnavailableError,
    ResourceError,
    ValidationError,
)
from .lumen_config import LumenConfig, Region, Runtime
from .lumen_config_validator import load_and_validate_config
from .model_info import Metadata, ModelInfo, Runtimes, Source
from .model_info_validator import load_and_validate_model_info
from .result_schemas import EmbeddingV1, FaceV1, LabelsV1

# The project version is dynamic (setuptools_scm, see pyproject.toml), so a
# hardcoded string here inevitably drifts — the previous "0.1.0" disagreed
# with the released 0.2.0 distribution. Resolve it from the installed
# distribution metadata instead, falling back for source-tree usage.
try:
    from importlib.metadata import PackageNotFoundError as _PkgNotFound
    from importlib.metadata import version as _dist_version

    __version__ = _dist_version("lumen-resources")
except _PkgNotFound:
    # Package not installed (e.g. running directly from a checkout).
    __version__ = "0.0.0"

__all__ = [
    # Configuration
    "LumenConfig",
    "Runtime",
    "Region",
    "load_and_validate_config",
    # Model Info
    "ModelInfo",
    "Source",
    "Runtimes",
    "Metadata",
    "load_and_validate_model_info",
    # Response Validation
    "FaceV1",
    "EmbeddingV1",
    "LabelsV1",
    # Downloader
    "Downloader",
    "DownloadResult",
    # Exceptions
    "ResourceError",
    "ConfigError",
    "DownloadError",
    "PlatformUnavailableError",
    "ValidationError",
    "ModelInfoError",
]