lumen_resources-0.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,459 @@
+ # generated by datamodel-codegen:
+ #   filename:  config-schema.yaml
+ #   timestamp: 2025-10-19T06:58:36+00:00
+
+ from __future__ import annotations
+
+ from enum import Enum
+ from typing import Literal
+
+ from pydantic import BaseModel, ConfigDict, Field, RootModel
+
+
+ class Region(Enum):
+     """Platform region selection for model repositories.
+
+     Determines which model repository platform to use for downloading models.
+     Different regions have access to different model platforms and content.
+
+     Attributes:
+         cn: China region - uses the ModelScope platform
+         other: Other regions - uses the Hugging Face platform
+
+     Example:
+         >>> config = Metadata(version="1.0.0", region=Region.cn, cache_dir="~/.lumen")
+         >>> config.region.value
+         'cn'
+     """
+
+     cn = "cn"
+     other = "other"
+
+
+ class Metadata(BaseModel):
+     """Configuration metadata for Lumen services.
+
+     Contains version information, region settings, and cache directory configuration.
+     This is required for all Lumen service configurations.
+
+     Attributes:
+         version: Configuration version following semantic versioning (X.Y.Z format).
+         region: Platform region for model downloads (cn=ModelScope, other=HuggingFace).
+         cache_dir: Directory path for caching downloaded models (supports ~ expansion).
+
+     Example:
+         >>> metadata = Metadata(
+         ...     version="1.0.0",
+         ...     region=Region.other,
+         ...     cache_dir="~/.lumen/models"
+         ... )
+         >>> metadata.version
+         '1.0.0'
+     """
+
+     version: str = Field(
+         ...,
+         description="Configuration version (semantic versioning)",
+         examples=["1.0.0", "2.1.3"],
+         pattern="^\\d+\\.\\d+\\.\\d+$",
+     )
+     region: Region = Field(
+         ..., description="Platform region selection (cn=ModelScope, other=HuggingFace)"
+     )
+     cache_dir: str = Field(
+         ...,
+         description="Model cache directory path (supports ~ expansion)",
+         examples=["~/.lumen/models", "/opt/lumen/models"],
+     )
+
+
+ class Mode(Enum):
+     """Deployment mode for Lumen services.
+
+     Determines how services are deployed and managed. Different modes support
+     different service architectures and scaling patterns.
+
+     Attributes:
+         single: Single service deployment - runs one specific service
+         hub: Hub service deployment - runs multiple services as a hub
+
+     Example:
+         >>> deployment = Deployment(mode=Mode.single.value, service="clip")
+         >>> deployment.mode
+         'single'
+     """
+
+     single = "single"
+     hub = "hub"
+
+
+ class Service(RootModel[str]):
+     """Service identifier for Lumen deployments.
+
+     A string-based root model that validates service names according to naming
+     conventions. Service names must start with a lowercase letter and can contain
+     lowercase letters, numbers, and underscores.
+
+     Attributes:
+         root: Service name string matching the pattern.
+
+     Example:
+         >>> service = Service("clip_service")
+         >>> service.root
+         'clip_service'
+         >>> invalid_service = Service("Invalid-Name")  # raises ValidationError
+     """
+
+     root: str = Field(..., pattern="^[a-z][a-z0-9_]*$")
+
+
+ class Deployment(BaseModel):
+     """Deployment configuration for single service mode.
+
+     Configuration for deploying a single Lumen service. This class is used
+     when the deployment mode is set to 'single', requiring a specific service
+     name to be provided.
+
+     Attributes:
+         mode: Deployment mode, fixed to 'single' for this class.
+         service: Name of the single service to deploy.
+         services: List of services for hub mode (should be None in single mode).
+
+     Example:
+         >>> deployment = Deployment(
+         ...     mode="single",
+         ...     service="clip"
+         ... )
+         >>> deployment.service
+         'clip'
+     """
+
+     mode: Literal["single"] = Field(..., description="Deployment mode")
+     service: str = Field(
+         ...,
+         description="Service name for single mode (required if mode=single)",
+         pattern="^[a-z][a-z0-9_]*$",
+     )
+     services: list[Service] | None = Field(
+         None,
+         description="Service names for hub mode (required if mode=hub)",
+         min_length=1,
+     )
+
+
+ class Deployment1(BaseModel):
+     """Deployment configuration for hub service mode.
+
+     Configuration for deploying multiple Lumen services as a hub. This class
+     is used when the deployment mode is set to 'hub', requiring a list of
+     services to be provided.
+
+     Attributes:
+         mode: Deployment mode, fixed to 'hub' for this class.
+         service: Service name for single mode (should be None in hub mode).
+         services: List of services to deploy in hub mode (required).
+
+     Example:
+         >>> deployment = Deployment1(
+         ...     mode="hub",
+         ...     services=[Service("clip"), Service("face")]
+         ... )
+         >>> print(len(deployment.services))
+         2
+     """
+
+     mode: Literal["hub"] = Field(..., description="Deployment mode")
+     service: str | None = Field(
+         None,
+         description="Service name for single mode (required if mode=single)",
+         pattern="^[a-z][a-z0-9_]*$",
+     )
+     services: list[Service] = Field(
+         ...,
+         description="Service names for hub mode (required if mode=hub)",
+         min_length=1,
+     )
+
+
+ class Mdns(BaseModel):
+     """mDNS service discovery configuration.
+
+     Configuration for multicast DNS service discovery, allowing Lumen services
+     to be automatically discoverable on local networks.
+
+     Attributes:
+         enabled: Whether to enable mDNS service discovery. Defaults to False.
+         service_name: mDNS service name, required if enabled is True.
+
+     Example:
+         >>> mdns = Mdns(
+         ...     enabled=True,
+         ...     service_name="lumen-clip"
+         ... )
+         >>> print(mdns.enabled)
+         True
+     """
+
+     enabled: bool | None = Field(False, description="Enable mDNS service discovery")
+     service_name: str | None = Field(
+         None,
+         description="mDNS service name (required if enabled=true)",
+         examples=["lumen-clip", "lumen-hub"],
+         pattern="^[a-z][a-z0-9-]*$",
+     )
+
+
+ class Server(BaseModel):
+     """gRPC server configuration.
+
+     Configuration for the gRPC server that hosts Lumen services, including
+     network settings and optional mDNS service discovery.
+
+     Attributes:
+         port: gRPC server port number (1024-65535).
+         host: Server bind address. Defaults to "0.0.0.0".
+         mdns: Optional mDNS configuration for service discovery.
+
+     Example:
+         >>> server = Server(
+         ...     port=50051,
+         ...     host="127.0.0.1",
+         ...     mdns=Mdns(enabled=True, service_name="lumen-clip")
+         ... )
+         >>> print(server.port)
+         50051
+     """
+
+     port: int = Field(..., description="gRPC server port", ge=1024, le=65535)
+     host: str | None = Field(
+         "0.0.0.0",
+         description="Server bind address",
+         examples=["0.0.0.0", "127.0.0.1", "[::]"],
+     )
+     mdns: Mdns | None = None
+
+
+ class Import(BaseModel):
+     """Service import configuration for dynamic loading.
+
+     Configuration for dynamically importing and registering Lumen services
+     with the gRPC server. Contains paths to the service registry class
+     and the gRPC server registration function.
+
+     Attributes:
+         registry_class: Full dotted path to the service registry class.
+         add_to_server: Full dotted path to the gRPC add_to_server function.
+
+     Example:
+         >>> import_config = Import(
+         ...     registry_class="lumen_clip.service_registry.ClipService",
+         ...     add_to_server="lumen_clip.proto.ml_service_pb2_grpc.add_InferenceServicer_to_server"
+         ... )
+         >>> print("clip" in import_config.registry_class)
+         True
+     """
+
+     registry_class: str = Field(
+         ...,
+         description="Full dotted path to service registry class",
+         examples=[
+             "lumen_clip.service_registry.ClipService",
+             "lumen_face.service_registry.FaceService",
+         ],
+         pattern="^[a-z_][a-z0-9_.]*\\.[A-Z][a-zA-Z0-9]*$",
+     )
+     add_to_server: str = Field(
+         ...,
+         description="Full dotted path to gRPC add_to_server function",
+         examples=[
+             "lumen_clip.proto.ml_service_pb2_grpc.add_InferenceServicer_to_server",
+             "lumen_face.proto.ml_service_pb2_grpc.add_FaceServicer_to_server",
+         ],
+         pattern="^[a-z_][a-z0-9_.]*\\.add_[A-Za-z0-9_]+_to_server$",
+     )
+
+
+ class BackendSettings(BaseModel):
+     """Optional settings for inference backend configuration.
+
+     Configuration for inference backend optimization, including device selection,
+     batch processing, and ONNX Runtime provider settings.
+
+     Attributes:
+         device: Preferred compute device ('cuda', 'mps', 'cpu'). If None, auto-detects.
+         batch_size: Maximum batch size for inference processing. Defaults to 8.
+         onnx_providers: List of ONNX execution providers. If None, uses defaults.
+
+     Example:
+         >>> backend = BackendSettings(
+         ...     device="cuda",
+         ...     batch_size=16,
+         ...     onnx_providers=["CUDAExecutionProvider", "CPUExecutionProvider"]
+         ... )
+         >>> backend.device
+         'cuda'
+     """
+
+     model_config = ConfigDict(
+         extra="forbid",
+     )
+     device: str | None = Field(
+         None,
+         description="Preferred device ('cuda', 'mps', 'cpu'). If null, auto-detects best available.",
+     )
+     batch_size: int | None = Field(
+         8, description="Maximum batch size for inference.", ge=1
+     )
+     onnx_providers: list[str] | None = Field(
+         None,
+         description="List of ONNX execution providers. If null, uses ONNX Runtime defaults.",
+     )
+
+
+ class Runtime(Enum):
+     """Model runtime type for inference execution.
+
+     Determines which runtime environment to use for model inference.
+     Different runtimes have different performance characteristics and
+     hardware requirements.
+
+     Attributes:
+         torch: PyTorch runtime for native PyTorch models.
+         onnx: ONNX Runtime for optimized cross-platform inference.
+         rknn: RKNN runtime for Rockchip NPU acceleration.
+
+     Example:
+         >>> config = ModelConfig(
+         ...     model="ViT-B-32",
+         ...     runtime=Runtime.torch
+         ... )
+         >>> config.runtime.value
+         'torch'
+     """
+
+     torch = "torch"
+     onnx = "onnx"
+     rknn = "rknn"
+
+
+ class ModelConfig(BaseModel):
+     """Configuration for a single model within a service.
+
+     Defines the model repository, runtime requirements, and optional settings
+     for dataset and device-specific configurations.
+
+     Attributes:
+         model: Model repository name or identifier.
+         runtime: Runtime environment for model execution.
+         rknn_device: RKNN device identifier, required if runtime is rknn.
+         dataset: Dataset name for zero-shot classification tasks.
+
+     Example:
+         >>> config = ModelConfig(
+         ...     model="ViT-B-32",
+         ...     runtime=Runtime.torch,
+         ...     dataset="ImageNet_1k"
+         ... )
+         >>> config.model
+         'ViT-B-32'
+     """
+
+     model: str = Field(
+         ...,
+         description="Model repository name",
+         examples=["ViT-B-32", "CN-CLIP-ViT-B-16", "MobileCLIP-S2"],
+     )
+     runtime: Runtime = Field(..., description="Model runtime type")
+     rknn_device: str | None = Field(
+         None,
+         description="RKNN device identifier (required if runtime=rknn)",
+         examples=["rk3566", "rk3588"],
+         pattern="^rk\\d+$",
+     )
+     dataset: str | None = Field(
+         None,
+         description="Dataset name for zero-shot classification (optional)",
+         examples=["ImageNet_1k", "TreeOfLife-10M"],
+     )
+
+
+ class Services(BaseModel):
+     """Configuration for a Lumen service.
+
+     Defines a complete service configuration including package information,
+     import settings, backend optimization, and model configurations.
+
+     Attributes:
+         enabled: Whether this service should be loaded and started.
+         package: Python package name containing the service implementation.
+         import_: Dynamic import configuration (populated via the "import" alias).
+         backend_settings: Optional backend optimization settings.
+         models: Dictionary of model configurations keyed by alias.
+
+     Example:
+         >>> service = Services(
+         ...     enabled=True,
+         ...     package="lumen_clip",
+         ...     **{"import": Import(
+         ...         registry_class="lumen_clip.service_registry.ClipService",
+         ...         add_to_server="lumen_clip.proto.ml_service_pb2_grpc.add_InferenceServicer_to_server"
+         ...     )},
+         ...     models={"default": ModelConfig(model="ViT-B-32", runtime=Runtime.torch)}
+         ... )
+         >>> service.package
+         'lumen_clip'
+     """
+
+     enabled: bool = Field(..., description="Whether to load this service")
+     package: str = Field(
+         ...,
+         description="Python package name",
+         examples=["lumen_clip", "lumen_face"],
+         pattern="^[a-z][a-z0-9_]*$",
+     )
+     import_: Import = Field(..., alias="import")
+     backend_settings: BackendSettings | None = None
+     models: dict[str, ModelConfig] = Field(
+         ..., description="Model configurations (alias → config)"
+     )
+
+
+ class LumenConfig(BaseModel):
+     """Unified configuration schema for all Lumen ML services.
+
+     Root configuration model that combines metadata, deployment settings,
+     server configuration, and service definitions into a complete
+     configuration for Lumen ML services.
+
+     Attributes:
+         metadata: Configuration metadata including version and region settings.
+         deployment: Deployment configuration (single or hub mode).
+         server: gRPC server configuration.
+         services: Dictionary of service configurations keyed by service name.
+
+     Example:
+         >>> config = LumenConfig(
+         ...     metadata=Metadata(
+         ...         version="1.0.0",
+         ...         region=Region.other,
+         ...         cache_dir="~/.lumen/models"
+         ...     ),
+         ...     deployment=Deployment(mode="single", service="clip"),
+         ...     server=Server(port=50051),
+         ...     services={"clip": Services(
+         ...         enabled=True,
+         ...         package="lumen_clip",
+         ...         **{"import": Import(...)},
+         ...         models={}
+         ...     )}
+         ... )
+         >>> config.metadata.version
+         '1.0.0'
+     """
+
+     model_config = ConfigDict(
+         extra="forbid",
+     )
+     metadata: Metadata
+     deployment: Deployment | Deployment1
+     server: Server
+     services: dict[str, Services] = Field(..., description="Service definitions")