lumen-resources 0.3.1-py3-none-any.whl → 0.3.2-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to their public registry, and is provided for informational purposes only. Substantively, the regenerated lumen_config models trade verbose Google-style docstrings for terse attribute docstrings, BackendSettings gains a prefer_fp16 flag, and the result_schemas package adds a TextGenerationV1 model; the remaining hunks are regenerated timestamps and an import reorder.
@@ -1,6 +1,6 @@
 # generated by datamodel-codegen:
 #   filename:  config-schema.yaml
-#   timestamp: 2025-10-19T06:58:36+00:00
+#   timestamp: 2025-12-12T07:57:50+00:00
 
 from __future__ import annotations
 
@@ -11,19 +11,8 @@ from pydantic import BaseModel, ConfigDict, Field, RootModel
 
 
 class Region(Enum):
-    """Platform region selection for model repositories.
-
-    Determines which model repository platform to use for downloading models.
-    Different regions have access to different model platforms and content.
-
-    Attributes:
-        cn: China region - uses ModelScope platform
-        other: Other regions - uses Hugging Face platform
-
-    Example:
-        >>> config = Metadata(version="1.0.0", region=Region.cn, cache_dir="~/.lumen")
-        >>> print(config.region.value)
-        'cn'
+    """
+    Platform region selection (cn=ModelScope, other=HuggingFace)
     """
 
     cn = "cn"
@@ -31,56 +20,25 @@ class Region(Enum):
 
 
 class Metadata(BaseModel):
-    """Configuration metadata for Lumen services.
-
-    Contains version information, region settings, and cache directory configuration.
-    This is required for all Lumen service configurations.
-
-    Attributes:
-        version: Configuration version following semantic versioning (X.Y.Z format).
-        region: Platform region for model downloads (cn=ModelScope, other=HuggingFace).
-        cache_dir: Directory path for caching downloaded models (supports ~ expansion).
-
-    Example:
-        >>> metadata = Metadata(
-        ...     version="1.0.0",
-        ...     region=Region.other,
-        ...     cache_dir="~/.lumen/models"
-        ... )
-        >>> print(metadata.version)
-        '1.0.0'
-    """
-
     version: str = Field(
-        ...,
-        description="Configuration version (semantic versioning)",
-        examples=["1.0.0", "2.1.3"],
-        pattern="^\\d+\\.\\d+\\.\\d+$",
-    )
-    region: Region = Field(
-        ..., description="Platform region selection (cn=ModelScope, other=HuggingFace)"
-    )
-    cache_dir: str = Field(
-        ...,
-        description="Model cache directory path (supports ~ expansion)",
-        examples=["~/.lumen/models", "/opt/lumen/models"],
+        ..., examples=["1.0.0", "2.1.3"], pattern="^\\d+\\.\\d+\\.\\d+$"
     )
+    """
+    Configuration version (semantic versioning)
+    """
+    region: Region
+    """
+    Platform region selection (cn=ModelScope, other=HuggingFace)
+    """
+    cache_dir: str = Field(..., examples=["~/.lumen/models", "/opt/lumen/models"])
+    """
+    Model cache directory path (supports ~ expansion)
+    """
 
 
 class Mode(Enum):
-    """Deployment mode for Lumen services.
-
-    Determines how services are deployed and managed. Different modes support
-    different service architectures and scaling patterns.
-
-    Attributes:
-        single: Single service deployment - runs one specific service
-        hub: Hub service deployment - runs multiple services as a hub
-
-    Example:
-        >>> deployment = Deployment(mode=Mode.single, service="clip")
-        >>> print(deployment.mode.value)
-        'single'
+    """
+    Deployment mode
     """
 
     single = "single"
@@ -88,246 +46,119 @@ class Mode(Enum):
 
 
 class Service(RootModel[str]):
-    """Service identifier for Lumen deployments.
-
-    A string-based root model that validates service names according to naming
-    conventions. Service names must start with a lowercase letter and can contain
-    lowercase letters, numbers, and underscores.
-
-    Attributes:
-        root: Service name string matching the pattern.
-
-    Example:
-        >>> service = Service("clip_service")
-        >>> print(service.root)
-        'clip_service'
-        >>> invalid_service = Service("Invalid-Name")  # Raises ValidationError
-    """
-
     root: str = Field(..., pattern="^[a-z][a-z0-9_]*$")
 
 
 class Deployment(BaseModel):
-    """Deployment configuration for single service mode.
-
-    Configuration for deploying a single Lumen service. This class is used
-    when the deployment mode is set to 'single', requiring a specific service
-    name to be provided.
-
-    Attributes:
-        mode: Deployment mode, fixed to 'single' for this class.
-        service: Name of the single service to deploy.
-        services: List of services for hub mode (should be None for single mode).
-
-    Example:
-        >>> deployment = Deployment(
-        ...     mode="single",
-        ...     service="clip"
-        ... )
-        >>> print(deployment.service)
-        'clip'
+    mode: Literal["single"]
+    """
+    Deployment mode
+    """
+    service: str = Field(..., pattern="^[a-z][a-z0-9_]*$")
+    """
+    Service name for single mode (required if mode=single)
+    """
+    services: list[Service] | None = Field(None, min_length=1)
+    """
+    Service names for hub mode (required if mode=hub)
     """
-
-    mode: Literal["single"] = Field(..., description="Deployment mode")
-    service: str = Field(
-        ...,
-        description="Service name for single mode (required if mode=single)",
-        pattern="^[a-z][a-z0-9_]*$",
-    )
-    services: list[Service] | None = Field(
-        None,
-        description="Service names for hub mode (required if mode=hub)",
-        min_length=1,
-    )
 
 
 class Deployment1(BaseModel):
-    """Deployment configuration for hub service mode.
-
-    Configuration for deploying multiple Lumen services as a hub. This class is used
-    when the deployment mode is set to 'hub', requiring a list of services to be provided.
-
-    Attributes:
-        mode: Deployment mode, fixed to 'hub' for this class.
-        service: Service name for single mode (should be None for hub mode).
-        services: List of services to deploy in hub mode (required).
-
-    Example:
-        >>> deployment = Deployment1(
-        ...     mode="hub",
-        ...     services=[Service("clip"), Service("face")]
-        ... )
-        >>> print(len(deployment.services))
-        2
-    """
-
-    mode: Literal["hub"] = Field(..., description="Deployment mode")
-    service: str | None = Field(
-        None,
-        description="Service name for single mode (required if mode=single)",
-        pattern="^[a-z][a-z0-9_]*$",
-    )
-    services: list[Service] = Field(
-        ...,
-        description="Service names for hub mode (required if mode=hub)",
-        min_length=1,
-    )
+    mode: Literal["hub"]
+    """
+    Deployment mode
+    """
+    service: str | None = Field(None, pattern="^[a-z][a-z0-9_]*$")
+    """
+    Service name for single mode (required if mode=single)
+    """
+    services: list[Service] = Field(..., min_length=1)
+    """
+    Service names for hub mode (required if mode=hub)
+    """
 
 
 class Mdns(BaseModel):
-    """mDNS service discovery configuration.
-
-    Configuration for multicast DNS service discovery, allowing Lumen services
-    to be automatically discoverable on local networks.
-
-    Attributes:
-        enabled: Whether to enable mDNS service discovery. Defaults to False.
-        service_name: mDNS service name, required if enabled is True.
-
-    Example:
-        >>> mdns = Mdns(
-        ...     enabled=True,
-        ...     service_name="lumen-clip"
-        ... )
-        >>> print(mdns.enabled)
-        True
+    enabled: bool | None = False
+    """
+    Enable mDNS service discovery
     """
-
-    enabled: bool | None = Field(False, description="Enable mDNS service discovery")
     service_name: str | None = Field(
-        None,
-        description="mDNS service name (required if enabled=true)",
-        examples=["lumen-clip", "lumen-hub"],
-        pattern="^[a-z][a-z0-9-]*$",
+        None, examples=["lumen-clip", "lumen-hub"], pattern="^[a-z][a-z0-9-]*$"
     )
+    """
+    mDNS service name (required if enabled=true)
+    """
 
 
 class Server(BaseModel):
-    """gRPC server configuration.
-
-    Configuration for the gRPC server that hosts Lumen services, including
-    network settings and optional mDNS service discovery.
-
-    Attributes:
-        port: gRPC server port number (1024-65535).
-        host: Server bind address. Defaults to "0.0.0.0".
-        mdns: Optional mDNS configuration for service discovery.
-
-    Example:
-        >>> server = Server(
-        ...     port=50051,
-        ...     host="127.0.0.1",
-        ...     mdns=Mdns(enabled=True, service_name="lumen-clip")
-        ... )
-        >>> print(server.port)
-        50051
-    """
-
-    port: int = Field(..., description="gRPC server port", ge=1024, le=65535)
-    host: str | None = Field(
-        "0.0.0.0",
-        description="Server bind address",
-        examples=["0.0.0.0", "127.0.0.1", "[::]"],
-    )
+    port: int = Field(..., ge=1024, le=65535)
+    """
+    gRPC server port
+    """
+    host: str | None = Field("0.0.0.0", examples=["0.0.0.0", "127.0.0.1", "[::]"])
+    """
+    Server bind address
+    """
     mdns: Mdns | None = None
 
 
 class Import(BaseModel):
-    """Service import configuration for dynamic loading.
-
-    Configuration for dynamically importing and registering Lumen services
-    with the gRPC server. Contains paths to the service registry class
-    and the gRPC server registration function.
-
-    Attributes:
-        registry_class: Full dotted path to the service registry class.
-        add_to_server: Full dotted path to the gRPC add_to_server function.
-
-    Example:
-        >>> import_config = Import(
-        ...     registry_class="lumen_clip.service_registry.ClipService",
-        ...     add_to_server="lumen_clip.proto.ml_service_pb2_grpc.add_InferenceServicer_to_server"
-        ... )
-        >>> print("clip" in import_config.registry_class)
-        True
-    """
-
     registry_class: str = Field(
         ...,
-        description="Full dotted path to service registry class",
         examples=[
             "lumen_clip.service_registry.ClipService",
             "lumen_face.service_registry.FaceService",
         ],
         pattern="^[a-z_][a-z0-9_.]*\\.[A-Z][a-zA-Z0-9]*$",
     )
+    """
+    Full dotted path to service registry class
+    """
     add_to_server: str = Field(
         ...,
-        description="Full dotted path to gRPC add_to_server function",
        examples=[
             "lumen_clip.proto.ml_service_pb2_grpc.add_InferenceServicer_to_server",
             "lumen_face.proto.ml_service_pb2_grpc.add_FaceServicer_to_server",
         ],
         pattern="^[a-z_][a-z0-9_.]*\\.add_[A-Za-z0-9_]+_to_server$",
     )
+    """
+    Full dotted path to gRPC add_to_server function
+    """
 
 
 class BackendSettings(BaseModel):
-    """Optional settings for inference backend configuration.
-
-    Configuration for inference backend optimization including device selection,
-    batch processing, and ONNX runtime provider settings.
-
-    Attributes:
-        device: Preferred compute device ('cuda', 'mps', 'cpu'). If None, auto-detects.
-        batch_size: Maximum batch size for inference processing. Defaults to 8.
-        onnx_providers: List of ONNX execution providers. If None, uses defaults.
-
-    Example:
-        >>> backend = BackendSettings(
-        ...     device="cuda",
-        ...     batch_size=16,
-        ...     onnx_providers=["CUDAExecutionProvider", "CPUExecutionProvider"]
-        ... )
-        >>> print(backend.device)
-        'cuda'
+    """
+    Optional settings for inference backend configuration.
     """
 
     model_config = ConfigDict(
         extra="forbid",
     )
-    device: str | None = Field(
-        None,
-        description="Preferred device ('cuda', 'mps', 'cpu'). If null, auto-detects best available.",
-    )
-    batch_size: int | None = Field(
-        8, description="Maximum batch size for inference.", ge=1
-    )
-    onnx_providers: list[str] | None = Field(
-        None,
-        description="List of ONNX execution providers. If null, uses ONNX Runtime defaults.",
-    )
+    device: str | None = None
+    """
+    Preferred device ('cuda', 'mps', 'cpu'). If null, auto-detects best available.
+    """
+    batch_size: int | None = Field(8, ge=1)
+    """
+    Maximum batch size for inference.
+    """
+    onnx_providers: list[str] | None = None
+    """
+    List of ONNX execution providers. If null, uses ONNX Runtime defaults.
+    """
+    prefer_fp16: bool | None = True
+    """
+    Whether to prefer FP16 model files over FP32 when available.
+    Improves performance on supported hardware.
+    """
 
 
 class Runtime(Enum):
-    """Model runtime type for inference execution.
-
-    Determines which runtime environment to use for model inference.
-    Different runtimes have different performance characteristics and
-    hardware requirements.
-
-    Attributes:
-        torch: PyTorch runtime for native PyTorch models.
-        onnx: ONNX runtime for optimized cross-platform inference.
-        rknn: RKNN runtime for Rockchip NPU acceleration.
-
-    Example:
-        >>> config = ModelConfig(
-        ...     model="ViT-B-32",
-        ...     runtime=Runtime.torch
-        ... )
-        >>> print(config.runtime.value)
-        'torch'
+    """
+    Model runtime type
     """
 
     torch = "torch"
@@ -336,118 +167,48 @@ class Runtime(Enum):
 
 
 class ModelConfig(BaseModel):
-    """Configuration for a single model within a service.
-
-    Defines the model repository, runtime requirements, and optional settings
-    for dataset and device-specific configurations.
-
-    Attributes:
-        model: Model repository name or identifier.
-        runtime: Runtime environment for model execution.
-        rknn_device: RKNN device identifier, required if runtime is rknn.
-        dataset: Dataset name for zero-shot classification tasks.
-
-    Example:
-        >>> config = ModelConfig(
-        ...     model="ViT-B-32",
-        ...     runtime=Runtime.torch,
-        ...     dataset="ImageNet_1k"
-        ... )
-        >>> print(config.model)
-        'ViT-B-32'
+    model: str = Field(..., examples=["ViT-B-32", "CN-CLIP-ViT-B-16", "MobileCLIP-S2"])
+    """
+    Model repository name
+    """
+    runtime: Runtime
+    """
+    Model runtime type
     """
-
-    model: str = Field(
-        ...,
-        description="Model repository name",
-        examples=["ViT-B-32", "CN-CLIP-ViT-B-16", "MobileCLIP-S2"],
-    )
-    runtime: Runtime = Field(..., description="Model runtime type")
     rknn_device: str | None = Field(
-        None,
-        description="RKNN device identifier (required if runtime=rknn)",
-        examples=["rk3566", "rk3588"],
-        pattern="^rk\\d+$",
-    )
-    dataset: str | None = Field(
-        None,
-        description="Dataset name for zero-shot classification (optional)",
-        examples=["ImageNet_1k", "TreeOfLife-10M"],
+        None, examples=["rk3566", "rk3588"], pattern="^rk\\d+$"
     )
+    """
+    RKNN device identifier (required if runtime=rknn)
+    """
+    dataset: str | None = Field(None, examples=["ImageNet_1k", "TreeOfLife-10M"])
+    """
+    Dataset name for zero-shot classification (optional)
+    """
 
 
 class Services(BaseModel):
-    """Configuration for a Lumen service.
-
-    Defines a complete service configuration including package information,
-    import settings, backend optimization, and model configurations.
-
-    Attributes:
-        enabled: Whether this service should be loaded and started.
-        package: Python package name containing the service implementation.
-        import_: Dynamic import configuration for the service.
-        backend_settings: Optional backend optimization settings.
-        models: Dictionary of model configurations keyed by alias.
-
-    Example:
-        >>> service = Services(
-        ...     enabled=True,
-        ...     package="lumen_clip",
-        ...     import_=Import(
-        ...         registry_class="lumen_clip.service_registry.ClipService",
-        ...         add_to_server="lumen_clip.proto.ml_service_pb2_grpc.add_InferenceServicer_to_server"
-        ...     ),
-        ...     models={"default": ModelConfig(model="ViT-B-32", runtime=Runtime.torch)}
-        ... )
-        >>> print(service.package)
-        'lumen_clip'
-    """
-
-    enabled: bool = Field(..., description="Whether to load this service")
+    enabled: bool
+    """
+    Whether to load this service
+    """
     package: str = Field(
-        ...,
-        description="Python package name",
-        examples=["lumen_clip", "lumen_face"],
-        pattern="^[a-z][a-z0-9_]*$",
+        ..., examples=["lumen_clip", "lumen_face"], pattern="^[a-z][a-z0-9_]*$"
     )
+    """
+    Python package name
+    """
     import_: Import = Field(..., alias="import")
     backend_settings: BackendSettings | None = None
-    models: dict[str, ModelConfig] = Field(
-        ..., description="Model configurations (alias → config)"
-    )
+    models: dict[str, ModelConfig]
+    """
+    Model configurations (alias → config)
+    """
 
 
 class LumenConfig(BaseModel):
-    """Unified configuration schema for all Lumen ML services.
-
-    Root configuration model that combines metadata, deployment settings,
-    server configuration, and service definitions into a complete
-    configuration for Lumen ML services.
-
-    Attributes:
-        metadata: Configuration metadata including version and region settings.
-        deployment: Deployment configuration (single or hub mode).
-        server: gRPC server configuration.
-        services: Dictionary of service configurations keyed by service name.
-
-    Example:
-        >>> config = LumenConfig(
-        ...     metadata=Metadata(
-        ...         version="1.0.0",
-        ...         region=Region.other,
-        ...         cache_dir="~/.lumen/models"
-        ...     ),
-        ...     deployment=Deployment(mode="single", service="clip"),
-        ...     server=Server(port=50051),
-        ...     services={"clip": Services(
-        ...         enabled=True,
-        ...         package="lumen_clip",
-        ...         import_=Import(...),
-        ...         models={}
-        ...     )}
-        ... )
-        >>> print(config.metadata.version)
-        '1.0.0'
+    """
+    Unified configuration schema for all Lumen ML services
     """
 
     model_config = ConfigDict(
@@ -456,4 +217,7 @@ class LumenConfig(BaseModel):
     metadata: Metadata
     deployment: Deployment | Deployment1
     server: Server
-    services: dict[str, Services] = Field(..., description="Service definitions")
+    services: dict[str, Services]
+    """
+    Service definitions
+    """
@@ -12,8 +12,8 @@ import yaml
 from jsonschema import Draft7Validator
 from pydantic import ValidationError
 
-from .lumen_config import LumenConfig
 from .exceptions import ConfigError
+from .lumen_config import LumenConfig
 
 
 class ConfigValidator:
@@ -1,11 +1,11 @@
 # generated by datamodel-codegen:
 #   filename:  result_schemas
-#   timestamp: 2025-12-10T15:13:26+00:00
-
+#   timestamp: 2025-12-12T07:02:32+00:00
 
 from .embedding_v1 import EmbeddingV1
 from .face_v1 import FaceV1
 from .labels_v1 import LabelsV1
 from .ocr_v1 import OCRV1
+from .text_generation_v1 import TextGenerationV1
 
-__all__ = ["FaceV1", "EmbeddingV1", "LabelsV1", "OCRV1"]
+__all__ = ["FaceV1", "EmbeddingV1", "LabelsV1", "OCRV1", "TextGenerationV1"]
@@ -1,6 +1,6 @@
 # generated by datamodel-codegen:
 #   filename:  embedding_v1.json
-#   timestamp: 2025-12-10T15:13:26+00:00
+#   timestamp: 2025-12-12T07:02:32+00:00
 
 from __future__ import annotations
 
@@ -1,6 +1,6 @@
 # generated by datamodel-codegen:
 #   filename:  face_v1.json
-#   timestamp: 2025-12-10T15:13:26+00:00
+#   timestamp: 2025-12-12T07:02:32+00:00
 
 from __future__ import annotations
 
@@ -1,6 +1,6 @@
 # generated by datamodel-codegen:
 #   filename:  labels_v1.json
-#   timestamp: 2025-12-10T15:13:26+00:00
+#   timestamp: 2025-12-12T07:02:32+00:00
 
 from __future__ import annotations
 
@@ -1,6 +1,6 @@
 # generated by datamodel-codegen:
 #   filename:  ocr_v1.json
-#   timestamp: 2025-12-10T15:13:26+00:00
+#   timestamp: 2025-12-12T07:02:32+00:00
 
 from __future__ import annotations
 