lumen-resources 0.3.1-py3-none-any.whl → 0.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lumen_resources/result_schemas/labels_v1.py
@@ -1,6 +1,6 @@
  # generated by datamodel-codegen:
  # filename: labels_v1.json
- # timestamp: 2025-12-10T15:13:26+00:00
+ # timestamp: 2025-12-12T07:02:32+00:00

  from __future__ import annotations

lumen_resources/result_schemas/ocr_v1.py
@@ -1,6 +1,6 @@
  # generated by datamodel-codegen:
  # filename: ocr_v1.json
- # timestamp: 2025-12-10T15:13:26+00:00
+ # timestamp: 2025-12-12T07:02:32+00:00

  from __future__ import annotations

lumen_resources/result_schemas/text_generation_v1.py
@@ -0,0 +1,89 @@
+ # generated by datamodel-codegen:
+ # filename: text_generation_v1.json
+ # timestamp: 2025-12-12T07:02:32+00:00
+
+ from __future__ import annotations
+
+ from enum import Enum
+
+ from pydantic import BaseModel, ConfigDict, Field
+
+
+ class FinishReason(Enum):
+     """
+     Reason why generation terminated
+     """
+
+     stop = 'stop'
+     length = 'length'
+     eos_token = 'eos_token'
+     stop_sequence = 'stop_sequence'
+     error = 'error'
+
+
+ class Metadata(BaseModel):
+     """
+     Optional metadata about the generation process
+     """
+
+     model_config = ConfigDict(
+         extra='forbid',
+     )
+     temperature: float | None = Field(None, ge=0.0)
+     """
+     Sampling temperature used for generation
+     """
+     top_p: float | None = Field(None, ge=0.0, le=1.0)
+     """
+     Nucleus sampling parameter used for generation
+     """
+     max_tokens: int | None = Field(None, ge=1)
+     """
+     Maximum tokens allowed for generation
+     """
+     seed: int | None = None
+     """
+     Random seed used for generation (if deterministic)
+     """
+     generation_time_ms: float | None = Field(None, ge=0.0)
+     """
+     Time taken to generate the response in milliseconds
+     """
+     streaming_chunks: int | None = Field(None, ge=0)
+     """
+     Number of chunks in streaming generation (if applicable)
+     """
+
+
+ class TextGenerationV1(BaseModel):
+     """
+     Universal schema for text generation responses across Lumen VLM services. Returns generated text with metadata about the generation process.
+     """
+
+     model_config = ConfigDict(
+         extra='forbid',
+     )
+     text: str = Field(..., min_length=0)
+     """
+     Generated text content
+     """
+     finish_reason: FinishReason
+     """
+     Reason why generation terminated
+     """
+     generated_tokens: int = Field(..., ge=0)
+     """
+     Number of tokens generated in the response
+     """
+     input_tokens: int | None = Field(None, ge=0)
+     """
+     Number of tokens in the input prompt
+     """
+     model_id: str = Field(..., min_length=1)
+     """
+     Identifier of the model that generated the text
+     """
+     metadata: Metadata | None = None
+     """
+     Optional metadata about the generation process
+     """
lumen_resources/schemas/config-schema.yaml
@@ -131,6 +131,9 @@ properties:
   patternProperties:
     "^[a-z][a-z0-9_]*$":
       type: object
+      x-pydantic-config:
+        validate_by_name: true
+        validate_by_alias: true
      required:
        - enabled
        - package
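The added x-pydantic-config keys mirror ConfigDict settings introduced in pydantic 2.11 (validate_by_name and validate_by_alias). A minimal sketch of what those two settings do on a model, using a hypothetical field and alias purely for illustration:

from pydantic import BaseModel, ConfigDict, Field


class ServiceEntry(BaseModel):  # hypothetical model, for illustration only
    model_config = ConfigDict(
        validate_by_name=True,   # accept the Python field name on input
        validate_by_alias=True,  # also accept the declared alias on input
    )
    registry_class: str = Field(alias="registry-class")  # hypothetical alias


# Both spellings are accepted during validation:
ServiceEntry(registry_class="FaceRegistry")
ServiceEntry(**{"registry-class": "FaceRegistry"})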
@@ -150,7 +153,7 @@ properties:
           - "lumen_clip"
           - "lumen_face"

-       import:
+       import_info:
          type: object
          required:
            - registry_class
@@ -197,11 +200,10 @@ definitions:
         minimum: 1
         default: 8
       onnx_providers:
-        type: [array, "null"] # Allow array or null
-        items:
-          type: string
-        description: "List of ONNX execution providers. If null, uses ONNX Runtime defaults."
+        type: [array, "null"]
+        description: "List of ONNX execution providers. Each item can be a string or a tuple of (name, config_dict)."
         default: null
+
     additionalProperties: false
   ModelConfig:
     type: object
@@ -236,6 +238,13 @@ definitions:
         examples:
           - "ImageNet_1k"
           - "TreeOfLife-10M"
+      precision:
+        type: string
+        description: Preferred precision for running the model, valid only when runtime is 'onnx' or 'rknn'. The download validator will check the precision field in model_info.json to verify if the preferred precision is available for the current model.
+        examples:
+          - "fp32"
+          - "int8"
+          - "q4fp16"

   # If runtime=rknn, rknn_device is required
   if:
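Together, these schema changes let a model entry pin a preferred precision and describe ONNX execution providers more richly. An illustrative fragment only: the keys runtime, precision, and onnx_providers come from the hunks above, while the surrounding layout, the provider names, and the provider options are assumptions:

# Illustrative only; keys taken from the schema hunks above, values assumed.
model_entry = {
    "runtime": "onnx",
    "precision": "fp32",  # new in 0.4.0; checked against the precision field in model_info.json
    "onnx_providers": [
        "CPUExecutionProvider",                                    # plain provider name
        ("TensorrtExecutionProvider", {"trt_fp16_enable": True}),  # (name, config_dict) form
    ],
}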
lumen_resources/schemas/result_schemas/text_generation_v1.json
@@ -0,0 +1,94 @@
+ {
+   "$schema": "http://json-schema.org/draft-07/schema#",
+   "title": "TextGenerationV1",
+   "description": "Universal schema for text generation responses across Lumen VLM services. Returns generated text with metadata about the generation process.",
+   "type": "object",
+   "properties": {
+     "text": {
+       "type": "string",
+       "minLength": 0,
+       "description": "Generated text content"
+     },
+     "finish_reason": {
+       "type": "string",
+       "enum": ["stop", "length", "eos_token", "stop_sequence", "error"],
+       "description": "Reason why generation terminated"
+     },
+     "generated_tokens": {
+       "type": "integer",
+       "minimum": 0,
+       "description": "Number of tokens generated in the response"
+     },
+     "input_tokens": {
+       "type": "integer",
+       "minimum": 0,
+       "description": "Number of tokens in the input prompt"
+     },
+     "model_id": {
+       "type": "string",
+       "minLength": 1,
+       "description": "Identifier of the model that generated the text"
+     },
+     "metadata": {
+       "type": "object",
+       "properties": {
+         "temperature": {
+           "type": "number",
+           "minimum": 0.0,
+           "description": "Sampling temperature used for generation"
+         },
+         "top_p": {
+           "type": "number",
+           "minimum": 0.0,
+           "maximum": 1.0,
+           "description": "Nucleus sampling parameter used for generation"
+         },
+         "max_tokens": {
+           "type": "integer",
+           "minimum": 1,
+           "description": "Maximum tokens allowed for generation"
+         },
+         "seed": {
+           "type": "integer",
+           "description": "Random seed used for generation (if deterministic)"
+         },
+         "generation_time_ms": {
+           "type": "number",
+           "minimum": 0,
+           "description": "Time taken to generate the response in milliseconds"
+         },
+         "streaming_chunks": {
+           "type": "integer",
+           "minimum": 0,
+           "description": "Number of chunks in streaming generation (if applicable)"
+         }
+       },
+       "additionalProperties": false,
+       "description": "Optional metadata about the generation process"
+     }
+   },
+   "required": ["text", "finish_reason", "generated_tokens", "model_id"],
+   "additionalProperties": false,
+   "examples": [
+     {
+       "text": "The image shows a beautiful sunset over mountains with vibrant colors.",
+       "finish_reason": "stop",
+       "generated_tokens": 15,
+       "input_tokens": 8,
+       "model_id": "fastvlm-2b-onnx",
+       "metadata": {
+         "temperature": 0.7,
+         "top_p": 0.9,
+         "max_tokens": 512,
+         "generation_time_ms": 245.6
+       }
+     },
+     {
+       "text": "A cat is sitting on a windowsill looking outside.",
+       "finish_reason": "eos_token",
+       "generated_tokens": 9,
+       "input_tokens": 12,
+       "model_id": "fastvlm-2b-onnx"
+     }
+   ]
+ }
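The packaged JSON Schema can also be used on its own, independent of the generated pydantic models. A minimal sketch, assuming the third-party jsonschema package is installed (it is not necessarily a declared dependency of lumen-resources) and that the schema is read from its packaged path:

import json
from importlib import resources

from jsonschema import validate  # third-party; assumed available

# Load the packaged schema from the path listed in the wheel's RECORD.
schema_path = (
    resources.files("lumen_resources")
    / "schemas"
    / "result_schemas"
    / "text_generation_v1.json"
)
schema = json.loads(schema_path.read_text())

# Second example payload from the schema's own "examples" list.
payload = {
    "text": "A cat is sitting on a windowsill looking outside.",
    "finish_reason": "eos_token",
    "generated_tokens": 9,
    "input_tokens": 12,
    "model_id": "fastvlm-2b-onnx",
}
validate(instance=payload, schema=schema)  # raises ValidationError on mismatch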
lumen_resources-0.4.0.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lumen-resources
- Version: 0.3.1
+ Version: 0.4.0
  Summary: Unified model resource management for Lumen ML services
  Author-email: EdwinZhanCN <support@lumilio.org>
  License: MIT
lumen_resources-0.4.0.dist-info/RECORD
@@ -0,0 +1,28 @@
+ lumen_resources/__init__.py,sha256=EwI0cDEF9eIlWT4eYWwL8MUR8-vaHEzpZ0Kf94Kxn60,2729
+ lumen_resources/cli.py,sha256=gGGfe0BmqjZX-2fIEEQ-MgElMn21OrabPs7Fcu6aY6A,14204
+ lumen_resources/downloader.py,sha256=Gn0or7qvk3BfmKu7VtKklXxVKuBj2lk9b5Nq7Cll-kY,21589
+ lumen_resources/exceptions.py,sha256=cUS76Revda0Vt6rPeUNLI2QhluNyLon1GiF5ZM4Kx30,3148
+ lumen_resources/lumen_config.py,sha256=YqJohnwSjL1rUb9UzFv3fwav4PcU6etIJ96y0zi1XqM,6042
+ lumen_resources/lumen_config_validator.py,sha256=6GIBwresO1k7yALaNt5EgQZpcxFVq7k1obRQjl4AEEI,9734
+ lumen_resources/model_info.py,sha256=O7LRJnC2OJ5uAuic074hzWThruHhVyMg5Kt7eNAdZQ4,2355
+ lumen_resources/model_info_validator.py,sha256=91QsUCRuxZT3wCt7xI3wsLVBmEiVXxJUk8d2MOHwNb0,9294
+ lumen_resources/platform.py,sha256=EwqjQMoQY2jK0VP1jstgIW_Lu82Cxn9HDxwdCj1EN8w,9824
+ lumen_resources/result_schemas/README.md,sha256=jQ_J3WBcZJZbD9kVkbEj6wngDBN_HQ5S-wRiUy_ociI,845
+ lumen_resources/result_schemas/__init__.py,sha256=iOxu2V8w5tXrFF5jH-Qn-En7nr2b3ZlcaiswyJCtxCQ,357
+ lumen_resources/result_schemas/embedding_v1.py,sha256=XlruWBMKefG7p_f06KS-_iLs8fSwO_wxkfFuBUWgSTk,671
+ lumen_resources/result_schemas/face_v1.py,sha256=Vqd-VxzmmPC0tqNtRV9qYx7Xaax5CXxjJhjJ5whiT9w,1314
+ lumen_resources/result_schemas/labels_v1.py,sha256=alrU2XKiLSVXGTAfcOZlPbGGKX2s-UrGQ6uYluM-BBY,829
+ lumen_resources/result_schemas/ocr_v1.py,sha256=5YFAwBkRJ_dMlyENxosXRYEIyZnDOAYx7JRDVnY7ah4,1259
+ lumen_resources/result_schemas/text_generation_v1.py,sha256=DQ6vb5e9tHYzd1Qf-JkDTflzbnyUWOCgleeJnTImt50,2147
+ lumen_resources/schemas/config-schema.yaml,sha256=ts_X2ANVcWpDDeCdHYiQA-7cvEIWxPxWIjMVQFB3sHY,8671
+ lumen_resources/schemas/model_info-schema.json,sha256=VkARnNvzdFKee_AROQG9AUU5Q9s2P_dt7Nvm9Mj39aU,5837
+ lumen_resources/schemas/result_schemas/embedding_v1.json,sha256=6iZaXbmkP0J5fXGD4Gkrt6AZPvpK6FZaQ754sOXxFrc,841
+ lumen_resources/schemas/result_schemas/face_v1.json,sha256=XcnHxwP_KR8lAv6s6npjWQxwyYAQTuBLEKrvlOqH84g,1771
+ lumen_resources/schemas/result_schemas/labels_v1.json,sha256=AnOiM0VCuIKrLdrbA73pmu4QD4QaHIDNsANeU3BIOeg,1222
+ lumen_resources/schemas/result_schemas/ocr_v1.json,sha256=btVUZjR_RW4CLJjIyEGCITPJFJxMqGSM7QX8OyfVguo,1653
+ lumen_resources/schemas/result_schemas/text_generation_v1.json,sha256=f_qwzlfKR_zZMZsn9qQ3U1U3t7HkSk9j7XED-q_s2hc,2876
+ lumen_resources-0.4.0.dist-info/METADATA,sha256=O4n69Lcqm1VN4NbgxNxsu_cJFrtjY9hYt6i4O1e-Mys,4002
+ lumen_resources-0.4.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ lumen_resources-0.4.0.dist-info/entry_points.txt,sha256=fLCrIB9BxyIDAbJVqDGW4QyvLPlvL53WI-6lkTZ3h2M,61
+ lumen_resources-0.4.0.dist-info/top_level.txt,sha256=XgLNoNrF2RIpI2sYIpjLuUREYRVHW13ElHoCnFYHjAQ,16
+ lumen_resources-0.4.0.dist-info/RECORD,,
@@ -1,26 +0,0 @@
1
- lumen_resources/__init__.py,sha256=yS4AldwTQlRYfu35DlqBlnoSNXyGw0G0P41AUlZyFac,2687
2
- lumen_resources/cli.py,sha256=gGGfe0BmqjZX-2fIEEQ-MgElMn21OrabPs7Fcu6aY6A,14204
3
- lumen_resources/downloader.py,sha256=nQtYcCfSHdWpCqB9zy6-Ri0Ot7j8Hdz--F4oNNPa04Q,17655
4
- lumen_resources/exceptions.py,sha256=cUS76Revda0Vt6rPeUNLI2QhluNyLon1GiF5ZM4Kx30,3148
5
- lumen_resources/lumen_config.py,sha256=EtWdlWiN6sP8QZxJfw3mZZKkN_9Kh6aRl-EeC4iSpKg,14814
6
- lumen_resources/lumen_config_validator.py,sha256=6ZaCVZViEPSDTfbke37P0bDSNI7hE5K8H17bv1V8KSE,9734
7
- lumen_resources/model_info.py,sha256=O7LRJnC2OJ5uAuic074hzWThruHhVyMg5Kt7eNAdZQ4,2355
8
- lumen_resources/model_info_validator.py,sha256=91QsUCRuxZT3wCt7xI3wsLVBmEiVXxJUk8d2MOHwNb0,9294
9
- lumen_resources/platform.py,sha256=EwqjQMoQY2jK0VP1jstgIW_Lu82Cxn9HDxwdCj1EN8w,9824
10
- lumen_resources/result_schemas/README.md,sha256=nPbihM4RCJxFbBHyUsRYLljyaRjULuMQgW6K1nWYGy0,384
11
- lumen_resources/result_schemas/__init__.py,sha256=7nsrPKQE9WN39-DwTSrir4sZD7IiDlxfuY12TPKbJ8s,289
12
- lumen_resources/result_schemas/embedding_v1.py,sha256=VtU4Uy_tTKDIKy4cKZxH_rp_L6VGDlz-ptUbWBwkebw,671
13
- lumen_resources/result_schemas/face_v1.py,sha256=d-cHhkwvrKw4x7CT_UsihIpDrnxuqnkrnQ38_I3ZfvI,1314
14
- lumen_resources/result_schemas/labels_v1.py,sha256=S8hgrcJs1XlxOibfgeqiXcNv2epdShuN83kuunQEVbw,829
15
- lumen_resources/result_schemas/ocr_v1.py,sha256=XddG9tKp2Nw77NEGmRTYGJFoFbXfLFjoPJFzYuiLtAI,1259
16
- lumen_resources/schemas/config-schema.yaml,sha256=FC4Iz4O-nzAvFXkGMhLkAxy2-efq47wPgcID962Vjx4,8167
17
- lumen_resources/schemas/model_info-schema.json,sha256=VkARnNvzdFKee_AROQG9AUU5Q9s2P_dt7Nvm9Mj39aU,5837
18
- lumen_resources/schemas/result_schemas/embedding_v1.json,sha256=6iZaXbmkP0J5fXGD4Gkrt6AZPvpK6FZaQ754sOXxFrc,841
19
- lumen_resources/schemas/result_schemas/face_v1.json,sha256=XcnHxwP_KR8lAv6s6npjWQxwyYAQTuBLEKrvlOqH84g,1771
20
- lumen_resources/schemas/result_schemas/labels_v1.json,sha256=AnOiM0VCuIKrLdrbA73pmu4QD4QaHIDNsANeU3BIOeg,1222
21
- lumen_resources/schemas/result_schemas/ocr_v1.json,sha256=btVUZjR_RW4CLJjIyEGCITPJFJxMqGSM7QX8OyfVguo,1653
22
- lumen_resources-0.3.1.dist-info/METADATA,sha256=Al6kVXuTCVTb0TxP6_BbcxaeHIEjAVLumZMpsSs0Bi0,4002
23
- lumen_resources-0.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
24
- lumen_resources-0.3.1.dist-info/entry_points.txt,sha256=fLCrIB9BxyIDAbJVqDGW4QyvLPlvL53WI-6lkTZ3h2M,61
25
- lumen_resources-0.3.1.dist-info/top_level.txt,sha256=XgLNoNrF2RIpI2sYIpjLuUREYRVHW13ElHoCnFYHjAQ,16
26
- lumen_resources-0.3.1.dist-info/RECORD,,