lumen-resources 0.2.1.tar.gz → 0.3.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. {lumen_resources-0.2.1/src/lumen_resources.egg-info → lumen_resources-0.3.1}/PKG-INFO +1 -1
  2. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/__init__.py +6 -10
  3. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/downloader.py +20 -2
  4. lumen_resources-0.3.1/src/lumen_resources/model_info.py +101 -0
  5. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/result_schemas/__init__.py +3 -6
  6. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/result_schemas/embedding_v1.py +1 -1
  7. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/result_schemas/face_v1.py +1 -1
  8. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/result_schemas/labels_v1.py +1 -1
  9. lumen_resources-0.3.1/src/lumen_resources/result_schemas/ocr_v1.py +54 -0
  10. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/schemas/model_info-schema.json +5 -1
  11. lumen_resources-0.3.1/src/lumen_resources/schemas/result_schemas/ocr_v1.json +55 -0
  12. {lumen_resources-0.2.1 → lumen_resources-0.3.1/src/lumen_resources.egg-info}/PKG-INFO +1 -1
  13. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources.egg-info/SOURCES.txt +3 -1
  14. lumen_resources-0.2.1/src/lumen_resources/model_info.py +0 -233
  15. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/.gitignore +0 -0
  16. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/README.md +0 -0
  17. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/docs/examples/clip_torch_cn.yaml +0 -0
  18. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/docs/examples/hub-service.yaml +0 -0
  19. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/docs/examples/model_info_template.json +0 -0
  20. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/docs/examples/single-service.yaml +0 -0
  21. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/pyproject.toml +0 -0
  22. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/setup.cfg +0 -0
  23. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/cli.py +0 -0
  24. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/exceptions.py +0 -0
  25. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/lumen_config.py +0 -0
  26. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/lumen_config_validator.py +0 -0
  27. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/model_info_validator.py +0 -0
  28. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/platform.py +0 -0
  29. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/result_schemas/README.md +0 -0
  30. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/schemas/config-schema.yaml +0 -0
  31. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/schemas/result_schemas/embedding_v1.json +0 -0
  32. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/schemas/result_schemas/face_v1.json +0 -0
  33. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources/schemas/result_schemas/labels_v1.json +0 -0
  34. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources.egg-info/dependency_links.txt +0 -0
  35. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources.egg-info/entry_points.txt +0 -0
  36. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources.egg-info/requires.txt +0 -0
  37. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/src/lumen_resources.egg-info/top_level.txt +0 -0
  38. {lumen_resources-0.2.1 → lumen_resources-0.3.1}/uv.lock +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lumen-resources
- Version: 0.2.1
+ Version: 0.3.1
  Summary: Unified model resource management for Lumen ML services
  Author-email: EdwinZhanCN <support@lumilio.org>
  License: MIT
src/lumen_resources/__init__.py
@@ -38,25 +38,20 @@ The package follows a layered architecture:
  - CLI layer: User-friendly command-line interface
  """

- from .lumen_config import LumenConfig, Runtime, Region
  from .downloader import Downloader, DownloadResult
  from .exceptions import (
-     ResourceError,
      ConfigError,
      DownloadError,
+     ModelInfoError,
      PlatformUnavailableError,
+     ResourceError,
      ValidationError,
-     ModelInfoError,
  )
+ from .lumen_config import LumenConfig, Region, Runtime
  from .lumen_config_validator import load_and_validate_config
-
- from .model_info import ModelInfo, Source, Runtimes, Metadata
+ from .model_info import Metadata, ModelInfo, Runtimes, Source
  from .model_info_validator import load_and_validate_model_info
- from .result_schemas import (
-     EmbeddingV1,
-     FaceV1,
-     LabelsV1
- )
+ from .result_schemas import OCRV1, EmbeddingV1, FaceV1, LabelsV1

  __version__ = "0.1.0"

@@ -76,6 +71,7 @@ __all__ = [
      "FaceV1",
      "EmbeddingV1",
      "LabelsV1",
+     "OCRV1",
      # Downloader
      "Downloader",
      "DownloadResult",
src/lumen_resources/downloader.py
@@ -183,6 +183,7 @@ class Downloader:
      patterns = [
          "model_info.json",
          "*config*",
+         "*.txt",
      ]  # Always include model_info.json and config files.

      if runtime == Runtime.torch:
@@ -200,9 +201,26 @@
              ]
          )
      elif runtime == Runtime.onnx:
-         patterns.extend(["*.onnx", "*.ort"])
+         patterns.extend(
+             [
+                 "*.onnx",
+                 "*.ort",
+                 "*vocab*",
+                 "*tokenizer*",
+                 "special_tokens_map.json",
+                 "preprocessor_config.json",
+             ]
+         )
      elif runtime == Runtime.rknn:
-         patterns.extend(["*.rknn"])
+         patterns.extend(
+             [
+                 "*.rknn",
+                 "*vocab*",
+                 "*tokenizer*",
+                 "special_tokens_map.json",
+                 "preprocessor_config.json",
+             ]
+         )

      return patterns

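The practical effect of the downloader change: for the onnx and rknn runtimes, tokenizer, vocabulary and preprocessor assets are now fetched along with the model binaries, and plain *.txt files are always included. The patterns are shell-style globs; a quick, self-contained way to see what they select, using fnmatch purely for illustration against a made-up repository listing (the Downloader's own matching code is not part of this diff):

from fnmatch import fnmatch

# Pattern list as assembled for Runtime.onnx in 0.3.1: base patterns plus the ONNX additions.
patterns = [
    "model_info.json", "*config*", "*.txt",
    "*.onnx", "*.ort", "*vocab*", "*tokenizer*",
    "special_tokens_map.json", "preprocessor_config.json",
]

# Hypothetical repository file listing, for illustration only.
repo_files = [
    "model_info.json", "model.onnx", "model.safetensors",
    "tokenizer.json", "vocab.txt", "preprocessor_config.json",
]

selected = [name for name in repo_files if any(fnmatch(name, pat) for pat in patterns)]
print(selected)  # model.safetensors is the only entry not matched by the ONNX pattern set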
lumen_resources-0.3.1/src/lumen_resources/model_info.py (new file)
@@ -0,0 +1,101 @@
+ # generated by datamodel-codegen:
+ # filename: model_info-schema.json
+ # timestamp: 2025-12-11T07:52:17+00:00
+
+ from __future__ import annotations
+
+ from datetime import date
+ from enum import Enum
+ from typing import Any
+
+ from pydantic import AwareDatetime, BaseModel, ConfigDict, Field
+
+
+ class Format(Enum):
+     huggingface = 'huggingface'
+     openclip = 'openclip'
+     modelscope = 'modelscope'
+     custom = 'custom'
+
+
+ class Source(BaseModel):
+     model_config = ConfigDict(
+         extra='forbid',
+     )
+     format: Format
+     repo_id: str = Field(..., min_length=1)
+     """
+     Repository identifier for model source
+     """
+
+
+ class Requirements(BaseModel):
+     python: str | None = None
+     dependencies: list[str] | None = None
+
+
+ class Runtimes(BaseModel):
+     model_config = ConfigDict(
+         extra='forbid',
+     )
+     available: bool
+     files: list[str] | dict[str, list[str]] | None = None
+     devices: list[str] | None = None
+     requirements: Requirements | None = None
+
+
+ class Datasets(BaseModel):
+     model_config = ConfigDict(
+         extra='forbid',
+     )
+     labels: str
+     embeddings: str
+
+
+ class Metadata(BaseModel):
+     model_config = ConfigDict(
+         extra='forbid',
+     )
+     license: str | None = None
+     author: str | None = None
+     created_at: date | None = None
+     updated_at: AwareDatetime | None = None
+     tags: list[str] | None = None
+
+
+ class ModelInfo(BaseModel):
+     """
+     Schema for Lumen AI model configuration files
+     """
+
+     model_config = ConfigDict(
+         extra='forbid',
+     )
+     name: str = Field(..., max_length=100, min_length=1)
+     """
+     Model name identifier, this is also openclip model identifier if openclip is set as source format
+     """
+     version: str = Field(..., pattern='^[0-9]+\\.[0-9]+\\.[0-9]+$')
+     """
+     Model version
+     """
+     description: str = Field(..., max_length=500, min_length=1)
+     """
+     Model description and purpose
+     """
+     model_type: str
+     """
+     Type of the model
+     """
+     embedding_dim: int | None = Field(None, ge=1, le=100000)
+     """
+     Dimension of the embedding space
+     """
+     source: Source
+     runtimes: dict[str, Runtimes]
+     datasets: dict[str, Datasets] | None = None
+     extra_metadata: dict[str, Any] | None = None
+     """
+     Additional model-specific configuration and metadata
+     """
+     metadata: Metadata | None = None
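Two behavioural differences from the 0.2.1 model stand out in the regenerated code: embedding_dim is now optional (int | None, default None) and a free-form extra_metadata mapping is accepted. A minimal construction sketch with illustrative values (the model name, repo id and runtime entries below are made up, not taken from the package):

from lumen_resources.model_info import Format, ModelInfo, Runtimes, Source

info = ModelInfo(
    name="example-ocr-model",                 # illustrative
    version="1.0.0",                          # must match ^[0-9]+\.[0-9]+\.[0-9]+$
    description="Example entry showing the 0.3.1 fields",
    model_type="ocr",
    # embedding_dim omitted: optional as of this schema revision
    source=Source(format=Format.huggingface, repo_id="org/example-repo"),
    runtimes={"onnx": Runtimes(available=True, files=["model.onnx"])},
    extra_metadata={"det_model": "example-det", "rec_model": "example-rec"},
)
print(info.model_dump(exclude_none=True)["extra_metadata"])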
src/lumen_resources/result_schemas/__init__.py
@@ -1,14 +1,11 @@
  # generated by datamodel-codegen:
  # filename: result_schemas
- # timestamp: 2025-11-28T17:04:43+00:00
+ # timestamp: 2025-12-10T15:13:26+00:00


  from .embedding_v1 import EmbeddingV1
  from .face_v1 import FaceV1
  from .labels_v1 import LabelsV1
+ from .ocr_v1 import OCRV1

- __all__ = [
-     "FaceV1",
-     "EmbeddingV1",
-     "LabelsV1",
- ]
+ __all__ = ["FaceV1", "EmbeddingV1", "LabelsV1", "OCRV1"]
src/lumen_resources/result_schemas/embedding_v1.py
@@ -1,6 +1,6 @@
  # generated by datamodel-codegen:
  # filename: embedding_v1.json
- # timestamp: 2025-11-28T17:04:43+00:00
+ # timestamp: 2025-12-10T15:13:26+00:00

  from __future__ import annotations

src/lumen_resources/result_schemas/face_v1.py
@@ -1,6 +1,6 @@
  # generated by datamodel-codegen:
  # filename: face_v1.json
- # timestamp: 2025-11-28T17:04:43+00:00
+ # timestamp: 2025-12-10T15:13:26+00:00

  from __future__ import annotations

src/lumen_resources/result_schemas/labels_v1.py
@@ -1,6 +1,6 @@
  # generated by datamodel-codegen:
  # filename: labels_v1.json
- # timestamp: 2025-11-28T17:04:43+00:00
+ # timestamp: 2025-12-10T15:13:26+00:00

  from __future__ import annotations

lumen_resources-0.3.1/src/lumen_resources/result_schemas/ocr_v1.py (new file)
@@ -0,0 +1,54 @@
+ # generated by datamodel-codegen:
+ # filename: ocr_v1.json
+ # timestamp: 2025-12-10T15:13:26+00:00
+
+ from __future__ import annotations
+
+ from pydantic import BaseModel, ConfigDict, Field, RootModel
+
+
+ class BoxItem(RootModel[list[int]]):
+     root: list[int] = Field(..., max_length=2, min_length=2)
+     """
+     Point coordinates [x, y]
+     """
+
+
+ class Item(BaseModel):
+     model_config = ConfigDict(
+         extra='forbid',
+     )
+     box: list[BoxItem] = Field(..., min_length=3)
+     """
+     Polygon coordinates defining the text region (usually 4 points for rotated rectangle: TL, TR, BR, BL)
+     """
+     text: str
+     """
+     Recognized text content
+     """
+     confidence: float = Field(..., ge=0.0, le=1.0)
+     """
+     Recognition confidence score
+     """
+
+
+ class OCRV1(BaseModel):
+     """
+     Universal schema for OCR text detection and recognition responses across Lumen services
+     """
+
+     model_config = ConfigDict(
+         extra='forbid',
+     )
+     items: list[Item]
+     """
+     Detected text regions with content and metadata
+     """
+     count: int = Field(..., ge=0)
+     """
+     Number of detected text regions
+     """
+     model_id: str = Field(..., min_length=1)
+     """
+     Model identifier (combined detection and recognition models)
+     """
src/lumen_resources/schemas/model_info-schema.json
@@ -9,7 +9,6 @@
      "version",
      "description",
      "model_type",
-     "embedding_dim",
      "source",
      "runtimes"
    ],
@@ -135,6 +134,11 @@
        },
        "additionalProperties": false
      },
+     "extra_metadata": {
+       "type": "object",
+       "description": "Additional model-specific configuration and metadata",
+       "additionalProperties": true
+     },
      "metadata": {
        "type": "object",
        "properties": {
lumen_resources-0.3.1/src/lumen_resources/schemas/result_schemas/ocr_v1.json (new file)
@@ -0,0 +1,55 @@
+ {
+   "$schema": "http://json-schema.org/draft-07/schema#",
+   "title": "OCRV1",
+   "description": "Universal schema for OCR text detection and recognition responses across Lumen services",
+   "type": "object",
+   "properties": {
+     "items": {
+       "type": "array",
+       "items": {
+         "type": "object",
+         "properties": {
+           "box": {
+             "type": "array",
+             "items": {
+               "type": "array",
+               "items": {
+                 "type": "integer"
+               },
+               "minItems": 2,
+               "maxItems": 2,
+               "description": "Point coordinates [x, y]"
+             },
+             "minItems": 3,
+             "description": "Polygon coordinates defining the text region (usually 4 points for rotated rectangle: TL, TR, BR, BL)"
+           },
+           "text": {
+             "type": "string",
+             "description": "Recognized text content"
+           },
+           "confidence": {
+             "type": "number",
+             "minimum": 0.0,
+             "maximum": 1.0,
+             "description": "Recognition confidence score"
+           }
+         },
+         "required": ["box", "text", "confidence"],
+         "additionalProperties": false
+       },
+       "description": "Detected text regions with content and metadata"
+     },
+     "count": {
+       "type": "integer",
+       "minimum": 0,
+       "description": "Number of detected text regions"
+     },
+     "model_id": {
+       "type": "string",
+       "minLength": 1,
+       "description": "Model identifier (combined detection and recognition models)"
+     }
+   },
+   "required": ["items", "count", "model_id"],
+   "additionalProperties": false
+ }
src/lumen_resources.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lumen-resources
- Version: 0.2.1
+ Version: 0.3.1
  Summary: Unified model resource management for Lumen ML services
  Author-email: EdwinZhanCN <support@lumilio.org>
  License: MIT
src/lumen_resources.egg-info/SOURCES.txt
@@ -26,8 +26,10 @@ src/lumen_resources/result_schemas/__init__.py
  src/lumen_resources/result_schemas/embedding_v1.py
  src/lumen_resources/result_schemas/face_v1.py
  src/lumen_resources/result_schemas/labels_v1.py
+ src/lumen_resources/result_schemas/ocr_v1.py
  src/lumen_resources/schemas/config-schema.yaml
  src/lumen_resources/schemas/model_info-schema.json
  src/lumen_resources/schemas/result_schemas/embedding_v1.json
  src/lumen_resources/schemas/result_schemas/face_v1.json
- src/lumen_resources/schemas/result_schemas/labels_v1.json
+ src/lumen_resources/schemas/result_schemas/labels_v1.json
+ src/lumen_resources/schemas/result_schemas/ocr_v1.json
lumen_resources-0.2.1/src/lumen_resources/model_info.py (removed; replaced by the regenerated module above)
@@ -1,233 +0,0 @@
- # generated by datamodel-codegen:
- # filename: model_info-schema.json
- # timestamp: 2025-10-19T06:58:43+00:00
-
- from __future__ import annotations
-
- from datetime import date
- from enum import Enum
-
- from pydantic import AwareDatetime, BaseModel, ConfigDict, Field
-
-
- class Format(Enum):
-     """Model source format type.
-
-     Defines the format and source platform for a model. Different formats
-     have different loading mechanisms and repository structures.
-
-     Attributes:
-         huggingface: Hugging Face Hub model format.
-         openclip: OpenCLIP model format.
-         modelscope: ModelScope model format.
-         custom: Custom model format.
-
-     Example:
-         >>> source = Source(format=Format.huggingface, repo_id="openai/clip-vit-base-patch32")
-         >>> print(source.format.value)
-         'huggingface'
-     """
-
-     huggingface = "huggingface"
-     openclip = "openclip"
-     modelscope = "modelscope"
-     custom = "custom"
-
-
- class Source(BaseModel):
-     """Model source information.
-
-     Contains information about where and how to obtain the model, including
-     the format type and repository identifier.
-
-     Attributes:
-         format: Model format type (huggingface, openclip, modelscope, custom).
-         repo_id: Repository identifier for the model source.
-
-     Example:
-         >>> source = Source(
-         ...     format=Format.huggingface,
-         ...     repo_id="openai/clip-vit-base-patch32"
-         ... )
-         >>> print(source.repo_id)
-         'openai/clip-vit-base-patch32'
-     """
-
-     model_config = ConfigDict(
-         extra="forbid",
-     )
-     format: Format
-     repo_id: str = Field(
-         ..., description="Repository identifier for model source", min_length=1
-     )
-
-
- class Requirements(BaseModel):
-     """Python environment requirements for model runtime.
-
-     Specifies the Python version and package dependencies required to run
-     the model in a specific runtime configuration.
-
-     Attributes:
-         python: Minimum Python version requirement.
-         dependencies: List of required Python package dependencies.
-
-     Example:
-         >>> req = Requirements(
-         ...     python=">=3.8",
-         ...     dependencies=["torch", "transformers", "pillow"]
-         ... )
-         >>> print(req.python)
-         '>=3.8'
-     """
-
-     python: str | None = None
-     dependencies: list[str] | None = None
-
-
- class Runtimes(BaseModel):
-     """Runtime configuration for a specific model execution environment.
-
-     Defines the availability, file requirements, device compatibility, and
-     dependencies for a model runtime (e.g., torch, onnx, rknn).
-
-     Attributes:
-         available: Whether this runtime is available for the model.
-         files: List of required files or dict mapping runtime to file lists.
-         devices: List of compatible devices for this runtime.
-         requirements: Python environment requirements for this runtime.
-
-     Example:
-         >>> runtime = Runtimes(
-         ...     available=True,
-         ...     files=["model.pt", "config.json"],
-         ...     devices=["cuda", "cpu"],
-         ...     requirements=Requirements(python=">=3.8", dependencies=["torch"])
-         ... )
-         >>> print(runtime.available)
-         True
-     """
-
-     model_config = ConfigDict(
-         extra="forbid",
-     )
-     available: bool
-     files: list[str] | dict[str, list[str]] | None = None
-     devices: list[str] | None = None
-     requirements: Requirements | None = None
-
-
- class Datasets(BaseModel):
-     """Dataset configuration for model evaluation and inference.
-
-     Defines the label and embedding datasets used for zero-shot classification
-     or other dataset-specific model operations.
-
-     Attributes:
-         labels: Dataset identifier for class labels.
-         embeddings: Dataset identifier for embeddings.
-
-     Example:
-         >>> dataset = Datasets(
-         ...     labels="imagenet1k_labels",
-         ...     embeddings="imagenet1k_embeddings"
-         ... )
-         >>> print(dataset.labels)
-         'imagenet1k_labels'
-     """
-
-     model_config = ConfigDict(
-         extra="forbid",
-     )
-     labels: str
-     embeddings: str
-
-
- class Metadata(BaseModel):
-     """Model metadata information.
-
-     Contains descriptive metadata about the model including licensing,
-     authorship, creation dates, and categorization tags.
-
-     Attributes:
-         license: License identifier for the model.
-         author: Model author or organization.
-         created_at: Model creation date.
-         updated_at: Last model update timestamp.
-         tags: List of descriptive tags for categorization.
-
-     Example:
-         >>> metadata = Metadata(
-         ...     license="MIT",
-         ...     author="OpenAI",
-         ...     tags=["computer-vision", "multimodal", "clip"]
-         ... )
-         >>> print(metadata.license)
-         'MIT'
-     """
-
-     model_config = ConfigDict(
-         extra="forbid",
-     )
-     license: str | None = None
-     author: str | None = None
-     created_at: date | None = None
-     updated_at: AwareDatetime | None = None
-     tags: list[str] | None = None
-
-
- class ModelInfo(BaseModel):
-     """Schema for Lumen AI model configuration files.
-
-     Complete model definition including source information, runtime configurations,
-     dataset compatibility, and metadata. This is the top-level schema for
-     model_info.json files.
-
-     Attributes:
-         name: Model name identifier, also OpenCLIP model identifier if applicable.
-         version: Model version following semantic versioning (X.Y.Z).
-         description: Model description and purpose.
-         model_type: Type/category of the model.
-         embedding_dim: Dimension of the model's embedding space.
-         source: Model source information including format and repository.
-         runtimes: Dictionary mapping runtime names to runtime configurations.
-         datasets: Optional dataset configurations for model evaluation.
-         metadata: Optional model metadata including license and author.
-
-     Example:
-         >>> model_info = ModelInfo(
-         ...     name="ViT-B-32",
-         ...     version="1.0.0",
-         ...     description="Vision Transformer for CLIP",
-         ...     model_type="vision-transformer",
-         ...     embedding_dim=512,
-         ...     source=Source(format=Format.huggingface, repo_id="openai/clip-vit-base-patch32"),
-         ...     runtimes={"torch": Runtimes(available=True)}
-         ... )
-         >>> print(model_info.name)
-         'ViT-B-32'
-     """
-
-     model_config = ConfigDict(
-         extra="forbid",
-     )
-     name: str = Field(
-         ...,
-         description="Model name identifier, this is also openclip model identifier if openclip is set as source format",
-         max_length=100,
-         min_length=1,
-     )
-     version: str = Field(
-         ..., description="Model version", pattern="^[0-9]+\\.[0-9]+\\.[0-9]+$"
-     )
-     description: str = Field(
-         ..., description="Model description and purpose", max_length=500, min_length=1
-     )
-     model_type: str = Field(..., description="Type of the model")
-     embedding_dim: int = Field(
-         ..., description="Dimension of the embedding space", ge=1, le=100000
-     )
-     source: Source
-     runtimes: dict[str, Runtimes]
-     datasets: dict[str, Datasets] | None = None
-     metadata: Metadata | None = None