cuvis-ai-schemas 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as published to a supported registry. It is provided for informational purposes only.

--- a/cuvis_ai_schemas/__init__.py
+++ b/cuvis_ai_schemas/__init__.py
@@ -1,5 +1,5 @@
 """cuvis-ai-schemas: Lightweight schema definitions for the cuvis-ai ecosystem."""

-__version__ = "0.1.0"
+__version__ = "0.1.2"

 __all__ = ["__version__"]
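
A quick check of the installed version (a trivial sketch, assuming the wheel is installed in the current environment):

    import cuvis_ai_schemas

    # The top-level package exports only __version__.
    print(cuvis_ai_schemas.__version__)  # expected: "0.1.2"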

--- a/cuvis_ai_schemas/discovery/__init__.py
+++ b/cuvis_ai_schemas/discovery/__init__.py
@@ -1,6 +1,6 @@
 """Discovery and metadata schemas."""

 # Discovery schemas are primarily defined in proto
 # Python implementations will be added as needed

 __all__: list[str] = []

--- a/cuvis_ai_schemas/enums/__init__.py
+++ b/cuvis_ai_schemas/enums/__init__.py
@@ -1,5 +1,5 @@
 """Shared enums across cuvis-ai ecosystem."""

 from cuvis_ai_schemas.enums.types import ArtifactType, ExecutionStage

 __all__ = ["ExecutionStage", "ArtifactType"]

--- a/cuvis_ai_schemas/enums/types.py
+++ b/cuvis_ai_schemas/enums/types.py
@@ -1,30 +1,30 @@
 """Shared type enums for cuvis-ai ecosystem."""

 from enum import StrEnum


 class ExecutionStage(StrEnum):
     """Execution stages for node filtering.

     Nodes can specify which stages they should execute in to enable
     stage-aware graph execution (e.g., loss nodes only in training).
     """

     ALWAYS = "always"
     TRAIN = "train"
     VAL = "val"
     VALIDATE = "val"
     TEST = "test"
     INFERENCE = "inference"


 class ArtifactType(StrEnum):
     """Types of artifacts with different validation/logging policies.

     Attributes
     ----------
     IMAGE : str
         Image artifact - expects shape (H, W, 1) monocular or (H, W, 3) RGB
     """

     IMAGE = "image"
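
Since both enums derive from StrEnum (Python 3.11+), members compare equal to their string values, and because VAL and VALIDATE share the value "val", VALIDATE is an alias of VAL rather than a distinct member. A short sketch of these standard enum semantics:

    from cuvis_ai_schemas.enums import ArtifactType, ExecutionStage

    # StrEnum members compare equal to plain strings.
    assert ExecutionStage.TRAIN == "train"
    assert ArtifactType.IMAGE == "image"

    # Duplicate values create aliases: VALIDATE resolves to the VAL member.
    assert ExecutionStage.VALIDATE is ExecutionStage.VAL
    assert ExecutionStage("val") is ExecutionStage.VAL

    # Aliases are skipped during iteration, so VALIDATE is not listed.
    print([stage.value for stage in ExecutionStage])
    # ['always', 'train', 'val', 'test', 'inference']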

--- a/cuvis_ai_schemas/execution/__init__.py
+++ b/cuvis_ai_schemas/execution/__init__.py
@@ -1,12 +1,12 @@
 """Execution context schemas."""

 from collections.abc import Iterator
 from typing import Any

 from cuvis_ai_schemas.execution.context import Context
 from cuvis_ai_schemas.execution.monitoring import Artifact, Metric

 # Type alias for data streaming
 InputStream = Iterator[dict[str, Any]]

 __all__ = ["Context", "Artifact", "Metric", "InputStream"]
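
InputStream is a plain type alias, so any iterator of dictionaries satisfies it; a generator is the usual fit. A minimal sketch (the batch keys used here are illustrative, not part of the schema):

    from cuvis_ai_schemas.execution import InputStream


    def stream_batches(n: int) -> InputStream:
        """Yield n placeholder batches; the keys are hypothetical examples."""
        for i in range(n):
            yield {"cube": f"cube_{i}", "batch_idx": i}


    for batch in stream_batches(3):
        print(batch["batch_idx"])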

--- a/cuvis_ai_schemas/execution/context.py
+++ b/cuvis_ai_schemas/execution/context.py
@@ -1,41 +1,41 @@
 """Execution context for pipeline execution."""

 from dataclasses import dataclass

 from cuvis_ai_schemas.enums.types import ExecutionStage


 @dataclass
 class Context:
     """Execution context passed to executor and nodes.

     Contains runtime information that doesn't flow through data edges.
     This replaces mutable global state with explicit context parameters.

     Attributes
     ----------
     stage : ExecutionStage
         Execution stage: "train", "val", "test", "inference"
     epoch : int
         Current training epoch
     batch_idx : int
         Current batch index within epoch
     global_step : int
         Global training step across all epochs

     Examples
     --------
     >>> context = Context(stage=ExecutionStage.TRAIN, epoch=5, batch_idx=42, global_step=1337)
     >>> executor.forward(context=context, batch=batch)

     Notes
     -----
     Future extensions for distributed training:
     - rank: int (process rank in distributed training)
     - world_size: int (total number of processes)
     """

     stage: ExecutionStage = ExecutionStage.INFERENCE
     epoch: int = 0
     batch_idx: int = 0
     global_step: int = 0
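
A sketch of the stage-aware filtering the docstring describes. The LossNode class and its run_stages attribute are hypothetical illustrations, not part of this package:

    from cuvis_ai_schemas.enums import ExecutionStage
    from cuvis_ai_schemas.execution import Context


    class LossNode:
        """Hypothetical node that should only execute during training."""

        run_stages = {ExecutionStage.TRAIN}

        def should_run(self, context: Context) -> bool:
            # Nodes declaring ALWAYS run unconditionally; others match the stage.
            return (
                ExecutionStage.ALWAYS in self.run_stages
                or context.stage in self.run_stages
            )


    node = LossNode()
    assert node.should_run(Context(stage=ExecutionStage.TRAIN, epoch=5))
    assert not node.should_run(Context())  # default stage is INFERENCE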

--- a/cuvis_ai_schemas/execution/monitoring.py
+++ b/cuvis_ai_schemas/execution/monitoring.py
@@ -1,83 +1,83 @@
 """Monitoring schemas for artifacts and metrics."""

 from dataclasses import dataclass
 from typing import TYPE_CHECKING

 from cuvis_ai_schemas.enums.types import ArtifactType, ExecutionStage

 if TYPE_CHECKING:
     import numpy as np


 @dataclass
 class Artifact:
     """Artifact for logging visualizations and data to monitoring systems.

     Attributes
     ----------
     name : str
         Name/identifier for the artifact
     value : np.ndarray
         Numpy array containing the artifact data (shape validated by type)
     el_id : int
         Element ID (e.g., batch item index, image index)
     desc : str
         Human-readable description of the artifact
     type : ArtifactType
         Type of artifact, determines validation and logging policy
     stage : ExecutionStage
         Execution stage when artifact was generated
     epoch : int
         Training epoch when artifact was generated
     batch_idx : int
         Batch index when artifact was generated

     Examples
     --------
     >>> import numpy as np
     >>> artifact = Artifact(
     ...     name="heatmap_img_0",
     ...     value=np.random.rand(256, 256, 3),
     ...     el_id=0,
     ...     desc="Anomaly heatmap for first image",
     ...     type=ArtifactType.IMAGE
     ... )
     """

     name: str
     value: "np.ndarray"
     el_id: int
     desc: str
     type: ArtifactType
     stage: ExecutionStage = ExecutionStage.INFERENCE
     epoch: int = 0
     batch_idx: int = 0


 @dataclass
 class Metric:
     """Metric for logging scalar values to monitoring systems.

     Attributes
     ----------
     name : str
         Name/identifier for the metric
     value : float
         Scalar metric value
     stage : ExecutionStage
         Execution stage when metric was recorded
     epoch : int
         Training epoch when metric was recorded
     batch_idx : int
         Batch index when metric was recorded

     Examples
     --------
     >>> metric = Metric(name="loss/train", value=0.123, stage=ExecutionStage.TRAIN)
     """

     name: str
     value: float
     stage: ExecutionStage = ExecutionStage.INFERENCE
     epoch: int = 0
     batch_idx: int = 0
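
A minimal sketch of building both record types stamped with fields from the current Context; the emit helper is illustrative, and the actual sink depends on whichever monitoring backend consumes these records:

    import numpy as np

    from cuvis_ai_schemas.enums import ArtifactType, ExecutionStage
    from cuvis_ai_schemas.execution import Artifact, Context, Metric


    def emit(context: Context) -> tuple[Artifact, Metric]:
        """Hypothetical helper: stamp monitoring records with context fields."""
        heatmap = Artifact(
            name="heatmap_img_0",
            value=np.random.rand(256, 256, 3),  # (H, W, 3) RGB, per ArtifactType.IMAGE
            el_id=0,
            desc="Anomaly heatmap for first image",
            type=ArtifactType.IMAGE,
            stage=context.stage,
            epoch=context.epoch,
            batch_idx=context.batch_idx,
        )
        loss = Metric(
            name="loss/train",
            value=0.123,
            stage=context.stage,
            epoch=context.epoch,
            batch_idx=context.batch_idx,
        )
        return heatmap, loss


    artifact, metric = emit(Context(stage=ExecutionStage.TRAIN, epoch=5, batch_idx=42))
    print(metric.name, metric.value, metric.stage)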

--- a/cuvis_ai_schemas/extensions/__init__.py
+++ b/cuvis_ai_schemas/extensions/__init__.py
@@ -1,3 +1,3 @@
 """Optional extensions for specific use cases."""

 __all__: list[str] = []

--- a/cuvis_ai_schemas/extensions/ui/__init__.py
+++ b/cuvis_ai_schemas/extensions/ui/__init__.py
@@ -1,8 +1,8 @@
 """UI-specific extensions for port display."""

 from cuvis_ai_schemas.extensions.ui.port_display import (
     DTYPE_COLORS,
     PortDisplaySpec,
 )

 __all__ = ["PortDisplaySpec", "DTYPE_COLORS"]
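
port_display.py itself does not appear in this diff, so the fields of PortDisplaySpec and the contents of DTYPE_COLORS are not visible here. A sketch that inspects the public surface without assuming either:

    import dataclasses

    from cuvis_ai_schemas.extensions import ui

    print(ui.__all__)  # ['PortDisplaySpec', 'DTYPE_COLORS']

    # If PortDisplaySpec is a dataclass, list its field names without guessing them.
    if dataclasses.is_dataclass(ui.PortDisplaySpec):
        print([f.name for f in dataclasses.fields(ui.PortDisplaySpec)])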