coreason-manifest 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,41 @@
1
+ # Prosperity-3.0
2
+ from .engine import ManifestConfig, ManifestEngine
3
+ from .errors import (
4
+ IntegrityCompromisedError,
5
+ ManifestError,
6
+ ManifestSyntaxError,
7
+ PolicyViolationError,
8
+ )
9
+ from .integrity import IntegrityChecker
10
+ from .loader import ManifestLoader
11
+ from .models import (
12
+ AgentDefinition,
13
+ AgentDependencies,
14
+ AgentInterface,
15
+ AgentMetadata,
16
+ AgentTopology,
17
+ ModelConfig,
18
+ Step,
19
+ )
20
+ from .policy import PolicyEnforcer
21
+ from .validator import SchemaValidator
22
+
23
# Public, re-exported API of the coreason_manifest package
# (kept alphabetically sorted).
__all__ = [
    "AgentDefinition",
    "AgentDependencies",
    "AgentInterface",
    "AgentMetadata",
    "AgentTopology",
    "IntegrityChecker",
    "IntegrityCompromisedError",
    "ManifestConfig",
    "ManifestEngine",
    "ManifestError",
    "ManifestLoader",
    "ManifestSyntaxError",
    "ModelConfig",
    "PolicyEnforcer",
    "PolicyViolationError",
    "SchemaValidator",
    "Step",
]
@@ -0,0 +1,117 @@
1
+ # Prosperity-3.0
2
+ from __future__ import annotations
3
+
4
+ import time
5
+ from dataclasses import dataclass, field
6
+ from pathlib import Path
7
+ from typing import List, Optional, Union
8
+
9
+ from coreason_manifest.integrity import IntegrityChecker
10
+ from coreason_manifest.loader import ManifestLoader
11
+ from coreason_manifest.models import AgentDefinition
12
+ from coreason_manifest.policy import PolicyEnforcer
13
+
14
+ # Import logger from utils to ensure configuration is applied
15
+ from coreason_manifest.utils.logger import logger
16
+ from coreason_manifest.validator import SchemaValidator
17
+
18
+
19
@dataclass
class ManifestConfig:
    """Configuration for the ManifestEngine."""

    # Path to the policy evaluated by the PolicyEnforcer (Rego, per the
    # engine's "Enforce Policy (Rego)" step).
    policy_path: Union[str, Path]
    # Executable name or path of the OPA binary.
    opa_path: str = "opa"
    # Optional TBOM document; when set it is appended to the enforcer's
    # data paths.
    tbom_path: Optional[Union[str, Path]] = None
    # Additional data documents handed to the PolicyEnforcer.
    extra_data_paths: List[Union[str, Path]] = field(default_factory=list)
27
+
28
+
29
class ManifestEngine:
    """
    The main entry point for verifying and loading Agent Manifests.
    """

    def __init__(self, config: ManifestConfig) -> None:
        """
        Initialize the ManifestEngine.

        Args:
            config: Configuration including policy path and OPA path.
        """
        self.config = config
        self.schema_validator = SchemaValidator()

        # The TBOM document, when configured, is just another data
        # document supplied to the policy enforcer.
        data_paths = list(config.extra_data_paths)
        if config.tbom_path:
            data_paths.append(config.tbom_path)

        self.policy_enforcer = PolicyEnforcer(
            policy_path=config.policy_path,
            opa_path=config.opa_path,
            data_paths=data_paths,
        )

    def load_and_validate(self, manifest_path: Union[str, Path], source_dir: Union[str, Path]) -> AgentDefinition:
        """
        Loads, validates, and verifies an Agent Manifest.

        Steps:
            1. Load raw YAML.
            2. Validate against JSON Schema.
            3. Convert to AgentDefinition Pydantic model (Normalization).
            4. Enforce Policy (Rego).
            5. Verify Integrity (Hash check).

        Args:
            manifest_path: Path to the agent.yaml file.
            source_dir: Path to the source code directory.

        Returns:
            AgentDefinition: The fully validated and verified agent definition.

        Raises:
            ManifestSyntaxError: If structure or schema is invalid.
            PolicyViolationError: If business rules are violated.
            IntegrityCompromisedError: If source code hash does not match.
            FileNotFoundError: If files are missing.
        """
        manifest_path = Path(manifest_path)
        source_dir = Path(source_dir)

        # Lazy %-style args avoid string formatting when the level is disabled.
        logger.info("Validating Agent Manifest: %s", manifest_path)

        # 1. Load Raw YAML
        raw_data = ManifestLoader.load_raw_from_file(manifest_path)

        # 2. Schema Validation
        logger.debug("Running Schema Validation...")
        self.schema_validator.validate(raw_data)

        # 3. Model Conversion (Normalization)
        logger.debug("Converting to AgentDefinition...")
        agent_def = ManifestLoader.load_from_dict(raw_data)
        logger.info("Validating Agent %s v%s", agent_def.metadata.id, agent_def.metadata.version)

        # 4. Policy Enforcement.
        # The policy is checked against the normalized model rather than the
        # raw input to prevent bypasses; mode="json" converts UUIDs/dates to
        # strings, which is what OPA usually expects.
        logger.debug("Enforcing Policies...")
        normalized_data = agent_def.model_dump(mode="json")
        start_time = time.perf_counter()
        try:
            self.policy_enforcer.evaluate(normalized_data)
        except Exception:
            duration_ms = (time.perf_counter() - start_time) * 1000
            logger.info("Policy Check: Fail - %.2fms", duration_ms)
            raise
        else:
            duration_ms = (time.perf_counter() - start_time) * 1000
            logger.info("Policy Check: Pass - %.2fms", duration_ms)

        # 5. Integrity Check
        logger.debug("Verifying Integrity...")
        IntegrityChecker.verify(agent_def, source_dir, manifest_path=manifest_path)

        logger.info("Agent validation successful.")
        return agent_def
@@ -0,0 +1,28 @@
1
+ # Prosperity-3.0
2
+ from __future__ import annotations
3
+
4
+
5
class ManifestError(Exception):
    """Base exception for coreason_manifest errors."""
    # NOTE: the redundant `pass` after the docstring was removed; the
    # docstring alone is a valid class body.
9
+
10
+
11
class ManifestSyntaxError(ManifestError):
    """Raised when the manifest YAML is invalid or missing required fields."""
    # NOTE: the redundant `pass` after the docstring was removed.
15
+
16
+
17
class PolicyViolationError(ManifestError):
    """Raised when the agent violates a compliance policy."""

    def __init__(self, message: str, violations: list[str] | None = None) -> None:
        """Record the failure message and the list of violated rules."""
        super().__init__(message)
        # A missing (or empty) violation list normalizes to an empty list.
        if violations:
            self.violations = violations
        else:
            self.violations = []
23
+
24
+
25
class IntegrityCompromisedError(ManifestError):
    """Raised when the source code hash does not match the manifest."""
    # NOTE: the redundant `pass` after the docstring was removed.
@@ -0,0 +1,136 @@
1
+ # Prosperity-3.0
2
+ from __future__ import annotations
3
+
4
+ import hashlib
5
+ import os
6
+ from pathlib import Path
7
+ from typing import List, Optional, Set, Union
8
+
9
+ from coreason_manifest.errors import IntegrityCompromisedError
10
+ from coreason_manifest.models import AgentDefinition
11
+
12
+
13
class IntegrityChecker:
    """
    Component D: IntegrityChecker (The Notary).

    Responsibility:
        - Calculate the SHA256 hash of the source code directory.
        - Compare it against the integrity_hash defined in the manifest.
    """

    IGNORED_DIRS = frozenset({".git", "__pycache__", ".venv", ".env", ".DS_Store"})

    @staticmethod
    def calculate_hash(source_dir: Union[Path, str], exclude_files: Optional[Set[Union[Path, str]]] = None) -> str:
        """
        Compute a deterministic SHA256 digest of a source code directory.

        The tree is traversed with os.walk (pruning ignored directories in
        place), the surviving files are ordered by their POSIX-style relative
        path, and each file contributes its relative path bytes followed by
        its content to a single running digest.

        Names listed in IGNORED_DIRS are skipped both as directories and as
        file names. Symbolic links anywhere in the tree are rejected.

        Args:
            source_dir: The directory containing source code.
            exclude_files: Optional set of file paths (absolute or relative to CWD) to exclude from hashing.

        Returns:
            The hex digest of the SHA256 hash.

        Raises:
            FileNotFoundError: If source_dir does not exist.
            IntegrityCompromisedError: If a symlink is found.
        """
        candidate = Path(source_dir)
        if candidate.is_symlink():
            raise IntegrityCompromisedError(f"Symbolic links are forbidden: {candidate}")

        root_dir = candidate.resolve()
        if not root_dir.exists():
            raise FileNotFoundError(f"Source directory not found: {root_dir}")

        # Resolve exclusions once so comparisons use canonical absolute paths.
        skip = {Path(p).resolve() for p in exclude_files} if exclude_files else set()

        collected: List[Path] = []
        for current, subdirs, names in os.walk(root_dir, topdown=True):
            base = Path(current)

            # Reject symlinked subdirectories before any pruning happens.
            for sub in subdirs:
                d_path = base / sub
                if d_path.is_symlink():
                    raise IntegrityCompromisedError(f"Symbolic links are forbidden: {d_path}")  # pragma: no cover

            # In-place slice assignment stops os.walk descending into
            # ignored directories.
            subdirs[:] = [sub for sub in subdirs if sub not in IntegrityChecker.IGNORED_DIRS]

            for name in names:
                entry = base / name

                if entry.is_symlink():
                    raise IntegrityCompromisedError(f"Symbolic links are forbidden: {entry}")

                if name in IntegrityChecker.IGNORED_DIRS:
                    continue

                resolved = entry.resolve()
                if resolved in skip:
                    continue
                collected.append(resolved)

        # POSIX-style relative paths give one deterministic, case-sensitive
        # ordering on every platform (Windows vs Linux).
        collected.sort(key=lambda p: p.relative_to(root_dir).as_posix())

        digest = hashlib.sha256()
        for entry in collected:
            # Folding the relative path in makes renames/moves change the
            # hash, not just content edits; forward slashes keep it portable.
            digest.update(entry.relative_to(root_dir).as_posix().encode("utf-8"))

            # Stream file contents in fixed-size chunks.
            with open(entry, "rb") as handle:
                while chunk := handle.read(8192):
                    digest.update(chunk)

        return digest.hexdigest()

    @staticmethod
    def verify(
        agent_def: AgentDefinition,
        source_dir: Union[Path, str],
        manifest_path: Optional[Union[Path, str]] = None,
    ) -> None:
        """
        Verifies the integrity of the source code against the manifest.

        Args:
            agent_def: The AgentDefinition containing the expected hash.
            source_dir: The directory containing source code.
            manifest_path: Optional path to the manifest file to exclude from hashing.

        Raises:
            IntegrityCompromisedError: If the hash does not match or is missing.
            FileNotFoundError: If source_dir does not exist.
        """
        # The manifest itself cannot contain its own hash, so it is excluded.
        to_exclude = {manifest_path} if manifest_path else None

        # agent_def.integrity_hash is a required field on the Pydantic model.
        actual = IntegrityChecker.calculate_hash(source_dir, exclude_files=to_exclude)

        if actual != agent_def.integrity_hash:
            raise IntegrityCompromisedError(
                f"Integrity check failed. Expected {agent_def.integrity_hash}, got {actual}"
            )
@@ -0,0 +1,125 @@
1
+ # Prosperity-3.0
2
+ from __future__ import annotations
3
+
4
+ from pathlib import Path
5
+ from typing import Any, Union
6
+
7
+ import yaml
8
+ from pydantic import ValidationError
9
+
10
+ from coreason_manifest.errors import ManifestSyntaxError
11
+ from coreason_manifest.models import AgentDefinition
12
+
13
+
14
class ManifestLoader:
    """
    Component A: ManifestLoader (The Parser).

    Responsibility:
        - Load YAML safely.
        - Convert raw data into a Pydantic AgentDefinition model.
        - Normalization: Ensure all version strings follow SemVer and all IDs are canonical UUIDs.
    """

    @staticmethod
    def load_raw_from_file(path: Union[str, Path]) -> dict[str, Any]:
        """
        Loads the raw dict from a YAML file.

        The returned dict is normalized (see _normalize_data) so raw
        consumers and load_from_dict observe the same canonical form.

        Args:
            path: The path to the agent.yaml file.

        Returns:
            dict: The raw dictionary content.

        Raises:
            ManifestSyntaxError: If YAML is invalid.
            FileNotFoundError: If the file does not exist.
        """
        path_obj = Path(path)
        if not path_obj.exists():
            raise FileNotFoundError(f"Manifest file not found: {path}")

        try:
            with open(path_obj, "r", encoding="utf-8") as f:
                # safe_load is recommended for untrusted input
                data = yaml.safe_load(f)
        except yaml.YAMLError as e:
            raise ManifestSyntaxError(f"Failed to parse YAML file {path}: {str(e)}") from e
        except FileNotFoundError:
            # Race: the file vanished between the exists() check and open();
            # propagate unchanged rather than wrapping as a syntax error.
            raise
        except OSError as e:
            raise ManifestSyntaxError(f"Error reading file {path}: {str(e)}") from e

        if not isinstance(data, dict):
            raise ManifestSyntaxError(f"Invalid YAML content in {path}: must be a dictionary.")

        ManifestLoader._normalize_data(data)
        return data

    @staticmethod
    def load_from_file(path: Union[str, Path]) -> AgentDefinition:
        """
        Loads the agent manifest from a YAML file.

        Args:
            path: The path to the agent.yaml file.

        Returns:
            AgentDefinition: The validated Pydantic model.

        Raises:
            ManifestSyntaxError: If YAML is invalid or Pydantic validation fails.
            FileNotFoundError: If the file does not exist.
        """
        data = ManifestLoader.load_raw_from_file(path)
        return ManifestLoader.load_from_dict(data)

    @staticmethod
    def load_from_dict(data: dict[str, Any]) -> AgentDefinition:
        """
        Converts a dictionary into an AgentDefinition model.

        Args:
            data: The raw dictionary. It is normalized IN PLACE before
                validation (a defensive deep copy would be needlessly costly).

        Returns:
            AgentDefinition: The validated Pydantic model.

        Raises:
            ManifestSyntaxError: If Pydantic validation fails.
        """
        # Normalization must happen before Pydantic validation; further
        # normalization (UUID, SemVer) happens via Pydantic validators.
        ManifestLoader._normalize_data(data)
        try:
            return AgentDefinition.model_validate(data)
        except ValidationError as e:
            # Convert Pydantic ValidationError to ManifestSyntaxError
            raise ManifestSyntaxError(f"Manifest validation failed: {str(e)}") from e

    @staticmethod
    def _normalize_data(data: dict[str, Any]) -> None:
        """
        Normalizes the data dictionary in place.

        Strips ALL leading 'v'/'V' characters from metadata.version so this
        helper agrees with models.normalize_version, which also removes
        repeated prefixes (the previous single-character strip left
        'vv1.2.3' only partially normalized).
        """
        metadata = data.get("metadata")
        if isinstance(metadata, dict):
            version = metadata.get("version")
            if isinstance(version, str):
                metadata["version"] = version.lstrip("vV")
@@ -0,0 +1,16 @@
1
+ # Copyright (c) 2025 CoReason, Inc.
2
+ #
3
+ # This software is proprietary and dual-licensed.
4
+ # Licensed under the Prosperity Public License 3.0 (the "License").
5
+ # A copy of the license is available at https://prosperitylicense.com/versions/3.0.0
6
+ # For details, see the LICENSE file.
7
+ # Commercial use beyond a 30-day trial requires a separate license.
8
+ #
9
+ # Source Code: https://github.com/CoReason-AI/coreason_manifest
10
+
11
+ from coreason_manifest.utils.logger import logger
12
+
13
+
14
def hello_world() -> str:
    """Log the canonical greeting at INFO level and return it."""
    greeting = "Hello World!"
    logger.info(greeting)
    return greeting
@@ -0,0 +1,156 @@
1
+ # Prosperity-3.0
2
+ from __future__ import annotations
3
+
4
+ from datetime import datetime
5
+ from types import MappingProxyType
6
+ from typing import Any, Dict, List, Mapping, Optional, Tuple
7
+ from uuid import UUID
8
+
9
+ from pydantic import (
10
+ AfterValidator,
11
+ AnyUrl,
12
+ BaseModel,
13
+ ConfigDict,
14
+ Field,
15
+ PlainSerializer,
16
+ field_validator,
17
+ )
18
+ from typing_extensions import Annotated
19
+
20
# SemVer Regex pattern (simplified for standard SemVer)
# Modified to accept optional 'v' or 'V' prefix (multiple allowed) for input normalization.
# Capture groups: major.minor.patch, optional -prerelease, optional +build metadata.
SEMVER_REGEX = (
    r"^[vV]*(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)"
    r"(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?"
    r"(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"
)
27
+
28
+
29
+ def normalize_version(v: str) -> str:
30
+ """Normalize version string by recursively stripping 'v' or 'V' prefix."""
31
+ while v.lower().startswith("v"):
32
+ v = v[1:]
33
+ return v
34
+
35
+
36
# Annotated type that validates SemVer regex (allowing multiple 'v' prefixes)
# then normalizes to strict SemVer (no 'v') via normalize_version.
VersionStr = Annotated[
    str,
    Field(pattern=SEMVER_REGEX),
    AfterValidator(normalize_version),
]

# Reusable immutable dictionary type: validated mappings are wrapped in a
# read-only MappingProxyType and serialized back to a plain dict.
ImmutableDict = Annotated[
    Mapping[str, Any],
    AfterValidator(lambda x: MappingProxyType(x)),
    PlainSerializer(lambda x: dict(x), return_type=Dict[str, Any]),
]


# Strict URI type: validated as AnyUrl but serialized to a plain string.
StrictUri = Annotated[
    AnyUrl,
    PlainSerializer(lambda x: str(x), return_type=str),
]
56
+
57
+
58
class AgentMetadata(BaseModel):
    """Metadata for the Agent."""

    # Strict and immutable: unknown keys rejected, instances frozen.
    model_config = ConfigDict(extra="forbid", frozen=True)

    id: UUID = Field(..., description="Unique Identifier for the Agent (UUID).")
    # VersionStr accepts an optional 'v'/'V' prefix on input and stores the
    # normalized (prefix-free) SemVer string.
    version: VersionStr = Field(..., description="Semantic Version of the Agent.")
    name: str = Field(..., min_length=1, description="Name of the Agent.")
    author: str = Field(..., min_length=1, description="Author of the Agent.")
    created_at: datetime = Field(..., description="Creation timestamp (ISO 8601).")
68
+
69
+
70
class AgentInterface(BaseModel):
    """Interface definition for the Agent."""

    # Strict and immutable: unknown keys rejected, instances frozen.
    model_config = ConfigDict(extra="forbid", frozen=True)

    # Both sides are stored as read-only mappings (see ImmutableDict) but
    # serialize back to plain dicts.
    inputs: ImmutableDict = Field(..., description="Typed arguments the agent accepts (JSON Schema).")
    outputs: ImmutableDict = Field(..., description="Typed structure of the result.")
77
+
78
+
79
class Step(BaseModel):
    """A single step in the execution graph."""

    # Strict and immutable: unknown keys rejected, instances frozen.
    model_config = ConfigDict(extra="forbid", frozen=True)

    # Uniqueness across a topology is enforced by AgentTopology's validator.
    id: str = Field(..., min_length=1, description="Unique identifier for the step.")
    description: Optional[str] = Field(None, description="Description of the step.")
86
+
87
+
88
class ModelConfig(BaseModel):
    """LLM Configuration parameters."""

    # Strict and immutable: unknown keys rejected, instances frozen.
    model_config = ConfigDict(extra="forbid", frozen=True)

    model: str = Field(..., description="The LLM model identifier.")
    # Constrained to the 0.0-2.0 sampling range.
    temperature: float = Field(..., ge=0.0, le=2.0, description="Temperature for generation.")
95
+
96
+
97
class AgentTopology(BaseModel):
    """Topology of the Agent execution."""

    # Strict and immutable: unknown keys rejected, instances frozen.
    model_config = ConfigDict(extra="forbid", frozen=True)

    steps: Tuple[Step, ...] = Field(..., description="A directed acyclic graph (DAG) of execution steps.")
    # Aliased because `model_config` is reserved for Pydantic configuration.
    llm_config: ModelConfig = Field(..., alias="model_config", description="Specific LLM parameters.")

    @field_validator("steps")
    @classmethod
    def validate_unique_step_ids(cls, v: Tuple[Step, ...]) -> Tuple[Step, ...]:
        """Ensure all step IDs are unique.

        Raises:
            ValueError: Listing each duplicated ID in first-seen order
                (a deterministic message, unlike iterating a set).
        """
        # Single pass: an ID is recorded as a duplicate the first time it
        # repeats; the list preserves a stable reporting order.
        seen = set()
        dupes = []
        for step in v:
            if step.id in seen and step.id not in dupes:
                dupes.append(step.id)
            seen.add(step.id)
        if dupes:
            raise ValueError(f"Duplicate step IDs found: {', '.join(dupes)}")
        return v
120
+
121
+
122
class AgentDependencies(BaseModel):
    """External dependencies for the Agent."""

    # Strict and immutable: unknown keys rejected, instances frozen.
    model_config = ConfigDict(extra="forbid", frozen=True)

    # StrictUri (backed by AnyUrl) strictly enforces valid URI formatting
    # and serializes each entry back to a plain string.
    tools: List[StrictUri] = Field(default_factory=list, description="List of MCP capability URIs required.")
    # Stored as a tuple to keep the collection immutable alongside frozen=True.
    libraries: Tuple[str, ...] = Field(
        default_factory=tuple, description="List of Python packages required (if code execution is allowed)."
    )
133
+
134
+
135
class AgentDefinition(BaseModel):
    """The Root Object for the CoReason Agent Manifest."""

    model_config = ConfigDict(
        extra="forbid",
        frozen=True,
        title="CoReason Agent Manifest",
        # Identity metadata embedded in the generated JSON Schema.
        json_schema_extra={
            "$id": "https://coreason.ai/schemas/agent.schema.json",
            "description": "The definitive source of truth for CoReason Agent definitions.",
        },
    )

    metadata: AgentMetadata
    interface: AgentInterface
    topology: AgentTopology
    dependencies: AgentDependencies
    # 64-hex-character SHA256 digest of the source tree, compared by
    # IntegrityChecker.verify.
    integrity_hash: str = Field(
        ...,
        pattern=r"^[a-fA-F0-9]{64}$",
        description="SHA256 hash of the source code.",
    )