xenfra-sdk 0.2.5__py3-none-any.whl → 0.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xenfra_sdk/__init__.py +46 -2
- xenfra_sdk/blueprints/base.py +150 -0
- xenfra_sdk/blueprints/factory.py +99 -0
- xenfra_sdk/blueprints/node.py +219 -0
- xenfra_sdk/blueprints/python.py +57 -0
- xenfra_sdk/blueprints/railpack.py +99 -0
- xenfra_sdk/blueprints/schema.py +70 -0
- xenfra_sdk/cli/main.py +175 -49
- xenfra_sdk/client.py +6 -2
- xenfra_sdk/constants.py +26 -0
- xenfra_sdk/db/session.py +8 -3
- xenfra_sdk/detection.py +262 -191
- xenfra_sdk/dockerizer.py +76 -120
- xenfra_sdk/engine.py +758 -172
- xenfra_sdk/events.py +254 -0
- xenfra_sdk/exceptions.py +9 -0
- xenfra_sdk/governance.py +150 -0
- xenfra_sdk/manifest.py +93 -138
- xenfra_sdk/mcp_client.py +7 -5
- xenfra_sdk/{models.py → models/__init__.py} +17 -1
- xenfra_sdk/models/context.py +61 -0
- xenfra_sdk/orchestrator.py +223 -99
- xenfra_sdk/privacy.py +11 -0
- xenfra_sdk/protocol.py +38 -0
- xenfra_sdk/railpack_adapter.py +357 -0
- xenfra_sdk/railpack_detector.py +587 -0
- xenfra_sdk/railpack_manager.py +312 -0
- xenfra_sdk/recipes.py +152 -19
- xenfra_sdk/resources/activity.py +45 -0
- xenfra_sdk/resources/build.py +157 -0
- xenfra_sdk/resources/deployments.py +22 -2
- xenfra_sdk/resources/intelligence.py +25 -0
- xenfra_sdk-0.2.6.dist-info/METADATA +118 -0
- xenfra_sdk-0.2.6.dist-info/RECORD +49 -0
- {xenfra_sdk-0.2.5.dist-info → xenfra_sdk-0.2.6.dist-info}/WHEEL +1 -1
- xenfra_sdk/templates/Caddyfile.j2 +0 -14
- xenfra_sdk/templates/Dockerfile.j2 +0 -41
- xenfra_sdk/templates/cloud-init.sh.j2 +0 -90
- xenfra_sdk/templates/docker-compose-multi.yml.j2 +0 -29
- xenfra_sdk/templates/docker-compose.yml.j2 +0 -30
- xenfra_sdk-0.2.5.dist-info/METADATA +0 -116
- xenfra_sdk-0.2.5.dist-info/RECORD +0 -38
xenfra_sdk/events.py
ADDED
|
@@ -0,0 +1,254 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Xenfra Events - Structured Event Streaming for Deployment Observability.
|
|
3
|
+
|
|
4
|
+
This module provides deterministic, JSON-serializable events for tracking
|
|
5
|
+
deployment progress. No AI, no guessing - just structured data.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from datetime import datetime, timezone
|
|
9
|
+
from enum import Enum
|
|
10
|
+
from typing import Any, Callable, Dict, List, Optional
|
|
11
|
+
from pydantic import BaseModel, ConfigDict, Field
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class DeploymentPhase(str, Enum):
    """Deployment phases mapped to biological metaphors (Zen Dashboard).

    Each member's value equals its name, so phases serialize to stable
    strings in JSON payloads.
    """

    DNA_ENCODING = "DNA_ENCODING"              # phase 1: SSH key setup, validation
    CELL_BLUEPRINT = "CELL_BLUEPRINT"          # phase 2: Dockerfile / docker-compose generation
    GENESIS_SCRIPT = "GENESIS_SCRIPT"          # phase 3: cloud-init script creation
    CELL_BIRTH = "CELL_BIRTH"                  # phase 4: droplet provisioning
    NEURAL_SYNC = "NEURAL_SYNC"                # phase 5: waiting for SSH, polling
    GENOME_TRANSFER = "GENOME_TRANSFER"        # phase 6: git clone or file upload
    MEMBRANE_FORMATION = "MEMBRANE_FORMATION"  # phase 6.5: writing deployment assets
    CELL_REIFY = "CELL_REIFY"                  # phase 7: docker build and start
    VITALS_CHECK = "VITALS_CHECK"              # phase 8: health check
    MEMORY_COMMIT = "MEMORY_COMMIT"            # phase 9: database persistence
    # Error states (excluded from normal progress accounting):
    APOPTOSIS = "APOPTOSIS"                    # cleanup on failure
    NECROSIS = "NECROSIS"                      # unrecoverable failure
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class EventStatus(str, Enum):
    """Lifecycle status of an event within a deployment phase."""

    STARTED = "started"          # the phase has begun
    IN_PROGRESS = "in_progress"  # incremental progress update
    COMPLETED = "completed"      # the phase finished successfully
    FAILED = "failed"            # the phase aborted with an error
    SKIPPED = "skipped"          # the phase was intentionally not run
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class BuildEvent(BaseModel):
    """
    Structured event for deployment progress tracking.

    100% deterministic - no AI inference, just JSON serialization.
    """

    phase: DeploymentPhase
    status: EventStatus
    # Timezone-aware UTC timestamp, assigned when the event is constructed.
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    message: str = ""
    # Optional progress within the phase; pydantic validates the 0-100 range.
    progress_percent: Optional[int] = Field(None, ge=0, le=100)
    metadata: Dict[str, Any] = Field(default_factory=dict)

    # Fix: ConfigDict is imported at module level but was unused — a plain
    # dict works in pydantic v2 but is not key-validated. ConfigDict is.
    model_config = ConfigDict(
        populate_by_name=True,
        arbitrary_types_allowed=True,
    )

    def to_sse(self) -> str:
        """Format as a Server-Sent Event frame (a ``data:`` line followed by
        a blank line, per the SSE protocol)."""
        return f"data: {self.model_dump_json()}\n\n"
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
class WorkerHeartbeat(BaseModel):
    """
    Heartbeat signal from a background deployment task.

    If no heartbeat received for 90 seconds, task is considered STALE.
    """

    task_id: str
    deployment_id: int
    phase: DeploymentPhase
    # Timezone-aware UTC timestamp, assigned when the heartbeat is constructed.
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    is_alive: bool = True
    # Short human-readable note describing the task's latest action.
    last_activity: str = ""

    # Fix: use the module-level ConfigDict import (previously unused) so the
    # configuration keys are validated by pydantic v2, matching BuildEvent.
    model_config = ConfigDict(
        populate_by_name=True,
        arbitrary_types_allowed=True,
    )
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
class DropletHeartbeat(BaseModel):
    """
    Heartbeat callback from cloud-init on the droplet.

    Sent via curl when the droplet's network is ready.
    """

    # Cloud provider droplet identifier reporting in.
    droplet_id: int
    status: str  # "network_ready", "cloud_init_complete", "docker_ready"
    # Timezone-aware UTC timestamp, assigned when this model is constructed
    # (i.e. on receipt, not from the droplet's own clock).
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    # Droplet's IP address, when known at callback time.
    ip_address: Optional[str] = None
    # Free-form extra payload from the cloud-init script.
    metadata: Dict[str, Any] = Field(default_factory=dict)
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
class EventEmitter:
    """
    Event emitter for deployment progress tracking.

    Collects BuildEvents in order and fans each one out to an optional
    plain-string logger (legacy path) and an optional structured
    callback (SSE/WebSocket path).
    """

    def __init__(
        self,
        logger: Optional[Callable[[str], None]] = None,
        event_callback: Optional[Callable[[BuildEvent], None]] = None,
    ):
        """
        Initialize the event emitter.

        Args:
            logger: Traditional string logger (backward compatibility)
            event_callback: Structured event callback (for SSE/WebSocket)
        """
        # Fall back to a no-op so emit() may call the logger unconditionally.
        self.logger = logger if logger is not None else (lambda _msg: None)
        self.event_callback = event_callback
        self.events: List[BuildEvent] = []
        self.current_phase: Optional[DeploymentPhase] = None

    def start(self):
        """Reset the event emitter for a new trace."""
        self.current_phase = None
        self.events = []
        self.logger("--- Starting Event Stream ---")

    def emit(
        self,
        phase: DeploymentPhase,
        status: EventStatus,
        message: str = "",
        progress_percent: Optional[int] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> BuildEvent:
        """
        Emit a structured event.

        Args:
            phase: The deployment phase
            status: Status within the phase
            message: Human-readable message
            progress_percent: Optional progress (0-100)
            metadata: Additional structured data

        Returns:
            The emitted BuildEvent
        """
        event = BuildEvent(
            phase=phase,
            status=status,
            message=message,
            progress_percent=progress_percent,
            metadata={} if metadata is None else metadata,
        )

        self.current_phase = phase
        self.events.append(event)

        # Legacy path: mirror the event as a human-readable log line.
        self.logger(f"[{phase.value}] {status.value}: {message}")

        # Structured path: push to SSE/WebSocket consumers, if wired up.
        callback = self.event_callback
        if callback is not None:
            callback(event)

        return event

    def start_phase(
        self,
        phase: DeploymentPhase,
        message: str = "",
        metadata: Optional[Dict[str, Any]] = None,
    ) -> BuildEvent:
        """Convenience method to start a phase."""
        return self.emit(phase, EventStatus.STARTED, message, metadata=metadata)

    def complete_phase(
        self,
        phase: DeploymentPhase,
        message: str = "",
        metadata: Optional[Dict[str, Any]] = None,
    ) -> BuildEvent:
        """Convenience method to complete a phase (implies 100% progress)."""
        return self.emit(
            phase,
            EventStatus.COMPLETED,
            message,
            progress_percent=100,
            metadata=metadata,
        )

    def fail_phase(
        self,
        phase: DeploymentPhase,
        message: str,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> BuildEvent:
        """Convenience method to mark a phase as failed."""
        return self.emit(phase, EventStatus.FAILED, message, metadata=metadata)

    def progress(
        self,
        phase: DeploymentPhase,
        percent: int,
        message: str = "",
        metadata: Optional[Dict[str, Any]] = None,
    ) -> BuildEvent:
        """Convenience method to report progress within a phase."""
        return self.emit(
            phase,
            EventStatus.IN_PROGRESS,
            message,
            progress_percent=percent,
            metadata=metadata,
        )

    def get_events(self) -> List[BuildEvent]:
        """Return a shallow copy of all emitted events."""
        return list(self.events)

    def get_last_event(self) -> Optional[BuildEvent]:
        """Return the most recent event, or None if nothing was emitted yet."""
        if not self.events:
            return None
        return self.events[-1]
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
# Phase progress mapping (for calculating overall progress)
|
|
223
|
+
# Relative weight of each phase toward overall progress. The weights sum
# to 100; the error states (APOPTOSIS/NECROSIS) are deliberately excluded.
PHASE_WEIGHTS = {
    DeploymentPhase.DNA_ENCODING: 5,
    DeploymentPhase.CELL_BLUEPRINT: 10,
    DeploymentPhase.GENESIS_SCRIPT: 5,
    DeploymentPhase.CELL_BIRTH: 15,
    DeploymentPhase.NEURAL_SYNC: 20,  # heaviest: SSH polling dominates wall time
    DeploymentPhase.GENOME_TRANSFER: 15,
    DeploymentPhase.MEMBRANE_FORMATION: 5,
    DeploymentPhase.CELL_REIFY: 15,
    DeploymentPhase.VITALS_CHECK: 5,
    DeploymentPhase.MEMORY_COMMIT: 5,
}
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
def calculate_overall_progress(events: List[BuildEvent]) -> int:
    """
    Calculate overall deployment progress from events.

    A phase counts as done when at least one of its events has status
    COMPLETED; duplicate completions of the same phase are counted once.

    Args:
        events: Emitted BuildEvents, in any order.

    Returns:
        Percentage (0-100) of the total phase weight that is completed.
        Returns 0 for an empty event list, and guards against a
        ZeroDivisionError should PHASE_WEIGHTS ever be empty.
    """
    total_weight = sum(PHASE_WEIGHTS.values())
    if total_weight == 0:
        return 0

    # Distinct weighted phases with at least one COMPLETED event.
    completed_phases = {
        event.phase
        for event in events
        if event.status == EventStatus.COMPLETED and event.phase in PHASE_WEIGHTS
    }
    completed_weight = sum(PHASE_WEIGHTS[phase] for phase in completed_phases)

    return int((completed_weight / total_weight) * 100)
|
xenfra_sdk/exceptions.py
CHANGED
|
@@ -17,3 +17,12 @@ class XenfraAPIError(XenfraError):
|
|
|
17
17
|
self.status_code = status_code
|
|
18
18
|
self.detail = detail
|
|
19
19
|
super().__init__(f"API Error {status_code}: {detail}")
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class DeploymentError(XenfraError):
    """Custom exception for deployment failures."""

    def __init__(self, message: str, stage: str = "Unknown") -> None:
        # Keep the raw pieces available so handlers can report the failing
        # stage and the underlying message separately.
        self.message = message
        self.stage = stage
        super().__init__(f"Deployment failed at stage '{stage}': {message}")
|
xenfra_sdk/governance.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Xenfra Governance - Tier-Based Resource Limits.
|
|
3
|
+
|
|
4
|
+
This module defines deterministic resource limits based on user tier.
|
|
5
|
+
No AI, no guessing - just tier → limits mapping.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from enum import Enum
|
|
9
|
+
from typing import Dict
|
|
10
|
+
from pydantic import BaseModel
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class UserTier(str, Enum):
    """User subscription tiers.

    PRO and ENTERPRISE are distinct members with their own string values;
    the TIER_* dictionaries in this module map them onto the same limits
    as JUNIOR_SRE and SENIOR_SRE respectively.
    """

    FREE = "FREE"
    JUNIOR_SRE = "JUNIOR_SRE"
    SENIOR_SRE = "SENIOR_SRE"
    PRO = "PRO"                # limits mirror JUNIOR_SRE
    ENTERPRISE = "ENTERPRISE"  # limits mirror SENIOR_SRE
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class ResourceLimits(BaseModel):
    """Docker cgroups resource limits."""

    # Sizes use Docker's CLI syntax ("512m", "2g"); CPUs may be fractional.
    memory: str = "512m"  # Docker memory limit format (hard cap)
    cpus: float = 0.5  # CPU cores (can be fractional)
    memory_reserved: str = "128m"  # Minimum guaranteed memory
    cpus_reserved: float = 0.25  # Minimum guaranteed CPU
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
# Tier-based resource limits - deterministic mapping
|
|
31
|
+
TIER_RESOURCE_LIMITS: Dict[UserTier, ResourceLimits] = {
    UserTier.FREE: ResourceLimits(
        memory="512m",
        cpus=0.5,
        memory_reserved="128m",
        cpus_reserved=0.25,
    ),
    UserTier.JUNIOR_SRE: ResourceLimits(
        memory="2g",
        cpus=1.0,
        memory_reserved="512m",
        cpus_reserved=0.5,
    ),
    UserTier.SENIOR_SRE: ResourceLimits(
        memory="4g",
        cpus=2.0,
        memory_reserved="1g",
        cpus_reserved=1.0,
    ),
}

# Add aliases for tiers. PRO/ENTERPRISE share the *same* ResourceLimits
# objects as JUNIOR_SRE/SENIOR_SRE (references, not copies).
TIER_RESOURCE_LIMITS[UserTier.PRO] = TIER_RESOURCE_LIMITS[UserTier.JUNIOR_SRE]
TIER_RESOURCE_LIMITS[UserTier.ENTERPRISE] = TIER_RESOURCE_LIMITS[UserTier.SENIOR_SRE]
|
|
55
|
+
|
|
56
|
+
# Tier-based polling intervals (seconds)
|
|
57
|
+
TIER_POLLING_INTERVALS: Dict[UserTier, int] = {
    UserTier.FREE: 150,       # ~2.5 minutes between polls
    UserTier.JUNIOR_SRE: 60,  # 1 minute
    UserTier.SENIOR_SRE: 15,  # 15 seconds — near-real-time
}
# Aliases: PRO/ENTERPRISE poll at the JUNIOR_SRE/SENIOR_SRE cadence.
TIER_POLLING_INTERVALS[UserTier.PRO] = TIER_POLLING_INTERVALS[UserTier.JUNIOR_SRE]
TIER_POLLING_INTERVALS[UserTier.ENTERPRISE] = TIER_POLLING_INTERVALS[UserTier.SENIOR_SRE]
|
|
64
|
+
|
|
65
|
+
# Tier-based node limits
|
|
66
|
+
TIER_NODE_LIMITS: Dict[UserTier, int] = {
    UserTier.FREE: 1,
    UserTier.JUNIOR_SRE: 10,
    UserTier.SENIOR_SRE: 25,
}
# Fix: the PRO/ENTERPRISE alias entries were missing here (unlike
# TIER_RESOURCE_LIMITS and TIER_POLLING_INTERVALS), so a direct lookup
# for those tiers raised KeyError. Mirror the aliasing convention.
TIER_NODE_LIMITS[UserTier.PRO] = TIER_NODE_LIMITS[UserTier.JUNIOR_SRE]
TIER_NODE_LIMITS[UserTier.ENTERPRISE] = TIER_NODE_LIMITS[UserTier.SENIOR_SRE]
|
|
71
|
+
|
|
72
|
+
# Tier-based auto-heal limits (concurrent fixes)
|
|
73
|
+
TIER_AUTO_HEAL_LIMITS: Dict[UserTier, int] = {
    UserTier.FREE: 0,        # Manual fixes only
    UserTier.JUNIOR_SRE: 3,  # 3 auto-heal loops
    UserTier.SENIOR_SRE: 10,  # Twin isolation/sandboxing
}
# Fix: the PRO/ENTERPRISE alias entries were missing here, so a direct
# lookup for those tiers raised KeyError. Mirror the aliasing convention
# used by TIER_RESOURCE_LIMITS / TIER_POLLING_INTERVALS.
TIER_AUTO_HEAL_LIMITS[UserTier.PRO] = TIER_AUTO_HEAL_LIMITS[UserTier.JUNIOR_SRE]
TIER_AUTO_HEAL_LIMITS[UserTier.ENTERPRISE] = TIER_AUTO_HEAL_LIMITS[UserTier.SENIOR_SRE]
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def get_resource_limits(tier: str) -> ResourceLimits:
    """
    Get resource limits for a user tier.

    Args:
        tier: User tier string (FREE, JUNIOR_SRE, SENIOR_SRE),
            case-insensitive. Unknown values fall back to FREE.

    Returns:
        ResourceLimits object with cgroups configuration
    """
    try:
        user_tier = UserTier(tier.upper())
    except (ValueError, AttributeError):
        # ValueError: unknown tier string. AttributeError: tier was None
        # or not a str (previously crashed). Both default to FREE.
        user_tier = UserTier.FREE

    # .get() guards against a tier ever missing from the mapping.
    return TIER_RESOURCE_LIMITS.get(user_tier, TIER_RESOURCE_LIMITS[UserTier.FREE])
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def get_polling_interval(tier: str) -> int:
    """
    Get polling interval for a user tier.

    Args:
        tier: User tier string, case-insensitive. Unknown values fall
            back to FREE.

    Returns:
        Polling interval in seconds
    """
    try:
        user_tier = UserTier(tier.upper())
    except (ValueError, AttributeError):
        # ValueError: unknown tier string. AttributeError: tier was None
        # or not a str (previously crashed). Both default to FREE.
        user_tier = UserTier.FREE

    # .get() guards against a tier ever missing from the mapping.
    return TIER_POLLING_INTERVALS.get(user_tier, TIER_POLLING_INTERVALS[UserTier.FREE])
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def get_node_limit(tier: str) -> int:
    """
    Get maximum nodes allowed for a user tier.

    Args:
        tier: User tier string, case-insensitive. Unknown values fall
            back to FREE.

    Returns:
        Maximum number of nodes
    """
    try:
        user_tier = UserTier(tier.upper())
    except (ValueError, AttributeError):
        # ValueError: unknown tier string. AttributeError: tier was None
        # or not a str (previously crashed). Both default to FREE.
        user_tier = UserTier.FREE

    # Fix: TIER_NODE_LIMITS lacked PRO/ENTERPRISE alias entries, so a
    # plain [] lookup raised KeyError for those tiers. .get() with a FREE
    # fallback makes this function safe regardless of the table contents.
    return TIER_NODE_LIMITS.get(user_tier, TIER_NODE_LIMITS[UserTier.FREE])
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def get_auto_heal_limit(tier: str) -> int:
    """
    Get auto-heal limit for a user tier.

    Args:
        tier: User tier string, case-insensitive. Unknown values fall
            back to FREE.

    Returns:
        Maximum concurrent auto-heal operations
    """
    try:
        user_tier = UserTier(tier.upper())
    except (ValueError, AttributeError):
        # ValueError: unknown tier string. AttributeError: tier was None
        # or not a str (previously crashed). Both default to FREE.
        user_tier = UserTier.FREE

    # Fix: TIER_AUTO_HEAL_LIMITS lacked PRO/ENTERPRISE alias entries, so a
    # plain [] lookup raised KeyError for those tiers. .get() with a FREE
    # fallback makes this function safe regardless of the table contents.
    return TIER_AUTO_HEAL_LIMITS.get(user_tier, TIER_AUTO_HEAL_LIMITS[UserTier.FREE])
|