labrun-checks 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- labrun_checks/__init__.py +69 -0
- labrun_checks/_api.py +47 -0
- labrun_checks/_cleanup.py +97 -0
- labrun_checks/_state.py +25 -0
- labrun_checks/_types.py +56 -0
- labrun_checks/adapters/__init__.py +23 -0
- labrun_checks/adapters/aws.py +256 -0
- labrun_checks/adapters/azure.py +9 -0
- labrun_checks/adapters/databricks.py +9 -0
- labrun_checks/adapters/snowflake.py +9 -0
- labrun_checks-0.2.0.dist-info/METADATA +10 -0
- labrun_checks-0.2.0.dist-info/RECORD +14 -0
- labrun_checks-0.2.0.dist-info/WHEEL +5 -0
- labrun_checks-0.2.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Callable
|
|
4
|
+
|
|
5
|
+
from ._api import post_checkpoint, post_cleanup_complete
|
|
6
|
+
from ._cleanup import run_cleanup
|
|
7
|
+
from ._state import get_session, set_session
|
|
8
|
+
from ._types import (
|
|
9
|
+
CheckpointResult,
|
|
10
|
+
CheckpointSession,
|
|
11
|
+
CleanupAdapter,
|
|
12
|
+
CleanupResource,
|
|
13
|
+
CleanupResult,
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
__all__ = [
|
|
17
|
+
"register",
|
|
18
|
+
"check",
|
|
19
|
+
"cleanup",
|
|
20
|
+
"run_cleanup",
|
|
21
|
+
"CheckpointResult",
|
|
22
|
+
"CheckpointSession",
|
|
23
|
+
"CleanupAdapter",
|
|
24
|
+
"CleanupResource",
|
|
25
|
+
"CleanupResult",
|
|
26
|
+
]
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def register(session_token: str, lab_id: str, credentials: dict | None = None) -> None:
    """Store the lab session used by subsequent check()/cleanup() calls."""
    session = CheckpointSession(
        session_token=session_token,
        lab_id=lab_id,
        credentials=credentials or {},
    )
    set_session(session)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def check(checkpoint_id: int, validator_fn: Callable[[CheckpointSession], CheckpointResult]) -> CheckpointResult:
    """Run *validator_fn* against the active session and record the outcome.

    A crashing validator is converted into an "error" result rather than
    propagating; a failure to reach the checkpoint API is likewise reported
    as an "error" result so the caller never sees a raw exception.
    """
    session = get_session()

    try:
        outcome = validator_fn(session)
    except Exception as err:  # validator bug -> error result, not a crash
        outcome = CheckpointResult("error", error_reason=repr(err))

    try:
        post_checkpoint(session.session_token, session.lab_id, checkpoint_id, outcome)
    except Exception as err:
        return CheckpointResult(
            "error",
            error_reason=f"Network error recording checkpoint: {err!r}",
        )

    return outcome
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def cleanup(status: str) -> CheckpointResult:
    """Report cleanup completion ("clean" or "dirty") to the lab backend.

    Returns a "pass" result for clean, "fail" for dirty, or an "error"
    result when *status* is invalid or the API is unreachable.
    """
    if status not in ("clean", "dirty"):
        return CheckpointResult(
            "error",
            error_reason=f"status must be 'clean' or 'dirty', got {status!r}",
        )

    session = get_session()

    try:
        post_cleanup_complete(session.session_token, session.lab_id, status)
    except Exception as err:
        return CheckpointResult(
            "error",
            error_reason=f"Network error posting cleanup event: {err!r}",
        )

    if status == "clean":
        return CheckpointResult("pass")
    return CheckpointResult("fail")
|
labrun_checks/_api.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import time
|
|
5
|
+
|
|
6
|
+
import requests
|
|
7
|
+
|
|
8
|
+
from ._types import CheckpointResult
|
|
9
|
+
|
|
10
|
+
_BASE_URL = os.environ.get("LABRUN_API_BASE_URL", "https://labrun.dev")
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def post_checkpoint(
    session_token: str,
    lab_id: str,
    checkpoint_id: int,
    result: CheckpointResult,
) -> None:
    """POST one checkpoint outcome to the labrun API; raises on HTTP/network errors."""
    # NOTE(review): only result.status is transmitted — error_reason and
    # hint_tier_available stay client-side. Confirm the API expects that.
    resp = requests.post(
        f"{_BASE_URL}/api/labs/checkpoint",
        json={
            "lab_id": lab_id,
            "checkpoint_id": checkpoint_id,
            "status": result.status,
            "timestamp": int(time.time()),
        },
        headers={"Authorization": f"Bearer {session_token}"},
        timeout=10,
    )
    resp.raise_for_status()
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def post_cleanup_complete(session_token: str, lab_id: str, status: str) -> None:
    """POST the final cleanup status to the labrun API; raises on HTTP/network errors."""
    resp = requests.post(
        f"{_BASE_URL}/api/labs/cleanup-complete",
        json={"lab_id": lab_id, "status": status},
        headers={"Authorization": f"Bearer {session_token}"},
        timeout=10,
    )
    resp.raise_for_status()
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
"""Three-phase cleanup orchestrator per RESOURCE-SAFETY-SPEC Section 3.6."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
from ._state import get_session
|
|
5
|
+
from ._types import CleanupAdapter, CleanupResource, CleanupResult
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def run_cleanup(
    manifest: list[CleanupResource],
    adapter: CleanupAdapter | None = None,
) -> CleanupResult:
    """Run the three-phase cleanup protocol.

    Phase 1 attempts to delete every manifest resource, phase 2 re-checks
    that each one is actually gone, phase 3 scans for tagged resources that
    were never in the manifest (orphans from earlier attempts).

    Args:
        manifest: Resources to clean up, ordered critical-first.
        adapter: Cloud adapter implementing delete/describe/tag_scan.
            Defaults to AwsCleanupAdapter if None.

    Returns:
        CleanupResult with status 'clean' or 'dirty', plus any warnings/orphans.
    """
    session = get_session()
    lab_slug = session.lab_id

    if adapter is None:
        from .adapters.aws import AwsCleanupAdapter
        adapter = AwsCleanupAdapter()

    creds = session.credentials
    warnings: list[str] = []
    orphans: list[str] = []

    def manual_cmd(res: CleanupResource) -> str:
        # Operator-facing fallback when no explicit CLI command was supplied.
        return res.cli_delete_command or f"(no CLI command provided for {res.type} {res.id!r})"

    # Phase 1: Teardown — attempt every deletion, never stop early.
    for res in manifest:
        try:
            adapter.delete_resource(creds, res)
        except Exception as err:
            warnings.append(
                f"FAILED TO DELETE: {res.type} {res.id!r}. "
                f"Error: {err}. Run manually: {manual_cmd(res)}"
            )

    # Phase 2: Verification scan — every resource must be confirmed gone.
    for res in manifest:
        try:
            alive = adapter.describe_resource(creds, res)
        except Exception as err:
            warnings.append(
                f"VERIFICATION ERROR: Could not check {res.type} {res.id!r}. "
                f"Error: {err}"
            )
            continue
        if alive:
            warnings.append(
                f"WARNING: {res.type} {res.id!r} is still alive. "
                f"Run this to kill it: {manual_cmd(res)}"
            )

    # Phase 3: Tag audit — anything tagged for this lab but absent from the
    # manifest is an orphan from some earlier attempt.
    region = creds.get("region", "us-east-1")
    manifest_ids = {res.id for res in manifest}
    try:
        tagged_arns = adapter.tag_scan(creds, lab_slug, region)
    except Exception as err:
        warnings.append(f"TAG AUDIT ERROR: Could not scan tags. Error: {err}")
        tagged_arns = []

    for arn in tagged_arns:
        if _extract_resource_id_from_arn(arn) not in manifest_ids:
            orphans.append(
                f"ORPHAN FOUND: {arn} is tagged as belonging to this lab "
                f"but was not in this session's manifest. It may be left over "
                f"from a previous attempt."
            )

    status = "dirty" if (warnings or orphans) else "clean"
    return CleanupResult(status=status, warnings=warnings, orphans=orphans)
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def _extract_resource_id_from_arn(arn: str) -> str:
|
|
85
|
+
"""Best-effort extraction of a human-readable resource ID from an ARN.
|
|
86
|
+
|
|
87
|
+
ARN formats vary by service:
|
|
88
|
+
arn:aws:s3:::bucket-name -> bucket-name
|
|
89
|
+
arn:aws:glue:region:acct:database/db-name -> db-name
|
|
90
|
+
arn:aws:kinesis:region:acct:stream/name -> name
|
|
91
|
+
Falls back to the full ARN if no pattern matches.
|
|
92
|
+
"""
|
|
93
|
+
if ":::" in arn:
|
|
94
|
+
return arn.split(":::")[-1]
|
|
95
|
+
if "/" in arn:
|
|
96
|
+
return arn.split("/")[-1]
|
|
97
|
+
return arn
|
labrun_checks/_state.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Optional
|
|
4
|
+
|
|
5
|
+
from ._types import CheckpointSession
|
|
6
|
+
|
|
7
|
+
_session: Optional[CheckpointSession] = None
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def get_session() -> CheckpointSession:
    """Return the active session, or raise RuntimeError if none was registered."""
    if _session is not None:
        return _session
    raise RuntimeError(
        "Call labrun_checks.register(session_token, lab_id) before using check() or cleanup()."
    )
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def set_session(session: CheckpointSession) -> None:
    """Install *session* as the module-wide active session."""
    global _session
    _session = session
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def clear() -> None:
    """Reset the module-wide session back to None."""
    global _session
    _session = None
|
labrun_checks/_types.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from typing import Literal, Optional, Protocol, runtime_checkable
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
Status = Literal["pass", "fail", "error"]
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@dataclass
class CheckpointResult:
    """Outcome of one checkpoint validation or cleanup report.

    Produced by validator functions and by check()/cleanup(), which also use
    it to surface network failures via status="error" + error_reason.
    """

    status: Status  # "pass" | "fail" | "error"
    hint_tier_available: int = 0  # NOTE(review): never read in this package — presumably consumed by the backend/notebook; confirm
    error_reason: Optional[str] = None  # human-readable cause; set on the "error" paths in check()/cleanup()
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class CheckpointSession:
    """Active lab session state, stored module-wide via _state.set_session()."""

    session_token: str  # bearer token sent in the Authorization header to the labrun API
    lab_id: str  # lab slug; also the tag value searched during the cleanup tag audit
    credentials: dict = field(default_factory=dict)  # passed to cleanup adapters (AWS expects aws_access_key_id / aws_secret_access_key / optional region)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass
class CleanupResource:
    """Single resource to be cleaned up. Ordering in manifest is critical-first."""
    type: str  # dispatch key, e.g. "s3_bucket", "glue_database" (see adapters/aws.py)
    id: str  # resource name/identifier passed to the cloud API
    region: str = "us-east-1"  # NOTE(review): not read by the AWS adapter in this package — confirm intended
    parent: Optional[str] = None  # containing object; required for glue_table (its database name)
    cli_delete_command: Optional[str] = None  # operator-facing manual fallback shown in warnings
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
@dataclass
class CleanupResult:
    """Returned by run_cleanup. Notebook passes result.status to cleanup()."""
    status: Literal["clean", "dirty"]  # "clean" only when no warnings and no orphans
    warnings: list[str] = field(default_factory=list)  # delete/verify failures and still-alive resources
    orphans: list[str] = field(default_factory=list)  # tagged resources absent from this session's manifest
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
@runtime_checkable
class CleanupAdapter(Protocol):
    """Interface that cloud-specific adapters must implement.

    run_cleanup() invokes these in phase order: delete_resource (phase 1,
    teardown), describe_resource (phase 2, verification), tag_scan (phase 3,
    tag audit).
    """

    def delete_resource(self, credentials: dict, resource: CleanupResource) -> None:
        """Delete a single resource. Raise on failure. Swallow already-gone errors."""
        ...

    def describe_resource(self, credentials: dict, resource: CleanupResource) -> bool:
        """Return True if resource still exists, False if confirmed gone."""
        ...

    def tag_scan(self, credentials: dict, lab_slug: str, region: str) -> list[str]:
        """Return list of ARNs tagged with labrun:lab-slug=<lab_slug>."""
        ...
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
from .aws import (
|
|
2
|
+
AwsCleanupAdapter,
|
|
3
|
+
make_glue_client,
|
|
4
|
+
make_kinesis_client,
|
|
5
|
+
make_s3_client,
|
|
6
|
+
make_sts_client,
|
|
7
|
+
make_tagging_client,
|
|
8
|
+
)
|
|
9
|
+
from .azure import make_azure_resource_client
|
|
10
|
+
from .databricks import make_databricks_client
|
|
11
|
+
from .snowflake import make_snowflake_connection
|
|
12
|
+
|
|
13
|
+
__all__ = [
|
|
14
|
+
"AwsCleanupAdapter",
|
|
15
|
+
"make_s3_client",
|
|
16
|
+
"make_glue_client",
|
|
17
|
+
"make_kinesis_client",
|
|
18
|
+
"make_sts_client",
|
|
19
|
+
"make_tagging_client",
|
|
20
|
+
"make_databricks_client",
|
|
21
|
+
"make_snowflake_connection",
|
|
22
|
+
"make_azure_resource_client",
|
|
23
|
+
]
|
|
@@ -0,0 +1,256 @@
|
|
|
1
|
+
"""AWS boto3 adapter for labrun-checks checkpoints and cleanup."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
from typing import TYPE_CHECKING, Optional
|
|
5
|
+
|
|
6
|
+
import boto3
|
|
7
|
+
from botocore.exceptions import ClientError
|
|
8
|
+
|
|
9
|
+
from .._types import CleanupResource
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from mypy_boto3_s3 import S3Client
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
_account_id_cache: Optional[str] = None
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _make_client(service: str, credentials: dict):
    """Build a boto3 client for *service* from explicit lab credentials.

    Requires 'aws_access_key_id' and 'aws_secret_access_key' keys;
    'region' is optional and defaults to us-east-1.
    """
    region = credentials.get("region", "us-east-1")
    return boto3.client(
        service,
        aws_access_key_id=credentials["aws_access_key_id"],
        aws_secret_access_key=credentials["aws_secret_access_key"],
        region_name=region,
    )
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def make_s3_client(credentials: dict) -> "S3Client":
    """Return a boto3 S3 client built from lab credentials."""
    return _make_client("s3", credentials)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def make_glue_client(credentials: dict):
    """Return a boto3 Glue client built from lab credentials."""
    return _make_client("glue", credentials)
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def make_kinesis_client(credentials: dict):
    """Return a boto3 Kinesis client built from lab credentials."""
    return _make_client("kinesis", credentials)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def make_sts_client(credentials: dict):
    """Return a boto3 STS client built from lab credentials."""
    return _make_client("sts", credentials)
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def make_tagging_client(credentials: dict):
    """Return a boto3 Resource Groups Tagging API client built from lab credentials."""
    return _make_client("resourcegroupstaggingapi", credentials)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def make_iam_client(credentials: dict):
    """Return a boto3 IAM client built from lab credentials."""
    return _make_client("iam", credentials)
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def _get_account_id(credentials: dict) -> str:
    """Return the AWS account ID for *credentials*, cached after the first call.

    NOTE(review): the cache is one module-level string and is NOT keyed by
    credentials — if a process ever calls this with two different credential
    sets, the second caller gets the first account's ID. Confirm sessions are
    process-scoped, or key the cache by access key id.
    """
    global _account_id_cache
    if _account_id_cache is None:
        sts = make_sts_client(credentials)
        _account_id_cache = sts.get_caller_identity()["Account"]
    return _account_id_cache
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def clear_account_id_cache() -> None:
    """Forget the cached STS account ID so the next lookup re-queries AWS."""
    global _account_id_cache
    _account_id_cache = None
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def _is_already_gone(exc: ClientError) -> bool:
|
|
65
|
+
code = exc.response.get("Error", {}).get("Code", "")
|
|
66
|
+
gone_codes = {
|
|
67
|
+
"NoSuchBucket",
|
|
68
|
+
"NoSuchKey",
|
|
69
|
+
"NoSuchEntity",
|
|
70
|
+
"EntityNotFoundException",
|
|
71
|
+
"ResourceNotFoundException",
|
|
72
|
+
"ResourceNotFoundFault",
|
|
73
|
+
"StreamNotFoundFault",
|
|
74
|
+
}
|
|
75
|
+
return code in gone_codes
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
class AwsCleanupAdapter:
    """AWS implementation of the CleanupAdapter protocol.

    delete/describe dispatch on CleanupResource.type via the module-level
    _DELETE_DISPATCH / _DESCRIBE_DISPATCH tables; tag_scan queries the
    Resource Groups Tagging API in the requested region.
    """

    def delete_resource(self, credentials: dict, resource: CleanupResource) -> None:
        """Delete one resource; 'already gone' ClientErrors are swallowed."""
        handler = _DELETE_DISPATCH.get(resource.type)
        if handler is None:
            raise ValueError(f"Unknown resource type: {resource.type!r}")
        try:
            handler(credentials, resource)
        except ClientError as err:
            if not _is_already_gone(err):
                raise

    def describe_resource(self, credentials: dict, resource: CleanupResource) -> bool:
        """Return True if the resource still exists, False if confirmed gone."""
        handler = _DESCRIBE_DISPATCH.get(resource.type)
        if handler is None:
            raise ValueError(f"Unknown resource type: {resource.type!r}")
        try:
            handler(credentials, resource)
        except ClientError as err:
            if _is_already_gone(err):
                return False
            raise
        return True

    def tag_scan(self, credentials: dict, lab_slug: str, region: str) -> list[str]:
        """Return ARNs tagged labrun:lab-slug=<lab_slug>, skipping ones mid-deletion."""
        scoped_creds = dict(credentials)
        scoped_creds["region"] = region
        tagging = make_tagging_client(scoped_creds)
        filters = [{"Key": "labrun:lab-slug", "Values": [lab_slug]}]
        found: list[str] = []
        pages = tagging.get_paginator("get_resources").paginate(TagFilters=filters)
        for page in pages:
            for mapping in page.get("ResourceTagMappingList", []):
                candidate = mapping.get("ResourceARN", "")
                if not _is_transitional_resource(scoped_creds, candidate):
                    found.append(candidate)
        return found
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def _is_transitional_resource(credentials: dict, arn: str) -> bool:
|
|
120
|
+
"""Phase 3 false-positive guard: skip resources in a deleting state."""
|
|
121
|
+
try:
|
|
122
|
+
if ":s3:::" in arn:
|
|
123
|
+
bucket_name = arn.split(":::")[-1]
|
|
124
|
+
s3 = make_s3_client(credentials)
|
|
125
|
+
s3.head_bucket(Bucket=bucket_name)
|
|
126
|
+
return False
|
|
127
|
+
if ":glue:" in arn and "/database/" in arn:
|
|
128
|
+
db_name = arn.split("/database/")[-1].split("/")[0]
|
|
129
|
+
glue = make_glue_client(credentials)
|
|
130
|
+
glue.get_database(Name=db_name)
|
|
131
|
+
return False
|
|
132
|
+
except ClientError as exc:
|
|
133
|
+
if _is_already_gone(exc):
|
|
134
|
+
return True
|
|
135
|
+
return False
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
def _delete_s3_bucket(credentials: dict, resource: CleanupResource) -> None:
    """Empty the S3 bucket named by resource.id, then delete it.

    Fix: the previous implementation issued a single list_objects_v2 call,
    which returns at most 1000 keys — buckets with more objects were never
    fully emptied and delete_bucket then failed. Paginate and batch-delete
    each page instead.
    """
    s3 = make_s3_client(credentials)
    paginator = s3.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=resource.id):
        contents = page.get("Contents", [])
        if not contents:
            continue
        # delete_objects accepts up to 1000 keys — exactly one page's worth.
        s3.delete_objects(
            Bucket=resource.id,
            Delete={"Objects": [{"Key": obj["Key"]} for obj in contents]},
        )
    # NOTE(review): versioned buckets would still hold old versions / delete
    # markers — confirm labs never enable versioning.
    s3.delete_bucket(Bucket=resource.id)
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def _delete_glue_database(credentials: dict, resource: CleanupResource) -> None:
    """Drop the Glue database named by resource.id."""
    make_glue_client(credentials).delete_database(Name=resource.id)
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def _delete_glue_crawler(credentials: dict, resource: CleanupResource) -> None:
    """Drop the Glue crawler named by resource.id."""
    make_glue_client(credentials).delete_crawler(Name=resource.id)
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def _delete_glue_table(credentials: dict, resource: CleanupResource) -> None:
    """Drop a Glue table; resource.parent must name its containing database."""
    glue = make_glue_client(credentials)
    if resource.parent is None:
        raise ValueError(f"Glue table {resource.id!r} requires parent (database name)")
    glue.delete_table(DatabaseName=resource.parent, Name=resource.id)
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
def _delete_kinesis_stream(credentials: dict, resource: CleanupResource) -> None:
    """Delete the Kinesis stream, force-removing any registered consumers."""
    client = make_kinesis_client(credentials)
    client.delete_stream(StreamName=resource.id, EnforceConsumerDeletion=True)
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def _delete_iam_role(credentials: dict, resource: CleanupResource) -> None:
    """Detach managed policies, delete inline policies, then delete the role.

    IAM requires the role to have no attached or inline policies before it
    can be deleted.

    Fix: the previous code `return`ed as soon as any single policy
    detach/delete raised an "already gone" error, aborting the rest of the
    cleanup and leaving the role behind. A vanished policy is now skipped
    and the remaining policies and the role itself are still processed.
    """
    iam = make_iam_client(credentials)
    role_name = resource.id

    try:
        attached = iam.list_attached_role_policies(RoleName=role_name)
    except ClientError as exc:
        if _is_already_gone(exc):
            return  # role itself is gone; nothing left to do
        raise
    for policy in attached.get("AttachedPolicies", []):
        try:
            iam.detach_role_policy(RoleName=role_name, PolicyArn=policy["PolicyArn"])
        except ClientError as exc:
            if not _is_already_gone(exc):
                raise
            # Policy (or role) already gone — keep cleaning the rest.

    try:
        inline = iam.list_role_policies(RoleName=role_name)
    except ClientError as exc:
        if _is_already_gone(exc):
            return
        raise
    for policy_name in inline.get("PolicyNames", []):
        try:
            iam.delete_role_policy(RoleName=role_name, PolicyName=policy_name)
        except ClientError as exc:
            if not _is_already_gone(exc):
                raise

    try:
        iam.delete_role(RoleName=role_name)
    except ClientError as exc:
        if not _is_already_gone(exc):
            raise
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
def _describe_s3_bucket(credentials: dict, resource: CleanupResource) -> None:
    """Probe the bucket; raises ClientError when it no longer exists."""
    make_s3_client(credentials).head_bucket(Bucket=resource.id)
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
def _describe_glue_database(credentials: dict, resource: CleanupResource) -> None:
    """Probe the Glue database; raises ClientError when it no longer exists."""
    make_glue_client(credentials).get_database(Name=resource.id)
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
def _describe_glue_crawler(credentials: dict, resource: CleanupResource) -> None:
    """Probe the Glue crawler; raises ClientError when it no longer exists."""
    make_glue_client(credentials).get_crawler(Name=resource.id)
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
def _describe_glue_table(credentials: dict, resource: CleanupResource) -> None:
    """Probe a Glue table; resource.parent must name its containing database."""
    glue = make_glue_client(credentials)
    if resource.parent is None:
        raise ValueError(f"Glue table {resource.id!r} requires parent (database name)")
    glue.get_table(DatabaseName=resource.parent, Name=resource.id)
|
|
228
|
+
|
|
229
|
+
|
|
230
|
+
def _describe_kinesis_stream(credentials: dict, resource: CleanupResource) -> None:
    """Probe the Kinesis stream; raises ClientError when it no longer exists."""
    make_kinesis_client(credentials).describe_stream(StreamName=resource.id)
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
def _describe_iam_role(credentials: dict, resource: CleanupResource) -> None:
    """Probe the IAM role; raises ClientError when it no longer exists."""
    make_iam_client(credentials).get_role(RoleName=resource.id)
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
# Maps CleanupResource.type -> deletion handler.
# Used by AwsCleanupAdapter.delete_resource; unknown types raise ValueError there.
_DELETE_DISPATCH = {
    "s3_bucket": _delete_s3_bucket,
    "glue_database": _delete_glue_database,
    "glue_crawler": _delete_glue_crawler,
    "glue_table": _delete_glue_table,
    "kinesis_stream": _delete_kinesis_stream,
    "iam_role": _delete_iam_role,
}

# Maps CleanupResource.type -> existence probe (raises ClientError when gone).
# Used by AwsCleanupAdapter.describe_resource; keys must mirror _DELETE_DISPATCH.
_DESCRIBE_DISPATCH = {
    "s3_bucket": _describe_s3_bucket,
    "glue_database": _describe_glue_database,
    "glue_crawler": _describe_glue_crawler,
    "glue_table": _describe_glue_table,
    "kinesis_stream": _describe_kinesis_stream,
    "iam_role": _describe_iam_role,
}
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
"""Azure adapter stub for labrun-checks. Not yet implemented."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
def make_azure_resource_client(credentials: dict):
    """Stub: always raises NotImplementedError until the Azure adapter ships."""
    message = (
        "The Azure adapter is not yet available in labrun-checks. "
        "Azure labs ship in a future release. Contact support@labrun.dev if this blocks you."
    )
    raise NotImplementedError(message)
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
"""Databricks adapter stub for labrun-checks. Not yet implemented."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
def make_databricks_client(credentials: dict):
    """Stub: always raises NotImplementedError until the Databricks adapter ships."""
    message = (
        "The Databricks adapter is not yet available in labrun-checks. "
        "Databricks labs ship in a future release. Contact support@labrun.dev if this blocks you."
    )
    raise NotImplementedError(message)
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
"""Snowflake adapter stub for labrun-checks. Not yet implemented."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
def make_snowflake_connection(credentials: dict):
    """Stub: always raises NotImplementedError until the Snowflake adapter ships."""
    message = (
        "The Snowflake adapter is not yet available in labrun-checks. "
        "Snowflake labs ship in a future release. Contact support@labrun.dev if this blocks you."
    )
    raise NotImplementedError(message)
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: labrun-checks
|
|
3
|
+
Version: 0.2.0
|
|
4
|
+
Summary: Checkpoint validation for Labrun labs
|
|
5
|
+
Requires-Python: >=3.9
|
|
6
|
+
Requires-Dist: requests>=2.28
|
|
7
|
+
Requires-Dist: boto3>=1.26
|
|
8
|
+
Provides-Extra: dev
|
|
9
|
+
Requires-Dist: pytest>=7; extra == "dev"
|
|
10
|
+
Requires-Dist: responses>=0.23; extra == "dev"
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
labrun_checks/__init__.py,sha256=lvQg4wG6zynSOPqgtKVljeNx6e6cQ54AuYHxr-G7d5c,1886
|
|
2
|
+
labrun_checks/_api.py,sha256=L4EuI6oa1QwEaJUV6oHNdrHTzsRdKSD4rTYdaLxoo_Y,1114
|
|
3
|
+
labrun_checks/_cleanup.py,sha256=6Ym_7nM4z2_-CedGEPeZH_h5RyVYGUcZJKoLxak2vWU,3447
|
|
4
|
+
labrun_checks/_state.py,sha256=mSguVvIa1rrH5yEy8lKLzOOugXeFckmX7ltmRBgt258,535
|
|
5
|
+
labrun_checks/_types.py,sha256=jLHOllr5aHo8t9jpIbAqw7UWHKY9qC00QZzuVNUwRk0,1607
|
|
6
|
+
labrun_checks/adapters/__init__.py,sha256=jUozzi6Ph2z_ZhnHudRsTfuG8sAhURtm8ZtqgRB55Ww,559
|
|
7
|
+
labrun_checks/adapters/aws.py,sha256=sTkw-GnSiGhmbFQbPIvior4YPKq2C2dMvOjzAYQw5eU,8304
|
|
8
|
+
labrun_checks/adapters/azure.py,sha256=T0dp1zuze52bidcpZYOhvjVYNLXnDWninApZBbWN9Hg,352
|
|
9
|
+
labrun_checks/adapters/databricks.py,sha256=fPb7zSdZG4lRMsQSsW6D24eU1FEdgoFmRMnYJERxuc0,363
|
|
10
|
+
labrun_checks/adapters/snowflake.py,sha256=2b6dodrIv88QOVeB1W8Y9Urx6qPEPuG0SOv77bB8S5k,363
|
|
11
|
+
labrun_checks-0.2.0.dist-info/METADATA,sha256=99M744eEDjnGMHO-gVIu3mD8ZIdTcIkuHER4v79a9cc,292
|
|
12
|
+
labrun_checks-0.2.0.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
|
|
13
|
+
labrun_checks-0.2.0.dist-info/top_level.txt,sha256=o5yNRTbV-f95Wz896gNhVv03C9BXr1TV9T53m7su9G8,14
|
|
14
|
+
labrun_checks-0.2.0.dist-info/RECORD,,
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
labrun_checks
|