stackport 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- backend/__init__.py +0 -0
- backend/aws_client.py +22 -0
- backend/cache.py +24 -0
- backend/config.py +14 -0
- backend/main.py +53 -0
- backend/routes/__init__.py +0 -0
- backend/routes/resources.py +248 -0
- backend/routes/s3.py +169 -0
- backend/routes/stats.py +158 -0
- stackport-0.1.0.dist-info/METADATA +102 -0
- stackport-0.1.0.dist-info/RECORD +19 -0
- stackport-0.1.0.dist-info/WHEEL +5 -0
- stackport-0.1.0.dist-info/entry_points.txt +2 -0
- stackport-0.1.0.dist-info/licenses/LICENSE +21 -0
- stackport-0.1.0.dist-info/top_level.txt +1 -0
- ui/dist/assets/index-CRqBg0t6.js +360 -0
- ui/dist/assets/index-Is3dIbZM.css +1 -0
- ui/dist/favicon.png +0 -0
- ui/dist/index.html +17 -0
backend/__init__.py
ADDED
|
File without changes
|
backend/aws_client.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
import functools

import boto3

from backend.config import (
    AWS_ACCESS_KEY_ID,
    AWS_ENDPOINT_URL,
    AWS_REGION,
    AWS_SECRET_ACCESS_KEY,
)


@functools.lru_cache(maxsize=64)
def get_client(service_name: str):
    """Build a boto3 client for one AWS-compatible service.

    Results are memoized per service name, so repeated calls return
    the same client object instead of constructing a new one.
    """
    client_kwargs = {
        "endpoint_url": AWS_ENDPOINT_URL,
        "region_name": AWS_REGION,
        "aws_access_key_id": AWS_ACCESS_KEY_ID,
        "aws_secret_access_key": AWS_SECRET_ACCESS_KEY,
    }
    return boto3.client(service_name, **client_kwargs)
|
backend/cache.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import threading
import time


class TTLCache:
    """Thread-safe in-memory cache whose entries expire after a fixed TTL.

    Entries are evicted lazily: an expired entry is removed the next time
    it is looked up (there is no background sweeper).
    """

    def __init__(self):
        # key -> (value, absolute expiry on the monotonic clock)
        self._store: dict = {}
        self._lock = threading.Lock()

    def get(self, key: str):
        """Return the cached value for *key*, or None if absent or expired."""
        with self._lock:
            if key in self._store:
                value, expiry = self._store[key]
                # time.monotonic() is immune to system clock adjustments
                # (NTP steps, manual changes) that would corrupt wall-clock
                # TTLs computed with time.time().
                if time.monotonic() < expiry:
                    return value
                del self._store[key]
            return None

    def set(self, key: str, value, ttl: float = 5):
        """Store *value* under *key*, valid for *ttl* seconds."""
        with self._lock:
            self._store[key] = (value, time.monotonic() + ttl)


# Shared process-wide cache instance used by the route modules.
cache = TTLCache()
|
backend/config.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
"""Environment-driven configuration for StackPort.

All values are read once at import time. Defaults target a local
AWS-compatible endpoint on port 4566 (LocalStack's default port) with
the conventional "test" credentials.
"""
import os

# Base URL of the AWS-compatible API every boto3 client talks to.
AWS_ENDPOINT_URL: str = os.environ.get("AWS_ENDPOINT_URL", "http://localhost:4566")
# Region name sent with every request.
AWS_REGION: str = os.environ.get("AWS_REGION", "us-east-1")
# Static credentials; local emulators typically accept any value.
AWS_ACCESS_KEY_ID: str = os.environ.get("AWS_ACCESS_KEY_ID", "test")
AWS_SECRET_ACCESS_KEY: str = os.environ.get("AWS_SECRET_ACCESS_KEY", "test")
# TCP port the HTTP server listens on (used by backend.main's cli()).
STACKPORT_PORT: int = int(os.environ.get("STACKPORT_PORT", "8080"))
# Comma-separated list of service names the dashboard queries.
STACKPORT_SERVICES: str = os.environ.get(
    "STACKPORT_SERVICES",
    "s3,sqs,sns,dynamodb,lambda,iam,logs,ssm,secretsmanager,kinesis,events,ec2,"
    "route53,kms,cloudformation,stepfunctions,rds,ecs,monitoring,ses,acm,wafv2,"
    "ecr,elasticache,glue,athena,apigateway,firehose,cognito-idp,cognito-identity,"
    "elasticmapreduce,elasticloadbalancing,elasticfilesystem,cloudfront,appsync",
)
|
backend/main.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
"""StackPort FastAPI application: API routers plus static hosting of the built UI."""
import logging
import os
import time

import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles

from backend.config import STACKPORT_PORT
from backend.routes import resources, s3, stats

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(name)s %(levelname)s %(message)s",
)
logger = logging.getLogger(__name__)

app = FastAPI(title="StackPort", docs_url="/api/docs")

# Allow any origin/method/header.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

app.include_router(stats.router, prefix="/api")
app.include_router(s3.router, prefix="/api/s3")
app.include_router(resources.router, prefix="/api")

# Serve UI static files — mount assets under /assets, SPA fallback for everything else
ui_dist = os.path.join(os.path.dirname(__file__), "..", "ui", "dist")
if os.path.isdir(ui_dist):
    app.mount("/assets", StaticFiles(directory=os.path.join(ui_dist, "assets")), name="assets")

    # Canonical UI root, used below to reject path-traversal attempts.
    _ui_root = os.path.realpath(ui_dist)

    @app.get("/{path:path}")
    def spa_fallback(path: str):
        """Serve a real UI file when one exists; otherwise return index.html.

        Returning index.html for unknown paths lets the single-page app's
        client-side router handle them.
        """
        index_file = os.path.join(ui_dist, "index.html")
        if path:
            try:
                candidate = os.path.realpath(os.path.join(_ui_root, path))
            except ValueError:
                # Malformed path (e.g. embedded NUL byte): treat as an SPA route.
                return FileResponse(index_file)
            # SECURITY FIX: resolve the requested path and require it to stay
            # inside the UI root — previously "../" segments in {path} could
            # serve arbitrary files from the filesystem.
            if candidate.startswith(_ui_root + os.sep) and os.path.isfile(candidate):
                return FileResponse(candidate)
        # SPA fallback: return index.html
        return FileResponse(index_file)


def cli():
    """Console-script entry point: run the app under uvicorn."""
    uvicorn.run("backend.main:app", host="0.0.0.0", port=STACKPORT_PORT, reload=False)


if __name__ == "__main__":
    cli()
|
|
backend/routes/resources.py
ADDED
|
|
@@ -0,0 +1,248 @@
|
|
|
1
|
+
"""Generic resource listing and describe endpoints, driven by lookup registries."""
import logging

from fastapi import APIRouter, HTTPException

from backend.aws_client import get_client
from backend.cache import cache
from backend.routes.stats import SERVICE_REGISTRY, _METHOD_KWARGS, _count_items

logger = logging.getLogger(__name__)

router = APIRouter()

# Maps (service, resource_type) -> (boto3_service, method, id_param, response_key)
# response_key=None means return the full response (minus ResponseMetadata)
# NOTE: the key's service name follows STACKPORT_SERVICES naming (e.g.
# "monitoring", "elasticmapreduce"); the value's first element is the boto3
# client name, which can differ (e.g. "cloudwatch", "emr", "elbv2").
DESCRIBE_REGISTRY: dict[tuple[str, str], tuple[str, str, str, str | None]] = {
    # Storage
    ("s3", "buckets"): ("s3", "list_objects_v2", "Bucket", "Contents"),
    # Compute
    ("lambda", "functions"): ("lambda", "get_function", "FunctionName", None),
    ("ecs", "clusters"): ("ecs", "describe_clusters", "clusters", "clusters"),
    ("ecs", "task_definitions"): ("ecs", "describe_task_definition", "taskDefinition", "taskDefinition"),
    # Database
    ("dynamodb", "tables"): ("dynamodb", "describe_table", "TableName", "Table"),
    ("rds", "db_instances"): ("rds", "describe_db_instances", "DBInstanceIdentifier", "DBInstances"),
    ("rds", "db_clusters"): ("rds", "describe_db_clusters", "DBClusterIdentifier", "DBClusters"),
    ("elasticache", "cache_clusters"): ("elasticache", "describe_cache_clusters", "CacheClusterId", None),
    # Messaging
    ("sqs", "queues"): ("sqs", "get_queue_attributes", "QueueUrl", "Attributes"),
    ("sns", "topics"): ("sns", "get_topic_attributes", "TopicArn", "Attributes"),
    ("kinesis", "streams"): ("kinesis", "describe_stream", "StreamName", "StreamDescription"),
    ("firehose", "delivery_streams"): ("firehose", "describe_delivery_stream", "DeliveryStreamName", "DeliveryStreamDescription"),
    ("events", "rules"): ("events", "describe_rule", "Name", None),
    ("events", "event_buses"): ("events", "describe_event_bus", "Name", None),
    # Security & Identity
    ("iam", "roles"): ("iam", "get_role", "RoleName", "Role"),
    ("iam", "users"): ("iam", "get_user", "UserName", "User"),
    ("iam", "policies"): ("iam", "get_policy", "PolicyArn", "Policy"),
    ("secretsmanager", "secrets"): ("secretsmanager", "describe_secret", "SecretId", None),
    ("kms", "keys"): ("kms", "describe_key", "KeyId", "KeyMetadata"),
    ("acm", "certificates"): ("acm", "describe_certificate", "CertificateArn", "Certificate"),
    ("cognito-idp", "user_pools"): ("cognito-idp", "describe_user_pool", "UserPoolId", "UserPool"),
    ("cognito-identity", "identity_pools"): ("cognito-identity", "describe_identity_pool", "IdentityPoolId", None),
    # Management
    ("logs", "log_groups"): ("logs", "describe_log_groups", "logGroupNamePrefix", "logGroups"),
    ("ssm", "parameters"): ("ssm", "get_parameter", "Name", "Parameter"),
    ("cloudformation", "stacks"): ("cloudformation", "describe_stacks", "StackName", "Stacks"),
    ("stepfunctions", "state_machines"): ("stepfunctions", "describe_state_machine", "stateMachineArn", None),
    ("monitoring", "alarms"): ("cloudwatch", "describe_alarms", "AlarmNames", "MetricAlarms"),
    ("monitoring", "dashboards"): ("cloudwatch", "get_dashboard", "DashboardName", None),
    # Networking & CDN
    ("route53", "hosted_zones"): ("route53", "get_hosted_zone", "Id", "HostedZone"),
    ("cloudfront", "distributions"): ("cloudfront", "get_distribution", "Id", "Distribution"),
    ("elasticloadbalancing", "load_balancers"): ("elbv2", "describe_load_balancers", "LoadBalancerArns", "LoadBalancers"),
    # EC2
    ("ec2", "instances"): ("ec2", "describe_instances", "InstanceIds", "Reservations"),
    ("ec2", "vpcs"): ("ec2", "describe_vpcs", "VpcIds", "Vpcs"),
    ("ec2", "subnets"): ("ec2", "describe_subnets", "SubnetIds", "Subnets"),
    ("ec2", "security_groups"): ("ec2", "describe_security_groups", "GroupIds", "SecurityGroups"),
    ("ec2", "volumes"): ("ec2", "describe_volumes", "VolumeIds", "Volumes"),
    ("elasticfilesystem", "file_systems"): ("efs", "describe_file_systems", "FileSystemId", "FileSystems"),
    # Containers
    ("ecr", "repositories"): ("ecr", "describe_repositories", "repositoryNames", "repositories"),
    # Analytics & ETL
    ("glue", "databases"): ("glue", "get_database", "Name", "Database"),
    ("glue", "crawlers"): ("glue", "get_crawler", "Name", "Crawler"),
    ("athena", "workgroups"): ("athena", "get_work_group", "WorkGroup", "WorkGroup"),
    # API
    ("apigateway", "apis"): ("apigatewayv2", "get_api", "ApiId", None),
    ("appsync", "graphql_apis"): ("appsync", "get_graphql_api", "apiId", "graphqlApi"),
    # EMR
    ("elasticmapreduce", "clusters"): ("emr", "describe_cluster", "ClusterId", "Cluster"),
}
|
|
73
|
+
|
|
74
|
+
# Known ID field names for extracting a resource identifier from list results
|
|
75
|
+
_ID_FIELDS = [
|
|
76
|
+
"BucketName",
|
|
77
|
+
"FunctionName",
|
|
78
|
+
"TableName",
|
|
79
|
+
"TopicArn",
|
|
80
|
+
"QueueUrl",
|
|
81
|
+
"RoleName",
|
|
82
|
+
"UserName",
|
|
83
|
+
"PolicyName",
|
|
84
|
+
"Arn",
|
|
85
|
+
"PolicyArn",
|
|
86
|
+
"logGroupName",
|
|
87
|
+
"Name",
|
|
88
|
+
"SecretName",
|
|
89
|
+
"StreamName",
|
|
90
|
+
"RuleName",
|
|
91
|
+
"InstanceId",
|
|
92
|
+
"VpcId",
|
|
93
|
+
"SubnetId",
|
|
94
|
+
"GroupId",
|
|
95
|
+
"VolumeId",
|
|
96
|
+
"HostedZoneId",
|
|
97
|
+
"Id",
|
|
98
|
+
"KeyId",
|
|
99
|
+
"StackName",
|
|
100
|
+
"stateMachineArn",
|
|
101
|
+
"DBInstanceIdentifier",
|
|
102
|
+
"DBClusterIdentifier",
|
|
103
|
+
"clusterArn",
|
|
104
|
+
"CertificateArn",
|
|
105
|
+
"repositoryName",
|
|
106
|
+
"CacheClusterId",
|
|
107
|
+
"DeliveryStreamName",
|
|
108
|
+
"WorkGroupName",
|
|
109
|
+
"ApiId",
|
|
110
|
+
"UserPoolId",
|
|
111
|
+
"IdentityPoolId",
|
|
112
|
+
"LoadBalancerArn",
|
|
113
|
+
"FileSystemId",
|
|
114
|
+
"AlarmName",
|
|
115
|
+
"DashboardName",
|
|
116
|
+
"CrawlerName",
|
|
117
|
+
"DatabaseName",
|
|
118
|
+
"DistributionId",
|
|
119
|
+
]
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def _extract_id(item) -> str:
|
|
123
|
+
"""Extract a usable ID from a list API result item."""
|
|
124
|
+
if isinstance(item, str):
|
|
125
|
+
return item
|
|
126
|
+
if isinstance(item, dict):
|
|
127
|
+
for field in _ID_FIELDS:
|
|
128
|
+
if field in item:
|
|
129
|
+
return str(item[field])
|
|
130
|
+
# Fallback: first string value
|
|
131
|
+
for v in item.values():
|
|
132
|
+
if isinstance(v, str):
|
|
133
|
+
return v
|
|
134
|
+
return str(item)
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
def _summarize_item(item) -> dict:
|
|
138
|
+
"""Create a summary dict from a list API result item."""
|
|
139
|
+
if isinstance(item, str):
|
|
140
|
+
return {"id": item}
|
|
141
|
+
if isinstance(item, dict):
|
|
142
|
+
summary = {"id": _extract_id(item)}
|
|
143
|
+
for key, value in item.items():
|
|
144
|
+
if isinstance(value, (str, int, float, bool)) or value is None:
|
|
145
|
+
summary[key] = value
|
|
146
|
+
elif hasattr(value, "isoformat"):
|
|
147
|
+
summary[key] = value.isoformat()
|
|
148
|
+
return summary
|
|
149
|
+
return {"id": str(item)}
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def _list_one(service: str, resource_type: str, boto3_service: str, method_name: str, response_key):
    """Run one list call and summarize its items; failures yield an empty list."""
    try:
        client = get_client(boto3_service)
        call = getattr(client, method_name)
        resp = call(**_METHOD_KWARGS.get((boto3_service, method_name), {}))
        items = resp.get(response_key, [])
        # Handle nested structures (e.g., cloudfront DistributionList.Items)
        if isinstance(items, dict) and "Items" in items:
            items = items.get("Items", []) or []
        return [_summarize_item(entry) for entry in items]
    except Exception:
        logger.debug("Failed to list %s/%s", service, resource_type, exc_info=True)
        return []


@router.get("/resources/{service}")
def list_resources(service: str):
    """List every resource of one service, grouped by resource type (cached 5s)."""
    cache_key = f"resources:{service}"
    hit = cache.get(cache_key)
    if hit is not None:
        return hit

    registry_entries = SERVICE_REGISTRY.get(service)
    if not registry_entries:
        raise HTTPException(status_code=404, detail=f"Unknown service: {service}")

    collected: dict[str, list[dict]] = {
        resource_type: _list_one(service, resource_type, boto3_service, method_name, response_key)
        for resource_type, boto3_service, method_name, response_key in registry_entries
    }

    result = {"service": service, "resources": collected}
    cache.set(cache_key, result, ttl=5)
    return result
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
@router.get("/resources/{service}/{res_type}/{res_id:path}")
def get_resource_detail(service: str, res_type: str, res_id: str):
    """Describe a single resource via its registered describe/get API (cached 5s)."""
    cache_key = f"detail:{service}:{res_type}:{res_id}"
    hit = cache.get(cache_key)
    if hit is not None:
        return hit

    lookup = DESCRIBE_REGISTRY.get((service, res_type))
    if not lookup:
        raise HTTPException(
            status_code=404,
            detail=f"No detail lookup registered for {service}/{res_type}",
        )

    boto3_service, method_name, id_param, response_key = lookup

    # Some APIs take list parameters (e.g., InstanceIds, VpcIds)
    list_style_params = frozenset({
        "InstanceIds", "VpcIds", "SubnetIds", "GroupIds", "VolumeIds",
        "repositoryNames", "clusters", "AlarmNames", "LoadBalancerArns",
    })

    try:
        call = getattr(get_client(boto3_service), method_name)
        arg = [res_id] if id_param in list_style_params else res_id
        resp = call(**{id_param: arg})

        # Remove boto3 metadata
        resp.pop("ResponseMetadata", None)

        detail = resp if response_key is None else resp.get(response_key, resp)

        result = {
            "service": service,
            "type": res_type,
            "id": res_id,
            # Convert datetime/bytes values for JSON serialization.
            "detail": _serialize(detail),
        }
        cache.set(cache_key, result, ttl=5)
        return result
    except Exception as exc:
        logger.warning("Failed to get detail for %s/%s/%s", service, res_type, res_id, exc_info=True)
        raise HTTPException(status_code=500, detail=str(exc)) from exc
|
|
236
|
+
|
|
237
|
+
|
|
238
|
+
def _serialize(obj):
|
|
239
|
+
"""Recursively convert non-JSON-serializable types."""
|
|
240
|
+
if isinstance(obj, dict):
|
|
241
|
+
return {k: _serialize(v) for k, v in obj.items()}
|
|
242
|
+
if isinstance(obj, list):
|
|
243
|
+
return [_serialize(v) for v in obj]
|
|
244
|
+
if hasattr(obj, "isoformat"):
|
|
245
|
+
return obj.isoformat()
|
|
246
|
+
if isinstance(obj, bytes):
|
|
247
|
+
return obj.decode("utf-8", errors="replace")
|
|
248
|
+
return obj
|
backend/routes/s3.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
"""S3 browsing endpoints: bucket listing with stats, object listing, and object detail/download."""
import logging

from fastapi import APIRouter, Query
from fastapi.responses import StreamingResponse

from backend.aws_client import get_client
from backend.cache import cache
from backend.config import AWS_REGION

# Debug-level logger for best-effort S3 calls that may legitimately fail.
logger = logging.getLogger(__name__)

# Mounted by backend.main under the /api/s3 prefix.
router = APIRouter()
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def _get_bucket_stats(bucket_name: str) -> tuple[int, int]:
    """Return (object_count, total_size_bytes) for a bucket. Cached 30s."""
    cache_key = f"s3:bucket_stats:{bucket_name}"
    hit = cache.get(cache_key)
    if hit is not None:
        return hit

    count = 0
    size = 0
    try:
        pages = get_client("s3").get_paginator("list_objects_v2").paginate(Bucket=bucket_name)
        for page in pages:
            for entry in page.get("Contents", []):
                count += 1
                size += entry.get("Size", 0)
    except Exception:
        # Best effort: an unreadable bucket reports (0, 0) rather than failing.
        logger.debug("Failed to get bucket stats for %s", bucket_name, exc_info=True)

    stats = (count, size)
    cache.set(cache_key, stats, ttl=30)
    return stats
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@router.get("/buckets")
def list_buckets():
    """List all buckets, enriched with object stats, versioning, encryption, and tags."""
    s3 = get_client("s3")

    def _versioning(bucket: str) -> str:
        # Status may be absent for buckets that never had versioning enabled.
        try:
            return s3.get_bucket_versioning(Bucket=bucket).get("Status", "Disabled")
        except Exception:
            logger.debug("Failed to get versioning for %s", bucket, exc_info=True)
            return "Disabled"

    def _encryption(bucket: str) -> str:
        # get_bucket_encryption raises when no SSE configuration exists.
        try:
            s3.get_bucket_encryption(Bucket=bucket)
            return "Enabled"
        except Exception:
            logger.debug("Failed to get encryption for %s", bucket, exc_info=True)
            return "Disabled"

    def _tags(bucket: str) -> dict[str, str]:
        # get_bucket_tagging raises when the bucket has no tag set.
        try:
            tag_set = s3.get_bucket_tagging(Bucket=bucket).get("TagSet", [])
            return {entry["Key"]: entry["Value"] for entry in tag_set}
        except Exception:
            logger.debug("Failed to get tags for %s", bucket, exc_info=True)
            return {}

    buckets = []
    for b in s3.list_buckets().get("Buckets", []):
        name = b["Name"]
        obj_count, total_size = _get_bucket_stats(name)
        buckets.append(
            {
                "name": name,
                "created": b["CreationDate"].isoformat(),
                "region": AWS_REGION,
                "object_count": obj_count,
                "total_size": total_size,
                "versioning": _versioning(name),
                "encryption": _encryption(name),
                "tags": _tags(name),
            }
        )

    return {"buckets": buckets}
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
@router.get("/buckets/{name}/objects")
def list_objects(
    name: str,
    prefix: str = Query(default="", description="Key prefix filter"),
    delimiter: str = Query(default="/", description="Hierarchy delimiter"),
):
    """List one hierarchy level of a bucket: common prefixes as folders, objects as files."""
    s3 = get_client("s3")

    list_kwargs: dict = {"Bucket": name, "Prefix": prefix}
    if delimiter:
        list_kwargs["Delimiter"] = delimiter

    folders: list[str] = []
    files: list[dict] = []
    for page in s3.get_paginator("list_objects_v2").paginate(**list_kwargs):
        folders.extend(cp["Prefix"] for cp in page.get("CommonPrefixes", []))
        for obj in page.get("Contents", []):
            key = obj["Key"]
            if key == prefix:
                # The prefix itself can appear as a zero-byte placeholder; skip it.
                continue
            files.append(
                {
                    "key": key,
                    "name": key[len(prefix):] if prefix else key,
                    "size": obj["Size"],
                    "content_type": "application/octet-stream",
                    "etag": obj["ETag"].strip('"'),
                    "last_modified": obj["LastModified"].isoformat(),
                }
            )

    return {
        "bucket": name,
        "prefix": prefix,
        "delimiter": delimiter,
        "folders": folders,
        "files": files,
    }
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
@router.get("/buckets/{name}/objects/{key:path}")
def get_object_detail(
    name: str,
    key: str,
    download: int = Query(default=0, description="Set to 1 to download the object"),
):
    """Return metadata for one object, or stream its body when download=1.

    The metadata view uses head_object plus a best-effort tag fetch; the
    download view streams get_object's body with an attachment disposition.
    """
    s3 = get_client("s3")

    if download == 1:
        resp = s3.get_object(Bucket=name, Key=key)
        # Suggest the last path segment as the download name.
        filename = key.rsplit("/", 1)[-1] or key
        # Backslashes and double quotes would break the quoted-string header value.
        safe_name = filename.replace("\\", "_").replace('"', "_")
        return StreamingResponse(
            resp["Body"],
            media_type=resp.get("ContentType", "application/octet-stream"),
            # BUG FIX: the header previously contained a literal placeholder
            # instead of the computed filename, so every download was misnamed.
            headers={"Content-Disposition": f'attachment; filename="{safe_name}"'},
        )

    resp = s3.head_object(Bucket=name, Key=key)

    # Tags are optional; failure to fetch them must not fail the whole request.
    tags: dict[str, str] = {}
    try:
        tag_resp = s3.get_object_tagging(Bucket=name, Key=key)
        tags = {t["Key"]: t["Value"] for t in tag_resp.get("TagSet", [])}
    except Exception:
        logger.debug("Failed to get object tags for %s/%s", name, key, exc_info=True)

    return {
        "bucket": name,
        "key": key,
        "size": resp["ContentLength"],
        "content_type": resp.get("ContentType", "application/octet-stream"),
        "content_encoding": resp.get("ContentEncoding"),
        "etag": resp["ETag"].strip('"'),
        "last_modified": resp["LastModified"].isoformat(),
        "version_id": resp.get("VersionId"),
        "metadata": resp.get("Metadata", {}),
        "preserved_headers": {},
        "tags": tags,
    }
|