pytest-allure-host 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pytest_allure_host/__init__.py +6 -0
- pytest_allure_host/__main__.py +4 -0
- pytest_allure_host/cli.py +104 -0
- pytest_allure_host/config.py +141 -0
- pytest_allure_host/plugin.py +127 -0
- pytest_allure_host/publisher.py +716 -0
- pytest_allure_host/utils.py +119 -0
- pytest_allure_host-0.1.1.dist-info/METADATA +305 -0
- pytest_allure_host-0.1.1.dist-info/RECORD +12 -0
- pytest_allure_host-0.1.1.dist-info/WHEEL +4 -0
- pytest_allure_host-0.1.1.dist-info/entry_points.txt +6 -0
- pytest_allure_host-0.1.1.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,716 @@
"""Report publishing primitives (generate, upload, atomic latest swap).

Responsible for:
* Generating Allure report (pulling prior history first)
* Uploading run report to S3 (run prefix) + atomic promotion to latest/
* Writing manifest (runs/index.json) + human HTML index + trend viewer
* Retention (max_keep_runs) + directory placeholder objects

The trend viewer (runs/trend.html) is a small dependency-free canvas page
visualising passed / failed / broken counts across historical runs using
Allure's history-trend.json.
"""

from __future__ import annotations

import json
import shutil
import subprocess  # nosec B404
from collections.abc import Iterable
from dataclasses import dataclass
from pathlib import Path
from time import time

import boto3
from botocore.exceptions import ClientError

from .utils import (
    PublishConfig,
    branch_root,
    cache_control_for_key,
    compute_dir_size,
    guess_content_type,
    merge_manifest,
)

# --------------------------------------------------------------------------------------
# Paths helper
# --------------------------------------------------------------------------------------

@dataclass
class Paths:
    """Filesystem layout helper.

    Backwards compatibility: tests (and prior API) may pass explicit
    'report=' and 'results=' paths. If omitted we derive them from base.
    """

    base: Path = Path(".")
    report: Path | None = None
    results: Path | None = None

    def __post_init__(self) -> None:  # derive defaults if not provided
        if self.results is None:
            self.results = self.base / "allure-results"
        if self.report is None:
            self.report = self.base / "allure-report"
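A minimal usage sketch (illustrative, not part of the wheel): with no arguments both directories derive from `base`, while the explicit keyword form from the older API is kept verbatim.

    from pathlib import Path

    paths = Paths(base=Path("build"))
    assert paths.results == Path("build/allure-results")
    assert paths.report == Path("build/allure-report")

    # Backwards-compatible explicit form:
    custom = Paths(report=Path("out/report"), results=Path("out/results"))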
# --------------------------------------------------------------------------------------
# S3 helpers
# --------------------------------------------------------------------------------------


def _s3(cfg: PublishConfig):  # allow custom endpoint (tests / local)
    endpoint = getattr(cfg, "s3_endpoint", None)
    if endpoint:
        return boto3.client("s3", endpoint_url=endpoint)
    return boto3.client("s3")


def list_keys(bucket: str, prefix: str, endpoint: str | None = None) -> Iterable[str]:
    s3 = boto3.client("s3", endpoint_url=endpoint) if endpoint else boto3.client("s3")
    paginator = s3.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=bucket, Prefix=prefix):
        for obj in page.get("Contents", []) or []:
            key = obj.get("Key")
            if key:
                yield key


def delete_prefix(bucket: str, prefix: str, endpoint: str | None = None) -> None:
    keys = list(list_keys(bucket, prefix, endpoint))
    if not keys:
        return
    s3 = boto3.client("s3", endpoint_url=endpoint) if endpoint else boto3.client("s3")
    # Batch delete 1000 at a time
    for i in range(0, len(keys), 1000):
        batch = keys[i : i + 1000]
        if not batch:
            continue
        s3.delete_objects(
            Bucket=bucket,
            Delete={"Objects": [{"Key": k} for k in batch], "Quiet": True},
        )
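How the helpers compose, sketched against a local S3-compatible endpoint (the MinIO URL and the bucket/prefix names are assumptions for illustration):

    endpoint = "http://localhost:9000"  # e.g. MinIO for local testing

    for key in list_keys("my-bucket", "reports/demo/main/", endpoint=endpoint):
        print(key)

    # S3's DeleteObjects API caps each request at 1000 keys, hence the batching.
    delete_prefix("my-bucket", "reports/demo/main/latest_tmp/", endpoint=endpoint)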
# --------------------------------------------------------------------------------------
# Report generation & history preservation
# --------------------------------------------------------------------------------------


def pull_history(cfg: PublishConfig, paths: Paths) -> None:
    """Download previous latest/history/ to seed new history for trends."""
    s3 = _s3(cfg)
    root = branch_root(cfg.prefix, cfg.project, cfg.branch)
    history_prefix = f"{root}/latest/history/"
    local_history = paths.results / "history"
    if local_history.exists():
        shutil.rmtree(local_history)
    local_history.mkdir(parents=True, exist_ok=True)

    # List objects and download those under history/
    try:
        for key in list_keys(cfg.bucket, history_prefix):
            rel = key[len(history_prefix) :]
            if not rel:  # skip directory placeholder
                continue
            dest = local_history / rel
            dest.parent.mkdir(parents=True, exist_ok=True)
            s3.download_file(cfg.bucket, key, str(dest))
    except ClientError:
        # best-effort; history absence is fine
        pass
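Allure's generator folds a history/ directory found inside the results directory into the new report; seeding it here is what keeps trend data continuous across runs. After pull_history the local layout is roughly (file names illustrative, matching the usual Allure history set):

    allure-results/
        history/
            history.json
            history-trend.json
            duration-trend.json
            ...
        <per-test result files from the current run>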
def ensure_allure_cli() -> None:
    """Ensure the allure binary is discoverable; raise if not."""
    path = shutil.which("allure")
    if not path:
        raise RuntimeError("Allure CLI not found in PATH (install allure-commandline)")


def generate_report(paths: Paths) -> None:
    if not paths.results.exists() or not any(paths.results.iterdir()):
        raise RuntimeError("allure-results is missing or empty")
    if paths.report.exists():
        shutil.rmtree(paths.report)
    ensure_allure_cli()
    allure_path = shutil.which("allure")
    if not allure_path:  # defensive
        raise RuntimeError("Allure CLI unexpectedly missing")
    # Validate discovered binary path before executing (Bandit B603 mitigation)
    exec_path = Path(allure_path).resolve()
    if not exec_path.is_file() or exec_path.name != "allure":  # pragma: no cover
        raise RuntimeError(f"Unexpected allure executable: {exec_path}")
    # Safety: allure_path validated above; args are static & derived from
    # controlled paths (no user-provided injection surface).
    cmd = [
        allure_path,
        "generate",
        str(paths.results),
        "--clean",
        "-o",
        str(paths.report),
    ]
    try:
        # Security justification (S603/B603):
        # * shell=False (no shell interpolation)
        # * Executable path resolved & filename checked above
        # * Arguments are constant literals + vetted filesystem paths
        # * No user-controlled strings reach the command list
        # * Capturing output allows safe error surfacing without exposing
        #   uncontrolled stderr directly to logs if later sanitized.
        subprocess.run(  # noqa: S603  # nosec B603 - validated binary
            cmd,
            check=True,
            capture_output=True,
            text=True,
        )
        # Optionally could log completed.stdout at debug level elsewhere.
    except subprocess.CalledProcessError as e:  # pragma: no cover - error path
        raise RuntimeError(
            "Allure report generation failed: exit code "
            f"{e.returncode}\nSTDOUT:\n{(e.stdout or '').strip()}\n"
            f"STDERR:\n{(e.stderr or '').strip()}"
        ) from e
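For orientation, the constructed command is the standard Allure CLI invocation; with default Paths it is equivalent to:

    allure generate allure-results --clean -o allure-report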
# --------------------------------------------------------------------------------------
# Upload primitives
# --------------------------------------------------------------------------------------


def upload_dir(cfg: PublishConfig, root_dir: Path, key_prefix: str) -> None:
    s3 = _s3(cfg)
    for p in root_dir.rglob("*"):
        if not p.is_file():
            continue
        rel = p.relative_to(root_dir).as_posix()
        key = f"{key_prefix}{rel}"
        extra: dict[str, str] = {"CacheControl": cache_control_for_key(key)}
        ctype = guess_content_type(p)
        if ctype:
            extra["ContentType"] = ctype
        if cfg.ttl_days is not None:
            extra["Tagging"] = f"ttl-days={cfg.ttl_days}"
        s3.upload_file(str(p), cfg.bucket, key, ExtraArgs=extra)

# --------------------------------------------------------------------------------------
# Two-phase latest swap
# --------------------------------------------------------------------------------------


def two_phase_update_latest(cfg: PublishConfig, report_dir: Path) -> None:
    root = branch_root(cfg.prefix, cfg.project, cfg.branch)
    tmp_prefix = f"{root}/latest_tmp/"
    latest_prefix = f"{root}/latest/"

    # 1. Upload to tmp
    upload_dir(cfg, report_dir, tmp_prefix)
    # 2. Remove existing latest
    delete_prefix(cfg.bucket, latest_prefix, getattr(cfg, "s3_endpoint", None))
    # 3. Copy tmp → latest
    s3 = _s3(cfg)
    for key in list_keys(
        cfg.bucket,
        tmp_prefix,
        getattr(cfg, "s3_endpoint", None),
    ):
        rel = key[len(tmp_prefix) :]
        dest_key = f"{latest_prefix}{rel}"
        s3.copy({"Bucket": cfg.bucket, "Key": key}, cfg.bucket, dest_key)
    # 4. Validate & repair index if missing
    _validate_and_repair_latest(cfg, report_dir, latest_prefix)
    # 5. Write readiness marker + directory placeholder
    _write_latest_marker(cfg, latest_prefix)
    _ensure_directory_placeholder(cfg, report_dir / "index.html", latest_prefix)
    # 6. Delete tmp
    delete_prefix(cfg.bucket, tmp_prefix, getattr(cfg, "s3_endpoint", None))

def _validate_and_repair_latest(cfg: PublishConfig, report_dir: Path, latest_prefix: str) -> None:
    s3 = _s3(cfg)
    try:
        s3.head_object(Bucket=cfg.bucket, Key=f"{latest_prefix}index.html")
        return
    except ClientError:
        pass
    idx = report_dir / "index.html"
    if not idx.exists():
        return
    extra = {
        "CacheControl": cache_control_for_key(f"{latest_prefix}index.html"),
        "ContentType": guess_content_type(idx) or "text/html",
    }
    if cfg.ttl_days is not None:
        extra["Tagging"] = f"ttl-days={cfg.ttl_days}"
    s3.upload_file(
        str(idx),
        cfg.bucket,
        f"{latest_prefix}index.html",
        ExtraArgs=extra,
    )


def _write_latest_marker(cfg: PublishConfig, latest_prefix: str) -> None:
    _s3(cfg).put_object(
        Bucket=cfg.bucket,
        Key=f"{latest_prefix}LATEST_READY",
        Body=b"",
        CacheControl="no-cache",
        ContentType="text/plain",
    )
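Note the promotion is two-phase rather than truly atomic: between steps 2 and 3 latest/ is partially populated. The LATEST_READY marker written in step 5 gives consumers a cheap completeness signal; a minimal check (bucket and prefix values are assumptions):

    import boto3
    from botocore.exceptions import ClientError


    def latest_is_ready(bucket: str, latest_prefix: str) -> bool:
        """True once the readiness marker from step 5 exists."""
        try:
            boto3.client("s3").head_object(
                Bucket=bucket, Key=f"{latest_prefix}LATEST_READY"
            )
            return True
        except ClientError:
            return False


    # latest_is_ready("my-bucket", "reports/demo/main/latest/")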
# --------------------------------------------------------------------------------------
# Manifest + HTML index + trend viewer
# --------------------------------------------------------------------------------------


def _extract_summary_counts(report_dir: Path) -> dict | None:
    summary = report_dir / "widgets" / "summary.json"
    if not summary.exists():
        return None
    try:
        data = json.loads(summary.read_text("utf-8"))
    except Exception:
        return None
    stats = data.get("statistic") or {}
    if not isinstance(stats, dict):  # corrupt
        return None
    return {k: stats.get(k) for k in ("passed", "failed", "broken") if k in stats}
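For reference, the slice of Allure's widgets/summary.json read here looks roughly like this (abridged; only the statistic block is consumed):

    {
      "reportName": "Allure Report",
      "statistic": {
        "failed": 1,
        "broken": 0,
        "skipped": 2,
        "passed": 40,
        "unknown": 0,
        "total": 43
      }
    }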
def write_manifest(cfg: PublishConfig, paths: Paths) -> None:
    s3 = _s3(cfg)
    root = branch_root(cfg.prefix, cfg.project, cfg.branch)
    manifest_key = f"{root}/runs/index.json"

    existing = None
    try:
        body = s3.get_object(Bucket=cfg.bucket, Key=manifest_key)["Body"].read()
        existing = json.loads(body)
    except Exception:
        existing = None

    entry = {
        "run_id": cfg.run_id,
        "time": int(time()),
        "size": compute_dir_size(paths.report),
        "project": cfg.project,
        "branch": cfg.branch,
    }
    if getattr(cfg, "context_url", None):
        entry["context_url"] = cfg.context_url
    counts = _extract_summary_counts(paths.report)
    if counts:
        entry.update(counts)
    manifest = merge_manifest(existing, entry)
    s3.put_object(
        Bucket=cfg.bucket,
        Key=manifest_key,
        Body=json.dumps(manifest, indent=2).encode("utf-8"),
        ContentType="application/json",
        CacheControl="no-cache",
    )

    latest_payload = {
        "run_id": cfg.run_id,
        "run_url": cfg.url_run(),
        "latest_url": cfg.url_latest(),
        "project": cfg.project,
        "branch": cfg.branch,
    }
    s3.put_object(
        Bucket=cfg.bucket,
        Key=f"{root}/latest.json",
        Body=json.dumps(latest_payload, indent=2).encode("utf-8"),
        ContentType="application/json",
        CacheControl="no-cache",
    )

    # runs/index.html
    index_html = _build_runs_index_html(manifest, latest_payload, cfg)
    s3.put_object(
        Bucket=cfg.bucket,
        Key=f"{root}/runs/index.html",
        Body=index_html,
        ContentType="text/html; charset=utf-8",
        CacheControl="no-cache",
    )

    # runs/trend.html
    trend_html = _build_trend_viewer_html(cfg)
    s3.put_object(
        Bucket=cfg.bucket,
        Key=f"{root}/runs/trend.html",
        Body=trend_html,
        ContentType="text/html; charset=utf-8",
        CacheControl="no-cache",
    )
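Taken together, one publish leaves this per-branch layout under the bucket (derived from the prefixes above; <root> is branch_root(prefix, project, branch)):

    <root>/<run_id>/...        immutable per-run report copy
    <root>/latest/...          promoted report + LATEST_READY marker
    <root>/latest.json         pointer payload (run_id, run_url, latest_url, ...)
    <root>/runs/index.json     manifest of runs
    <root>/runs/index.html     human-readable run table
    <root>/runs/trend.html     dependency-free trend viewer

The manifest itself, as _build_runs_index_html consumes it, is an object with a runs list of entries shaped like the one assembled above; the merge semantics live in merge_manifest (utils.py, not shown in this file). Illustrative content:

    {
      "runs": [
        {
          "run_id": "20250101-120000",
          "time": 1735732800,
          "size": 1048576,
          "project": "demo",
          "branch": "main",
          "passed": 40,
          "failed": 1,
          "broken": 0,
          "context_url": "https://ci.example/run/123"
        }
      ]
    }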
def _format_epoch_utc(epoch: int) -> str:
    from datetime import datetime, timezone

    try:
        return datetime.fromtimestamp(epoch, tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
    except Exception:  # pragma: no cover - defensive
        return "-"


def _format_bytes(n: int) -> str:
    step = 1024.0
    units = ["B", "KB", "MB", "GB", "TB"]
    v = float(n)
    for u in units:
        if v < step:
            return f"{v:.1f}{u}" if u != "B" else f"{int(v)}B"
        v /= step
    return f"{v:.1f}PB"

def _build_runs_index_html(
    manifest: dict,
    latest_payload: dict,
    cfg: PublishConfig,
    row_cap: int = 500,
) -> bytes:
    runs_list = manifest.get("runs", [])
    runs_sorted = sorted(runs_list, key=lambda r: r.get("time", 0), reverse=True)
    rows: list[str] = []
    for rinfo in runs_sorted[:row_cap]:
        rid = rinfo.get("run_id", "?")
        size = rinfo.get("size") or 0
        t = rinfo.get("time") or 0
        human_time = _format_epoch_utc(t)
        pretty_size = _format_bytes(size)
        passed = rinfo.get("passed")
        failed = rinfo.get("failed")
        broken = rinfo.get("broken")
        if passed is None and failed is None and broken is None:
            summary = "-"
        else:
            summary = f"{passed or 0}/{failed or 0}/{broken or 0}"
        ctx_url = rinfo.get("context_url")
        if ctx_url:
            ctx_cell = f"<a href='{ctx_url}' target='_blank' rel='noopener'>link</a>"
        else:
            ctx_cell = "-"
        rows.append(
            f"<tr><td><code>{rid}</code></td><td>{t}</td>"
            f"<td>{human_time}</td><td title='{size}'>{pretty_size}</td>"
            f"<td>{summary}</td><td>{ctx_cell}</td>"
            f"<td><a href='../{rid}/'>run</a></td>"
            "<td><a href='../latest/'>latest</a></td></tr>"
        )
    table_rows = "\n".join(rows) if rows else "<tr><td colspan='8'>No runs yet</td></tr>"
    title = f"Allure Runs: {cfg.project} / {cfg.branch}"
    nav = (
        "<nav class='quick-links'><strong>Latest:</strong> "
        "<a href='../latest/'>root</a>"
        "<a href='../latest/#/graphs'>graphs</a>"
        "<a href='../latest/#/timeline'>timeline</a>"
        "<a href='../latest/history/history-trend.json'>history-json</a>"
        "<a href='trend.html'>trend-view</a>"
        "</nav>"
    )
    parts: list[str] = [
        "<!doctype html><html><head><meta charset='utf-8'>",
        f"<title>{title}</title>",
        "<style>",
        "body{font-family:system-ui;margin:1.5rem;}",
        "table{border-collapse:collapse;width:100%;}",
        (
            "th,td{padding:.35rem .55rem;border-bottom:1px solid #ddd;"  # noqa: E501
            "font-size:14px;}"
        ),
        (
            "th{text-align:left;background:#f8f8f8;}"  # noqa: E501
            "tr:hover{background:#f5f5f5;}"
        ),
        "tbody tr:first-child{background:#fffbe6;}",
        "tbody tr:first-child code::before{content:'★ ';color:#d18f00;}",
        "code{background:#f2f2f2;padding:2px 4px;border-radius:3px;}",
        "footer{margin-top:1rem;font-size:12px;color:#666;}",
        (
            "a{color:#0366d6;text-decoration:none;}"  # noqa: E501
            "a:hover{text-decoration:underline;}"
        ),
        "nav.quick-links{margin:.25rem 0 1rem;font-size:14px;}",
        "nav.quick-links a{margin-right:.65rem;}",
        "</style></head><body>",
        f"<h1>{title}</h1>",
        nav,
        "<table><thead><tr>",
        ("<th>Run ID</th><th>Epoch</th><th>UTC Time</th><th>Size</th>"),
        ("<th>P/F/B</th><th>Context</th><th>Run</th><th>Latest</th></tr></thead><tbody>"),
        table_rows,
        "</tbody></table>",
        (
            f"<footer>Updated {latest_payload.get('run_id', '?')} • "
            f"{cfg.project}/{cfg.branch}</footer>"
        ),
        "</body></html>",
    ]
    return "".join(parts).encode("utf-8")

def _build_trend_viewer_html(cfg: PublishConfig) -> bytes:
    title = f"Run History Trend: {cfg.project} / {cfg.branch}"
    json_url = "../latest/history/history-trend.json"
    parts: list[str] = [
        "<!doctype html><html><head><meta charset='utf-8'>",
        f"<title>{title}</title>",
        "<style>",
        "body{font-family:system-ui;margin:1.25rem;}",
        "h1{margin-top:0;}",
        "#meta{font-size:12px;color:#666;margin-bottom:1rem;}",
        "canvas{max-width:100%;border:1px solid #ddd;background:#fff;}",
        "a{color:#0366d6;text-decoration:none;}",
        "a:hover{text-decoration:underline;}",
        "table{border-collapse:collapse;margin-top:1rem;font-size:12px;}",
        "th,td{padding:4px 6px;border:1px solid #ccc;}",
        (
            ".legend-swatch{display:inline-block;width:10px;height:10px;"
            "margin-right:4px;border-radius:2px;}"
        ),
        "</style></head><body>",
        f"<h1>{title}</h1>",
        (
            "<div id='meta'>Data source: <code>latest/history/history-"
            "trend.json</code> · <a href='index.html'>back to runs</a></div>"
        ),
        "<canvas id='trend' width='900' height='300'></canvas>",
        "<div id='legend'></div>",
        (
            "<table id='raw'><thead><tr><th>Label</th><th>Total</th><th>Passed"  # noqa: E501
            "</th><th>Failed</th><th>Broken</th><th>Skipped</th><th>Unknown"  # noqa: E501
            "</th></tr></thead><tbody></tbody></table>"
        ),
        "<script>\n(async function(){\n",
        f"  const resp = await fetch('{json_url}');\n",
        (
            "  if(!resp.ok){document.body.insertAdjacentHTML('beforeend',"  # noqa: E501
            "'<p style=\\'color:red\\'>Failed to fetch trend JSON ('+resp.status+')</p>');return;}\n"  # noqa: E501
        ),
        "  const data = await resp.json();\n",
        (
            "  if(!Array.isArray(data)){document.body.insertAdjacentHTML('beforeend',"  # noqa: E501
            "'<p>No trend data.</p>');return;}\n"  # noqa: E501
        ),
        # Sanitize & enrich: fallback label if reportName/buildOrder missing
        (
            "  const stats = data\n"
            "    .filter(d=>d&&typeof d==='object')\n"
            "    .map((d,i)=>{\n"
            "      const st = (d.statistic && typeof d.statistic==='object') ?"  # noqa: E501
            " d.statistic : {};\n"
            "      const lbl = d.reportName || d.buildOrder || st.name ||"  # noqa: E501
            " (i+1);\n"
            "      return {label: String(lbl), ...st};\n"
            "    });\n"
        ),
        (
            "  if(!stats.length){document.body.insertAdjacentHTML('beforeend','<p>No usable trend entries.</p>');return;}\n"  # noqa: E501
        ),
        "  const cvs=document.getElementById('trend');\n",
        "  const ctx=cvs.getContext('2d');\n",
        (
            "  const colors={passed:'#2e7d32',failed:'#d32f2f',broken:'#ff9800'};\n"  # noqa: E501
        ),
        "  const keys=['passed','failed','broken'];\n",
        (
            "  const max=Math.max(1,...stats.map(s=>Math.max(...keys.map(k=>s[k]||0))));\n"  # noqa: E501
        ),
        (
            "  const pad=30;const w=cvs.width-pad*2;const h=cvs.height-pad*2;\n"  # noqa: E501
        ),
        (
            "  ctx.clearRect(0,0,cvs.width,cvs.height);ctx.font='12px system-ui';ctx.strokeStyle='#999';ctx.beginPath();ctx.moveTo(pad,pad);ctx.lineTo(pad,pad+h);ctx.lineTo(pad+w,pad+h);ctx.stroke();\n"  # noqa: E501
        ),
        "  const stepX = stats.length>1 ? w/(stats.length-1) : 0;\n",
        "  function y(v){return pad + h - (v/max)*h;}\n",
        (
            "  keys.forEach(k=>{ctx.beginPath();ctx.strokeStyle=colors[k];stats.forEach((s,i)=>{const x=pad+i*stepX;const yy=y(s[k]||0);if(i===0)ctx.moveTo(x,yy);else ctx.lineTo(x,yy);});ctx.stroke();});\n"  # noqa: E501
        ),
        (
            "  stats.forEach((s,i)=>{const x=pad+i*stepX;keys.forEach(k=>{const v=s[k]||0;const yy=y(v);ctx.fillStyle=colors[k];ctx.beginPath();ctx.arc(x,yy,3,0,Math.PI*2);ctx.fill();});ctx.fillStyle='#222';ctx.fillText(String(s.label), x-10, pad+h+14);});\n"  # noqa: E501
        ),
        (
            "  const legend=document.getElementById('legend');legend.innerHTML=keys.map(k=>`<span class='legend-swatch' style='background:${colors[k]}'></span>${k}`).join(' ');\n"  # noqa: E501
        ),
        (
            "  const tbody=document.querySelector('#raw tbody');tbody.innerHTML=stats.map(s=>`<tr><td>${s.label}</td><td>${s.total||''}</td><td>${s.passed||''}</td><td>${s.failed||''}</td><td>${s.broken||''}</td><td>${s.skipped||''}</td><td>${s.unknown||''}</td></tr>`).join('');\n"  # noqa: E501
        ),
        "})();\n</script>",
        "</body></html>",
    ]
    return "".join(parts).encode("utf-8")
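The viewer's map() expects history-trend.json to be an array of entries with an optional buildOrder/reportName label and a statistic block; entries missing a label fall back to their 1-based index. Illustrative input:

    [
      {
        "buildOrder": 12,
        "reportName": "20250101-120000",
        "statistic": {"passed": 40, "failed": 1, "broken": 0,
                      "skipped": 2, "unknown": 0, "total": 43}
      },
      {"statistic": {"passed": 39, "failed": 2, "broken": 1, "total": 42}}
    ]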
# --------------------------------------------------------------------------------------
# Retention cleanup & directory placeholder
# --------------------------------------------------------------------------------------


def cleanup_old_runs(cfg: PublishConfig, keep: int) -> None:
    if keep is None or keep <= 0:
        return
    s3 = _s3(cfg)
    root = branch_root(cfg.prefix, cfg.project, cfg.branch)
    # list immediate children (run prefixes)
    paginator = s3.get_paginator("list_objects_v2")
    run_prefixes: list[str] = []
    for page in paginator.paginate(Bucket=cfg.bucket, Prefix=f"{root}/", Delimiter="/"):
        for cp in page.get("CommonPrefixes", []) or []:
            pfx = cp.get("Prefix")
            if not pfx:
                continue
            name = pfx.rsplit("/", 2)[-2]
            if name in {"latest", "runs"}:
                continue
            is_ts = len(name) == 15 and name[8] == "-" and name.replace("-", "").isdigit()
            if is_ts:
                run_prefixes.append(pfx)
    run_prefixes.sort(reverse=True)
    for old in run_prefixes[keep:]:
        delete_prefix(cfg.bucket, old, getattr(cfg, "s3_endpoint", None))
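Retention only ever deletes prefixes whose leaf name looks like a 15-character YYYYMMDD-HHMMSS run id (all digits with a dash at index 8), so latest/ and runs/ are doubly protected. The predicate, restated for illustration:

    def looks_like_run_id(name: str) -> bool:
        # Mirrors the is_ts expression in cleanup_old_runs.
        return len(name) == 15 and name[8] == "-" and name.replace("-", "").isdigit()


    assert looks_like_run_id("20250101-120000")
    assert not looks_like_run_id("latest")
    assert not looks_like_run_id("2025-01-01T1200")  # right length, wrong shape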
def _ensure_directory_placeholder(cfg: PublishConfig, index_file: Path, dir_prefix: str) -> None:
    if not index_file.exists() or not dir_prefix.endswith("/"):
        return
    body = index_file.read_bytes()
    extra = {"CacheControl": "no-cache", "ContentType": "text/html"}
    if cfg.ttl_days is not None:
        extra["Tagging"] = f"ttl-days={cfg.ttl_days}"
    try:
        _s3(cfg).put_object(
            Bucket=cfg.bucket,
            Key=dir_prefix,
            Body=body,
            CacheControl=extra["CacheControl"],
            ContentType=extra["ContentType"],
        )
    except ClientError as e:  # pragma: no cover - best effort
        print(f"Placeholder upload skipped: {e}")

# --------------------------------------------------------------------------------------
# Preflight / Dry run / Publish orchestration
# --------------------------------------------------------------------------------------


def preflight(
    cfg: PublishConfig,
    paths: Paths | None = None,
    check_allure: bool = True,
) -> dict:
    paths = paths or Paths()
    results = {
        "allure_cli": False,
        "allure_results": False,
        "s3_bucket": False,
    }

    if check_allure:
        try:
            ensure_allure_cli()
            results["allure_cli"] = True
        except Exception:
            results["allure_cli"] = False
    else:
        results["allure_cli"] = True

    try:
        results_dir = paths.results
        results["allure_results"] = results_dir.exists() and any(results_dir.iterdir())
    except OSError:
        results["allure_results"] = False

    try:
        s3 = _s3(cfg)
        s3.head_bucket(Bucket=cfg.bucket)
        s3.list_objects_v2(Bucket=cfg.bucket, Prefix=cfg.s3_latest_prefix, MaxKeys=1)
        results["s3_bucket"] = True
    except ClientError:
        results["s3_bucket"] = False
    return results
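Gating a publish on the preflight result, sketched (the PublishConfig construction is an assumption; the real field set lives in config.py/utils.py, and the plugin or CLI normally builds it):

    cfg = PublishConfig(  # hypothetical construction
        bucket="my-bucket",
        prefix="reports",
        project="demo",
        branch="main",
        run_id="20250101-120000",
    )
    checks = preflight(cfg)
    if not all(checks.values()):
        raise SystemExit(f"preflight failed: {checks}")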
def plan_dry_run(cfg: PublishConfig, paths: Paths | None = None) -> dict:
    paths = paths or Paths()
    samples = []
    if paths.report.exists():
        for i, p in enumerate(paths.report.rglob("*")):
            if i >= 20:
                break
            if p.is_file():
                rel = p.relative_to(paths.report).as_posix()
                key_run = f"{cfg.s3_run_prefix}{rel}"
                samples.append(
                    {
                        "file": rel,
                        "run_key": key_run,
                        "cache": cache_control_for_key(key_run),
                    }
                )
    else:
        samples.append({"note": "Report missing; would run allure generate."})
    root = branch_root(cfg.prefix, cfg.project, cfg.branch)
    return {
        "bucket": cfg.bucket,
        "run_prefix": cfg.s3_run_prefix,
        "latest_prefix": f"{root}/latest_tmp/",
        "run_url": cfg.url_run(),
        "latest_url": cfg.url_latest(),
        "context_url": getattr(cfg, "context_url", None),
        "samples": samples,
    }

def publish(cfg: PublishConfig, paths: Paths | None = None) -> dict:
    paths = paths or Paths()
    pull_history(cfg, paths)
    generate_report(paths)
    upload_dir(cfg, paths.report, cfg.s3_run_prefix)
    _ensure_directory_placeholder(cfg, paths.report / "index.html", cfg.s3_run_prefix)
    two_phase_update_latest(cfg, paths.report)
    try:
        write_manifest(cfg, paths)
    except ClientError as e:  # pragma: no cover - non fatal
        print(f"Manifest write skipped: {e}")
    try:  # retention cleanup
        if getattr(cfg, "max_keep_runs", None):
            cleanup_old_runs(cfg, int(cfg.max_keep_runs))
    except Exception as e:  # pragma: no cover
        print(f"Cleanup skipped: {e}")

    files_count = sum(1 for p in paths.report.rglob("*") if p.is_file())
    return {
        "run_url": cfg.url_run(),
        "latest_url": cfg.url_latest(),
        "bucket": cfg.bucket,
        "run_prefix": cfg.s3_run_prefix,
        "latest_prefix": cfg.s3_latest_prefix,
        "report_size_bytes": compute_dir_size(paths.report),
        "report_files": files_count,
    }
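End to end, the orchestration is: pull history → generate → upload run copy → two-phase latest promotion → manifest/index/trend writes → retention. A standalone driver sketch, reusing the assumed PublishConfig from the preflight example above:

    from pathlib import Path

    result = publish(cfg, Paths(base=Path(".")))
    print(result["latest_url"], result["report_files"])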
__all__ = [
    "Paths",
    "pull_history",
    "generate_report",
    "upload_dir",
    "two_phase_update_latest",
    "write_manifest",
    "cleanup_old_runs",
    "preflight",
    "plan_dry_run",
    "publish",
]