scriptgini 1.3.1.tar.gz → 1.4.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70) hide show
  1. {scriptgini-1.3.1 → scriptgini-1.4.0}/PKG-INFO +2 -2
  2. {scriptgini-1.3.1 → scriptgini-1.4.0}/README.md +1 -1
  3. scriptgini-1.4.0/app/__init__.py +3 -0
  4. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/config.py +1 -0
  5. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/main.py +15 -1
  6. scriptgini-1.4.0/app/routers/analytics.py +198 -0
  7. scriptgini-1.4.0/app/routers/reports.py +188 -0
  8. scriptgini-1.4.0/app/schemas/analytics.py +59 -0
  9. scriptgini-1.4.0/app/schemas/reports.py +39 -0
  10. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/tasks.py +44 -7
  11. {scriptgini-1.3.1 → scriptgini-1.4.0}/pyproject.toml +1 -1
  12. {scriptgini-1.3.1 → scriptgini-1.4.0}/scriptgini.egg-info/PKG-INFO +2 -2
  13. {scriptgini-1.3.1 → scriptgini-1.4.0}/scriptgini.egg-info/SOURCES.txt +4 -1
  14. scriptgini-1.4.0/tests/test_sprint5_reporting_analytics.py +661 -0
  15. scriptgini-1.3.1/app/__init__.py +0 -3
  16. scriptgini-1.3.1/app/routers/analytics.py +0 -73
  17. scriptgini-1.3.1/app/schemas/analytics.py +0 -27
  18. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/agents/__init__.py +0 -0
  19. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/agents/prompts.py +0 -0
  20. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/agents/script_gini_agent.py +0 -0
  21. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/cache.py +0 -0
  22. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/celery_app.py +0 -0
  23. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/database.py +0 -0
  24. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/llm/__init__.py +0 -0
  25. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/llm/provider.py +0 -0
  26. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/models/__init__.py +0 -0
  27. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/models/api_key.py +0 -0
  28. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/models/bulk_job.py +0 -0
  29. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/models/execution_job.py +0 -0
  30. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/models/generated_script.py +0 -0
  31. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/models/membership.py +0 -0
  32. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/models/organization.py +0 -0
  33. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/models/project.py +0 -0
  34. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/models/script_run.py +0 -0
  35. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/models/test_case.py +0 -0
  36. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/models/user.py +0 -0
  37. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/routers/__init__.py +0 -0
  38. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/routers/api_key.py +0 -0
  39. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/routers/auth.py +0 -0
  40. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/routers/bulk_jobs.py +0 -0
  41. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/routers/demo.py +0 -0
  42. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/routers/execution.py +0 -0
  43. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/routers/organizations.py +0 -0
  44. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/routers/projects.py +0 -0
  45. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/routers/scripts.py +0 -0
  46. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/routers/test_cases.py +0 -0
  47. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/schemas/__init__.py +0 -0
  48. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/schemas/api_key.py +0 -0
  49. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/schemas/auth.py +0 -0
  50. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/schemas/bulk_job.py +0 -0
  51. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/schemas/execution.py +0 -0
  52. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/schemas/generated_script.py +0 -0
  53. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/schemas/membership.py +0 -0
  54. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/schemas/organization.py +0 -0
  55. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/schemas/project.py +0 -0
  56. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/schemas/test_case.py +0 -0
  57. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/services/api_key.py +0 -0
  58. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/services/auth.py +0 -0
  59. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/services/auth_dependencies.py +0 -0
  60. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/services/git_export.py +0 -0
  61. {scriptgini-1.3.1 → scriptgini-1.4.0}/app/services/rbac.py +0 -0
  62. {scriptgini-1.3.1 → scriptgini-1.4.0}/scriptgini.egg-info/dependency_links.txt +0 -0
  63. {scriptgini-1.3.1 → scriptgini-1.4.0}/scriptgini.egg-info/top_level.txt +0 -0
  64. {scriptgini-1.3.1 → scriptgini-1.4.0}/setup.cfg +0 -0
  65. {scriptgini-1.3.1 → scriptgini-1.4.0}/tests/test_api.py +0 -0
  66. {scriptgini-1.3.1 → scriptgini-1.4.0}/tests/test_auth.py +0 -0
  67. {scriptgini-1.3.1 → scriptgini-1.4.0}/tests/test_coverage.py +0 -0
  68. {scriptgini-1.3.1 → scriptgini-1.4.0}/tests/test_infra_services_coverage.py +0 -0
  69. {scriptgini-1.3.1 → scriptgini-1.4.0}/tests/test_sprint2_rbac.py +0 -0
  70. {scriptgini-1.3.1 → scriptgini-1.4.0}/tests/test_sprint3_execution.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: scriptgini
3
- Version: 1.3.1
3
+ Version: 1.4.0
4
4
  Summary: Agentic AI system that converts functional test cases into automation test scripts.
5
5
  Author: ScriptGini Team
6
6
  License: Proprietary
@@ -443,7 +443,7 @@ The project follows an **enterprise-grade development roadmap** with 6 sprints c
443
443
  | **Sprint 2** | RBAC + Multi-Tenancy | 32-38pts | 🟡 Core delivered (RBAC hardening pending) |
444
444
  | **Sprint 3** | Durable Execution | 34-40pts | ✅ Completed (Redis + Celery queue foundation) |
445
445
  | **Sprint 4** | Security & Hardening | 30-36pts | 🟡 In progress (isolation boundary + breakout tests pending) |
446
- | **Sprint 5** | Reporting & Analytics | 28-34pts | 🔲 Pending (Artifact storage, dashboards) |
446
+ | **Sprint 5** | Reporting & Analytics | 28-34pts | ✅ Completed (Reports APIs, trends/flakiness, retention cleanup) |
447
447
  | **Sprint 6** | Advanced Features | 24-30pts | 🔲 Pending (Webhooks, defect sync, versioning) |
448
448
 
449
449
  ---
@@ -429,7 +429,7 @@ The project follows an **enterprise-grade development roadmap** with 6 sprints c
429
429
  | **Sprint 2** | RBAC + Multi-Tenancy | 32-38pts | 🟡 Core delivered (RBAC hardening pending) |
430
430
  | **Sprint 3** | Durable Execution | 34-40pts | ✅ Completed (Redis + Celery queue foundation) |
431
431
  | **Sprint 4** | Security & Hardening | 30-36pts | 🟡 In progress (isolation boundary + breakout tests pending) |
432
- | **Sprint 5** | Reporting & Analytics | 28-34pts | 🔲 Pending (Artifact storage, dashboards) |
432
+ | **Sprint 5** | Reporting & Analytics | 28-34pts | ✅ Completed (Reports APIs, trends/flakiness, retention cleanup) |
433
433
  | **Sprint 6** | Advanced Features | 24-30pts | 🔲 Pending (Webhooks, defect sync, versioning) |
434
434
 
435
435
  ---
@@ -0,0 +1,3 @@
1
+ __version__ = "1.4.0"
2
+ __api_version__ = "v1.4.0"
3
+
@@ -40,6 +40,7 @@ class Settings(BaseSettings):
40
40
  SCRIPT_GENERATION_TIMEOUT_SECONDS: int = 180
41
41
  PLAYWRIGHT_RUN_HEADED: bool = True
42
42
  SCRIPT_EXECUTION_TIMEOUT_SECONDS: int = 300
43
+ ARTIFACT_RETENTION_DAYS: int = 30
43
44
  EXECUTION_ENV_ALLOWED_KEYS: str = "PATH,SYSTEMROOT,TEMP,TMP,HOME,USERPROFILE,PYTHONPATH,PYTHONHOME,PYTHONIOENCODING"
44
45
  EXECUTION_ENV_ALLOWED_PREFIXES: str = "PLAYWRIGHT_"
45
46
  SKIP_REVIEW_FOR_OLLAMA: bool = True
@@ -14,7 +14,19 @@ from fastapi.staticfiles import StaticFiles
14
14
  from app import __version__
15
15
  from app.config import settings
16
16
  from app.llm.provider import get_llm_diagnostics
17
- from app.routers import projects, test_cases, scripts, bulk_jobs, analytics, demo, auth, api_key, organizations, execution
17
+ from app.routers import (
18
+ analytics,
19
+ api_key,
20
+ auth,
21
+ bulk_jobs,
22
+ demo,
23
+ execution,
24
+ organizations,
25
+ projects,
26
+ reports,
27
+ scripts,
28
+ test_cases,
29
+ )
18
30
 
19
31
  logging.basicConfig(level=logging.DEBUG if settings.DEBUG else logging.INFO)
20
32
  logger = logging.getLogger(__name__)
@@ -148,11 +160,13 @@ app.include_router(test_cases.router, prefix="/api/v1")
148
160
  app.include_router(scripts.router, prefix="/api/v1")
149
161
  app.include_router(bulk_jobs.router, prefix="/api/v1")
150
162
  app.include_router(analytics.router, prefix="/api/v1")
163
+ app.include_router(analytics.insights_router, prefix="/api/v1")
151
164
  app.include_router(demo.router, prefix="/api/v1")
152
165
  app.include_router(auth.router, prefix="/api/v1")
153
166
  app.include_router(api_key.router, prefix="/api/v1")
154
167
  app.include_router(organizations.router, prefix="/api/v1")
155
168
  app.include_router(execution.router, prefix="/api/v1")
169
+ app.include_router(reports.router, prefix="/api/v1")
156
170
  app.mount("/static", StaticFiles(directory=static_dir), name="static")
157
171
 
158
172
 
@@ -0,0 +1,198 @@
1
+ from datetime import date, datetime, time, timezone
2
+
3
+ from sqlalchemy import case, func
4
+ from sqlalchemy.orm import Session
5
+ from fastapi import APIRouter, Depends, HTTPException
6
+
7
+ from app.database import get_db
8
+ from app.models.project import Project
9
+ from app.models.script_run import ScriptRun, ScriptRunStatus
10
+ from app.models.test_case import TestCase
11
+ from app.schemas.analytics import (
12
+ FlakinessItemResponse,
13
+ FlakinessResponse,
14
+ RecentFailureResponse,
15
+ RunAnalyticsResponse,
16
+ TrendPointResponse,
17
+ TrendsResponse,
18
+ )
19
+
20
+ router = APIRouter(prefix="/projects/{project_id}/analytics", tags=["Run Analytics"])
21
+ insights_router = APIRouter(prefix="/analytics", tags=["Run Analytics"])
22
+
23
+
24
+ @router.get("/runs", response_model=RunAnalyticsResponse)
25
+ def get_run_analytics(project_id: int, db: Session = Depends(get_db)):
26
+ project = db.query(Project).filter(Project.id == project_id).first()
27
+ if not project:
28
+ raise HTTPException(status_code=404, detail="Project not found")
29
+
30
+ aggregate = (
31
+ db.query(
32
+ func.count(ScriptRun.id),
33
+ func.sum(case((ScriptRun.success.is_(True), 1), else_=0)),
34
+ func.sum(case((ScriptRun.status == ScriptRunStatus.failed, 1), else_=0)),
35
+ func.sum(case((ScriptRun.status == ScriptRunStatus.timed_out, 1), else_=0)),
36
+ func.avg(ScriptRun.duration_seconds),
37
+ )
38
+ .filter(ScriptRun.project_id == project_id)
39
+ .one()
40
+ )
41
+
42
+ total_runs = int(aggregate[0] or 0)
43
+ success_runs = int(aggregate[1] or 0)
44
+ failed_runs = int(aggregate[2] or 0)
45
+ timed_out_runs = int(aggregate[3] or 0)
46
+ avg_duration = float(aggregate[4] or 0.0)
47
+
48
+ failed_items = (
49
+ db.query(ScriptRun, TestCase.title)
50
+ .outerjoin(TestCase, TestCase.id == ScriptRun.test_case_id)
51
+ .filter(ScriptRun.project_id == project_id)
52
+ .filter(ScriptRun.success.is_(False))
53
+ .order_by(ScriptRun.created_at.desc(), ScriptRun.id.desc())
54
+ .limit(10)
55
+ .all()
56
+ )
57
+
58
+ recent_failures = [
59
+ RecentFailureResponse(
60
+ run_id=run.id,
61
+ script_id=run.script_id,
62
+ test_case_id=run.test_case_id,
63
+ test_case_title=tc_title,
64
+ exit_code=run.exit_code,
65
+ duration_seconds=run.duration_seconds,
66
+ stderr_excerpt=(run.stderr or "")[:240],
67
+ created_at=run.created_at,
68
+ )
69
+ for run, tc_title in failed_items
70
+ ]
71
+
72
+ success_rate = round((success_runs / total_runs) * 100, 2) if total_runs else 0.0
73
+
74
+ return RunAnalyticsResponse(
75
+ project_id=project_id,
76
+ total_runs=total_runs,
77
+ success_runs=success_runs,
78
+ failed_runs=failed_runs,
79
+ timed_out_runs=timed_out_runs,
80
+ success_rate=success_rate,
81
+ average_duration_seconds=round(avg_duration, 2),
82
+ recent_failures=recent_failures,
83
+ )
84
+
85
+
86
+ def _resolve_datetime_range(start_date: date | None, end_date: date | None) -> tuple[datetime | None, datetime | None]:
87
+ start_dt = datetime.combine(start_date, time.min, tzinfo=timezone.utc) if start_date else None
88
+ end_dt = datetime.combine(end_date, time.max, tzinfo=timezone.utc) if end_date else None
89
+ return start_dt, end_dt
90
+
91
+
92
+ @insights_router.get("/trends", response_model=TrendsResponse)
93
+ def get_trends(
94
+ project_id: int | None = None,
95
+ start_date: date | None = None,
96
+ end_date: date | None = None,
97
+ db: Session = Depends(get_db),
98
+ ):
99
+ if project_id is not None:
100
+ project = db.query(Project).filter(Project.id == project_id).first()
101
+ if not project:
102
+ raise HTTPException(status_code=404, detail="Project not found")
103
+
104
+ start_dt, end_dt = _resolve_datetime_range(start_date, end_date)
105
+ bucket_expr = func.date(ScriptRun.created_at)
106
+ query = (
107
+ db.query(
108
+ bucket_expr.label("bucket"),
109
+ func.count(ScriptRun.id).label("total_runs"),
110
+ func.sum(case((ScriptRun.success.is_(True), 1), else_=0)).label("success_runs"),
111
+ func.avg(ScriptRun.duration_seconds).label("avg_duration"),
112
+ )
113
+ .group_by(bucket_expr)
114
+ .order_by(bucket_expr.asc())
115
+ )
116
+
117
+ if project_id is not None:
118
+ query = query.filter(ScriptRun.project_id == project_id)
119
+ if start_dt is not None:
120
+ query = query.filter(ScriptRun.created_at >= start_dt)
121
+ if end_dt is not None:
122
+ query = query.filter(ScriptRun.created_at <= end_dt)
123
+
124
+ points: list[TrendPointResponse] = []
125
+ for row in query.all():
126
+ total_runs = int(row.total_runs or 0)
127
+ success_runs = int(row.success_runs or 0)
128
+ success_rate = round((success_runs / total_runs) * 100, 2) if total_runs else 0.0
129
+ points.append(
130
+ TrendPointResponse(
131
+ bucket=str(row.bucket),
132
+ total_runs=total_runs,
133
+ success_rate=success_rate,
134
+ average_duration_seconds=round(float(row.avg_duration or 0.0), 2),
135
+ )
136
+ )
137
+
138
+ return TrendsResponse(project_id=project_id, start_date=start_dt, end_date=end_dt, points=points)
139
+
140
+
141
+ @insights_router.get("/flakiness", response_model=FlakinessResponse)
142
+ def get_flakiness(
143
+ project_id: int | None = None,
144
+ start_date: date | None = None,
145
+ end_date: date | None = None,
146
+ min_runs: int = 3,
147
+ db: Session = Depends(get_db),
148
+ ):
149
+ if project_id is not None:
150
+ project = db.query(Project).filter(Project.id == project_id).first()
151
+ if not project:
152
+ raise HTTPException(status_code=404, detail="Project not found")
153
+
154
+ min_runs = max(1, min_runs)
155
+ start_dt, end_dt = _resolve_datetime_range(start_date, end_date)
156
+
157
+ query = db.query(
158
+ ScriptRun.script_id.label("script_id"),
159
+ ScriptRun.test_case_id.label("test_case_id"),
160
+ func.count(ScriptRun.id).label("total_runs"),
161
+ func.sum(case((ScriptRun.success.is_(False), 1), else_=0)).label("failed_runs"),
162
+ func.max(case((ScriptRun.success.is_(False), ScriptRun.created_at), else_=None)).label("last_failure_at"),
163
+ ).group_by(ScriptRun.script_id, ScriptRun.test_case_id)
164
+
165
+ if project_id is not None:
166
+ query = query.filter(ScriptRun.project_id == project_id)
167
+ if start_dt is not None:
168
+ query = query.filter(ScriptRun.created_at >= start_dt)
169
+ if end_dt is not None:
170
+ query = query.filter(ScriptRun.created_at <= end_dt)
171
+
172
+ rows = query.having(func.count(ScriptRun.id) >= min_runs).all()
173
+ items: list[FlakinessItemResponse] = []
174
+ for row in rows:
175
+ total_runs = int(row.total_runs or 0)
176
+ failed_runs = int(row.failed_runs or 0)
177
+ flakiness_score = round((failed_runs / total_runs) * 100, 2) if total_runs else 0.0
178
+ confidence_score = round(min(1.0, total_runs / 10.0) * 100, 2)
179
+ items.append(
180
+ FlakinessItemResponse(
181
+ script_id=int(row.script_id),
182
+ test_case_id=int(row.test_case_id),
183
+ total_runs=total_runs,
184
+ failed_runs=failed_runs,
185
+ flakiness_score=flakiness_score,
186
+ confidence_score=confidence_score,
187
+ last_failure_at=row.last_failure_at,
188
+ )
189
+ )
190
+
191
+ items.sort(key=lambda item: (-item.flakiness_score, -item.confidence_score, -item.total_runs))
192
+ return FlakinessResponse(
193
+ project_id=project_id,
194
+ start_date=start_dt,
195
+ end_date=end_dt,
196
+ min_runs=min_runs,
197
+ items=items,
198
+ )
@@ -0,0 +1,188 @@
1
+ import base64
2
+ from datetime import datetime, timezone
3
+ from io import BytesIO
4
+
5
+ from fastapi import APIRouter, Depends, HTTPException
6
+ from fastapi.responses import StreamingResponse
7
+ from sqlalchemy.orm import Session
8
+
9
+ from app.config import settings
10
+ from app.database import get_db
11
+ from app.models.execution_job import ExecutionJob
12
+ from app.schemas.reports import ArtifactItemResponse, ArtifactListResponse, ExecutionLogsResponse, ExecutionReportResponse
13
+ from app.services import rbac as rbac_service
14
+ from app.services.auth_dependencies import require_auth_with_scopes
15
+
16
+ router = APIRouter(prefix="/reports", tags=["Reports"])
17
+
18
+
19
+ def _ensure_project_access(db: Session, project_id: int, user_id: int) -> None:
20
+ rbac_service.ensure_project_exists(db, project_id)
21
+ existing_members = rbac_service.list_project_members(db, project_id)
22
+ if not existing_members:
23
+ return
24
+
25
+ membership = rbac_service.get_user_project_membership(db, project_id, user_id)
26
+ if membership is None or membership.role not in rbac_service.READ_ROLES:
27
+ raise HTTPException(status_code=403, detail="Insufficient project role")
28
+
29
+
30
+ def _get_execution_or_404(execution_id: int, db: Session) -> ExecutionJob:
31
+ job = db.query(ExecutionJob).filter(ExecutionJob.id == execution_id).first()
32
+ if not job:
33
+ raise HTTPException(status_code=404, detail="Execution job not found")
34
+ return job
35
+
36
+
37
+ def _result_payload(job: ExecutionJob) -> dict:
38
+ return job.result_payload if isinstance(job.result_payload, dict) else {}
39
+
40
+
41
+ def _artifact_items(job: ExecutionJob) -> list[ArtifactItemResponse]:
42
+ payload = _result_payload(job)
43
+ raw_artifacts = payload.get("artifacts")
44
+ if not isinstance(raw_artifacts, list):
45
+ return []
46
+
47
+ items: list[ArtifactItemResponse] = []
48
+ for idx, raw in enumerate(raw_artifacts, start=1):
49
+ if not isinstance(raw, dict):
50
+ continue
51
+ artifact_id = str(raw.get("id") or idx)
52
+ filename = str(raw.get("filename") or f"artifact-{artifact_id}.bin")
53
+ artifact_type = str(raw.get("type") or "generic")
54
+ content_type = str(raw.get("content_type") or "application/octet-stream")
55
+ content = raw.get("content")
56
+ size_bytes = len(content.encode("utf-8")) if isinstance(content, str) else int(raw.get("size_bytes") or 0)
57
+ items.append(
58
+ ArtifactItemResponse(
59
+ id=artifact_id,
60
+ type=artifact_type,
61
+ filename=filename,
62
+ content_type=content_type,
63
+ size_bytes=size_bytes,
64
+ created_at=job.completed_at,
65
+ )
66
+ )
67
+ return items
68
+
69
+
70
+ @router.get("/{execution_id}", response_model=ExecutionReportResponse)
71
+ def get_execution_report(
72
+ execution_id: int,
73
+ current_user=Depends(require_auth_with_scopes({"execution:read"})),
74
+ db: Session = Depends(get_db),
75
+ ):
76
+ job = _get_execution_or_404(execution_id, db)
77
+ _ensure_project_access(db, job.project_id, current_user.id)
78
+
79
+ duration_seconds = 0.0
80
+ if job.started_at and job.completed_at:
81
+ duration_seconds = max(0.0, (job.completed_at - job.started_at).total_seconds())
82
+ elif job.started_at:
83
+ now = datetime.now(timezone.utc)
84
+ if job.started_at.tzinfo is None:
85
+ now = now.replace(tzinfo=None)
86
+ duration_seconds = max(0.0, (now - job.started_at).total_seconds())
87
+
88
+ payload = _result_payload(job)
89
+ duration_seconds = float(payload.get("duration_seconds") or duration_seconds)
90
+
91
+ return ExecutionReportResponse(
92
+ execution_id=job.id,
93
+ project_id=job.project_id,
94
+ script_id=job.script_id,
95
+ test_case_id=job.test_case_id,
96
+ status=job.status,
97
+ duration_seconds=duration_seconds,
98
+ started_at=job.started_at,
99
+ completed_at=job.completed_at,
100
+ cancelled_at=job.cancelled_at,
101
+ error_message=job.error_message,
102
+ )
103
+
104
+
105
+ @router.get("/{execution_id}/logs", response_model=ExecutionLogsResponse)
106
+ def get_execution_logs(
107
+ execution_id: int,
108
+ current_user=Depends(require_auth_with_scopes({"execution:read"})),
109
+ db: Session = Depends(get_db),
110
+ ):
111
+ job = _get_execution_or_404(execution_id, db)
112
+ _ensure_project_access(db, job.project_id, current_user.id)
113
+
114
+ payload = _result_payload(job)
115
+ stdout = payload.get("stdout")
116
+ stderr = payload.get("stderr")
117
+
118
+ if not isinstance(stdout, str):
119
+ stdout = ""
120
+ if not isinstance(stderr, str):
121
+ stderr = ""
122
+
123
+ return ExecutionLogsResponse(execution_id=job.id, stdout=stdout, stderr=stderr)
124
+
125
+
126
+ @router.get("/{execution_id}/artifacts", response_model=ArtifactListResponse)
127
+ def list_execution_artifacts(
128
+ execution_id: int,
129
+ current_user=Depends(require_auth_with_scopes({"execution:read"})),
130
+ db: Session = Depends(get_db),
131
+ ):
132
+ job = _get_execution_or_404(execution_id, db)
133
+ _ensure_project_access(db, job.project_id, current_user.id)
134
+
135
+ items = _artifact_items(job)
136
+ return ArtifactListResponse(
137
+ execution_id=job.id,
138
+ retention_days=max(1, settings.ARTIFACT_RETENTION_DAYS),
139
+ artifacts=items,
140
+ )
141
+
142
+
143
+ @router.get("/{execution_id}/download/{artifact_id}")
144
+ def download_execution_artifact(
145
+ execution_id: int,
146
+ artifact_id: str,
147
+ current_user=Depends(require_auth_with_scopes({"execution:read"})),
148
+ db: Session = Depends(get_db),
149
+ ):
150
+ job = _get_execution_or_404(execution_id, db)
151
+ _ensure_project_access(db, job.project_id, current_user.id)
152
+
153
+ payload = _result_payload(job)
154
+ raw_artifacts = payload.get("artifacts")
155
+ if not isinstance(raw_artifacts, list):
156
+ raise HTTPException(status_code=404, detail="Artifact not found")
157
+
158
+ selected: dict | None = None
159
+ for idx, raw in enumerate(raw_artifacts, start=1):
160
+ if not isinstance(raw, dict):
161
+ continue
162
+ current_id = str(raw.get("id") or idx)
163
+ if current_id == artifact_id:
164
+ selected = raw
165
+ break
166
+
167
+ if selected is None:
168
+ raise HTTPException(status_code=404, detail="Artifact not found")
169
+
170
+ content = selected.get("content")
171
+ if not isinstance(content, str):
172
+ raise HTTPException(status_code=404, detail="Artifact content unavailable")
173
+
174
+ encoding = str(selected.get("encoding") or "utf-8").lower()
175
+ if encoding == "base64":
176
+ try:
177
+ raw_bytes = base64.b64decode(content, validate=True)
178
+ except ValueError as exc:
179
+ raise HTTPException(status_code=400, detail="Invalid artifact encoding") from exc
180
+ else:
181
+ raw_bytes = content.encode("utf-8")
182
+
183
+ filename = str(selected.get("filename") or f"artifact-{artifact_id}.bin")
184
+ content_type = str(selected.get("content_type") or "application/octet-stream")
185
+
186
+ response = StreamingResponse(BytesIO(raw_bytes), media_type=content_type)
187
+ response.headers["Content-Disposition"] = f'attachment; filename="{filename}"'
188
+ return response
@@ -0,0 +1,59 @@
1
+ from datetime import datetime
2
+
3
+ from pydantic import BaseModel, ConfigDict
4
+
5
+
6
+ class RecentFailureResponse(BaseModel):
7
+ run_id: int
8
+ script_id: int
9
+ test_case_id: int
10
+ test_case_title: str | None = None
11
+ exit_code: int
12
+ duration_seconds: float
13
+ stderr_excerpt: str
14
+ created_at: datetime
15
+
16
+ model_config = ConfigDict(from_attributes=True)
17
+
18
+
19
+ class RunAnalyticsResponse(BaseModel):
20
+ project_id: int
21
+ total_runs: int
22
+ success_runs: int
23
+ failed_runs: int
24
+ timed_out_runs: int
25
+ success_rate: float
26
+ average_duration_seconds: float
27
+ recent_failures: list[RecentFailureResponse]
28
+
29
+
30
+ class TrendPointResponse(BaseModel):
31
+ bucket: str
32
+ total_runs: int
33
+ success_rate: float
34
+ average_duration_seconds: float
35
+
36
+
37
+ class TrendsResponse(BaseModel):
38
+ project_id: int | None = None
39
+ start_date: datetime | None = None
40
+ end_date: datetime | None = None
41
+ points: list[TrendPointResponse]
42
+
43
+
44
+ class FlakinessItemResponse(BaseModel):
45
+ script_id: int
46
+ test_case_id: int
47
+ total_runs: int
48
+ failed_runs: int
49
+ flakiness_score: float
50
+ confidence_score: float
51
+ last_failure_at: datetime | None = None
52
+
53
+
54
+ class FlakinessResponse(BaseModel):
55
+ project_id: int | None = None
56
+ start_date: datetime | None = None
57
+ end_date: datetime | None = None
58
+ min_runs: int
59
+ items: list[FlakinessItemResponse]
@@ -0,0 +1,39 @@
1
+ from datetime import datetime
2
+
3
+ from pydantic import BaseModel
4
+
5
+ from app.models.execution_job import ExecutionJobStatus
6
+
7
+
8
+ class ExecutionReportResponse(BaseModel):
9
+ execution_id: int
10
+ project_id: int
11
+ script_id: int
12
+ test_case_id: int
13
+ status: ExecutionJobStatus
14
+ duration_seconds: float
15
+ started_at: datetime | None = None
16
+ completed_at: datetime | None = None
17
+ cancelled_at: datetime | None = None
18
+ error_message: str | None = None
19
+
20
+
21
+ class ExecutionLogsResponse(BaseModel):
22
+ execution_id: int
23
+ stdout: str
24
+ stderr: str
25
+
26
+
27
+ class ArtifactItemResponse(BaseModel):
28
+ id: str
29
+ type: str
30
+ filename: str
31
+ content_type: str
32
+ size_bytes: int
33
+ created_at: datetime | None = None
34
+
35
+
36
+ class ArtifactListResponse(BaseModel):
37
+ execution_id: int
38
+ retention_days: int
39
+ artifacts: list[ArtifactItemResponse]
@@ -5,13 +5,14 @@ These tasks are executed by Celery workers and should not block the API.
5
5
  Examples: Script generation, test execution, file processing, etc.
6
6
  """
7
7
 
8
- from datetime import datetime, timezone
8
+ from datetime import datetime, timedelta, timezone
9
9
  import logging
10
10
 
11
11
  from celery import shared_task
12
12
  from app.config import settings
13
13
  from app.database import SessionLocal
14
14
  from app.models.execution_job import ExecutionJob, ExecutionJobStatus, can_transition
15
+ from app.models.script_run import ScriptRun
15
16
 
16
17
  logger = logging.getLogger(__name__)
17
18
 
@@ -245,11 +246,47 @@ def cleanup_old_artifacts(days: int = 30):
245
246
  """
246
247
  db = SessionLocal()
247
248
  try:
248
- logger.info(f"Cleaning up artifacts older than {days} days")
249
-
250
- # TODO: Implement cleanup logic
251
- # Delete old execution records, logs, artifacts beyond retention period
252
-
253
- return {"status": "completed", "removed_count": 0}
249
+ retention_days = max(1, int(days))
250
+ if not hasattr(db, "query"):
251
+ logger.warning("Skipping artifact cleanup because session does not support query operations")
252
+ return {
253
+ "status": "completed",
254
+ "removed_count": 0,
255
+ "removed_script_runs": 0,
256
+ "removed_execution_jobs": 0,
257
+ "retention_days": retention_days,
258
+ }
259
+
260
+ cutoff = datetime.now(timezone.utc) - timedelta(days=retention_days)
261
+ logger.info("Cleaning up artifacts older than %s days", retention_days)
262
+
263
+ removed_script_runs = (
264
+ db.query(ScriptRun)
265
+ .filter(ScriptRun.created_at < cutoff)
266
+ .delete(synchronize_session=False)
267
+ )
268
+ removed_execution_jobs = (
269
+ db.query(ExecutionJob)
270
+ .filter(
271
+ ExecutionJob.updated_at < cutoff,
272
+ ExecutionJob.status.in_(
273
+ [
274
+ ExecutionJobStatus.completed,
275
+ ExecutionJobStatus.failed,
276
+ ExecutionJobStatus.cancelled,
277
+ ]
278
+ ),
279
+ )
280
+ .delete(synchronize_session=False)
281
+ )
282
+ db.commit()
283
+
284
+ return {
285
+ "status": "completed",
286
+ "removed_count": int(removed_script_runs + removed_execution_jobs),
287
+ "removed_script_runs": int(removed_script_runs),
288
+ "removed_execution_jobs": int(removed_execution_jobs),
289
+ "retention_days": retention_days,
290
+ }
254
291
  finally:
255
292
  db.close()
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "scriptgini"
7
- version = "1.3.1"
7
+ version = "1.4.0"
8
8
  description = "Agentic AI system that converts functional test cases into automation test scripts."
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.11"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: scriptgini
3
- Version: 1.3.1
3
+ Version: 1.4.0
4
4
  Summary: Agentic AI system that converts functional test cases into automation test scripts.
5
5
  Author: ScriptGini Team
6
6
  License: Proprietary
@@ -443,7 +443,7 @@ The project follows an **enterprise-grade development roadmap** with 6 sprints c
443
443
  | **Sprint 2** | RBAC + Multi-Tenancy | 32-38pts | 🟡 Core delivered (RBAC hardening pending) |
444
444
  | **Sprint 3** | Durable Execution | 34-40pts | ✅ Completed (Redis + Celery queue foundation) |
445
445
  | **Sprint 4** | Security & Hardening | 30-36pts | 🟡 In progress (isolation boundary + breakout tests pending) |
446
- | **Sprint 5** | Reporting & Analytics | 28-34pts | 🔲 Pending (Artifact storage, dashboards) |
446
+ | **Sprint 5** | Reporting & Analytics | 28-34pts | ✅ Completed (Reports APIs, trends/flakiness, retention cleanup) |
447
447
  | **Sprint 6** | Advanced Features | 24-30pts | 🔲 Pending (Webhooks, defect sync, versioning) |
448
448
 
449
449
  ---
@@ -32,6 +32,7 @@ app/routers/demo.py
32
32
  app/routers/execution.py
33
33
  app/routers/organizations.py
34
34
  app/routers/projects.py
35
+ app/routers/reports.py
35
36
  app/routers/scripts.py
36
37
  app/routers/test_cases.py
37
38
  app/schemas/__init__.py
@@ -44,6 +45,7 @@ app/schemas/generated_script.py
44
45
  app/schemas/membership.py
45
46
  app/schemas/organization.py
46
47
  app/schemas/project.py
48
+ app/schemas/reports.py
47
49
  app/schemas/test_case.py
48
50
  app/services/api_key.py
49
51
  app/services/auth.py
@@ -59,4 +61,5 @@ tests/test_auth.py
59
61
  tests/test_coverage.py
60
62
  tests/test_infra_services_coverage.py
61
63
  tests/test_sprint2_rbac.py
62
- tests/test_sprint3_execution.py
64
+ tests/test_sprint3_execution.py
65
+ tests/test_sprint5_reporting_analytics.py