scriptgini 1.3.0.tar.gz → 1.4.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. {scriptgini-1.3.0 → scriptgini-1.4.0}/PKG-INFO +5 -5
  2. {scriptgini-1.3.0 → scriptgini-1.4.0}/README.md +4 -4
  3. scriptgini-1.4.0/app/__init__.py +3 -0
  4. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/celery_app.py +2 -2
  5. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/config.py +29 -0
  6. scriptgini-1.4.0/app/main.py +192 -0
  7. scriptgini-1.4.0/app/routers/analytics.py +198 -0
  8. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/routers/bulk_jobs.py +18 -5
  9. scriptgini-1.4.0/app/routers/reports.py +188 -0
  10. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/routers/scripts.py +19 -11
  11. scriptgini-1.4.0/app/schemas/analytics.py +59 -0
  12. scriptgini-1.4.0/app/schemas/reports.py +39 -0
  13. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/tasks.py +117 -11
  14. {scriptgini-1.3.0 → scriptgini-1.4.0}/pyproject.toml +1 -1
  15. {scriptgini-1.3.0 → scriptgini-1.4.0}/scriptgini.egg-info/PKG-INFO +5 -5
  16. {scriptgini-1.3.0 → scriptgini-1.4.0}/scriptgini.egg-info/SOURCES.txt +4 -1
  17. {scriptgini-1.3.0 → scriptgini-1.4.0}/tests/test_api.py +132 -0
  18. {scriptgini-1.3.0 → scriptgini-1.4.0}/tests/test_coverage.py +63 -15
  19. {scriptgini-1.3.0 → scriptgini-1.4.0}/tests/test_infra_services_coverage.py +119 -2
  20. scriptgini-1.4.0/tests/test_sprint5_reporting_analytics.py +661 -0
  21. scriptgini-1.3.0/app/__init__.py +0 -3
  22. scriptgini-1.3.0/app/main.py +0 -84
  23. scriptgini-1.3.0/app/routers/analytics.py +0 -73
  24. scriptgini-1.3.0/app/schemas/analytics.py +0 -27
  25. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/agents/__init__.py +0 -0
  26. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/agents/prompts.py +0 -0
  27. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/agents/script_gini_agent.py +0 -0
  28. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/cache.py +0 -0
  29. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/database.py +0 -0
  30. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/llm/__init__.py +0 -0
  31. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/llm/provider.py +0 -0
  32. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/models/__init__.py +0 -0
  33. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/models/api_key.py +0 -0
  34. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/models/bulk_job.py +0 -0
  35. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/models/execution_job.py +0 -0
  36. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/models/generated_script.py +0 -0
  37. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/models/membership.py +0 -0
  38. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/models/organization.py +0 -0
  39. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/models/project.py +0 -0
  40. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/models/script_run.py +0 -0
  41. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/models/test_case.py +0 -0
  42. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/models/user.py +0 -0
  43. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/routers/__init__.py +0 -0
  44. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/routers/api_key.py +0 -0
  45. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/routers/auth.py +0 -0
  46. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/routers/demo.py +0 -0
  47. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/routers/execution.py +0 -0
  48. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/routers/organizations.py +0 -0
  49. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/routers/projects.py +0 -0
  50. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/routers/test_cases.py +0 -0
  51. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/schemas/__init__.py +0 -0
  52. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/schemas/api_key.py +0 -0
  53. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/schemas/auth.py +0 -0
  54. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/schemas/bulk_job.py +0 -0
  55. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/schemas/execution.py +0 -0
  56. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/schemas/generated_script.py +0 -0
  57. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/schemas/membership.py +0 -0
  58. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/schemas/organization.py +0 -0
  59. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/schemas/project.py +0 -0
  60. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/schemas/test_case.py +0 -0
  61. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/services/api_key.py +0 -0
  62. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/services/auth.py +0 -0
  63. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/services/auth_dependencies.py +0 -0
  64. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/services/git_export.py +0 -0
  65. {scriptgini-1.3.0 → scriptgini-1.4.0}/app/services/rbac.py +0 -0
  66. {scriptgini-1.3.0 → scriptgini-1.4.0}/scriptgini.egg-info/dependency_links.txt +0 -0
  67. {scriptgini-1.3.0 → scriptgini-1.4.0}/scriptgini.egg-info/top_level.txt +0 -0
  68. {scriptgini-1.3.0 → scriptgini-1.4.0}/setup.cfg +0 -0
  69. {scriptgini-1.3.0 → scriptgini-1.4.0}/tests/test_auth.py +0 -0
  70. {scriptgini-1.3.0 → scriptgini-1.4.0}/tests/test_sprint2_rbac.py +0 -0
  71. {scriptgini-1.3.0 → scriptgini-1.4.0}/tests/test_sprint3_execution.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: scriptgini
3
- Version: 1.3.0
3
+ Version: 1.4.0
4
4
  Summary: Agentic AI system that converts functional test cases into automation test scripts.
5
5
  Author: ScriptGini Team
6
6
  License: Proprietary
@@ -16,7 +16,7 @@ Description-Content-Type: text/markdown
16
16
 
17
17
  > **Enterprise-grade Agentic AI system that converts functional test cases into high-quality, review-ready automation test scripts.**
18
18
 
19
- Current release: v1.3.0 (Sprint 3)
19
+ Current release: v1.4.0 (Sprint 5 reporting & analytics increment)
20
20
 
21
21
  ---
22
22
 
@@ -441,9 +441,9 @@ The project follows an **enterprise-grade development roadmap** with 6 sprints c
441
441
  |--------|-------|--------|--------|
442
442
  | **Sprint 1** | IAM Core | 30-36pts | 🟡 Core delivered (auth hardening pending) |
443
443
  | **Sprint 2** | RBAC + Multi-Tenancy | 32-38pts | 🟡 Core delivered (RBAC hardening pending) |
444
- | **Sprint 3** | Durable Execution | 34-40pts | 🔲 Pending (Redis + Celery/Arq setup) |
445
- | **Sprint 4** | Security & Hardening | 30-36pts | 🔲 Pending (Container sandbox, audit logging) |
446
- | **Sprint 5** | Reporting & Analytics | 28-34pts | 🔲 Pending (Artifact storage, dashboards) |
444
+ | **Sprint 3** | Durable Execution | 34-40pts | ✅ Completed (Redis + Celery queue foundation) |
445
+ | **Sprint 4** | Security & Hardening | 30-36pts | 🟡 In progress (isolation boundary + breakout tests pending) |
446
+ | **Sprint 5** | Reporting & Analytics | 28-34pts | ✅ Completed (Reports APIs, trends/flakiness, retention cleanup) |
447
447
  | **Sprint 6** | Advanced Features | 24-30pts | 🔲 Pending (Webhooks, defect sync, versioning) |
448
448
 
449
449
  ---
@@ -2,7 +2,7 @@
2
2
 
3
3
  > **Enterprise-grade Agentic AI system that converts functional test cases into high-quality, review-ready automation test scripts.**
4
4
 
5
- Current release: v1.3.0 (Sprint 3)
5
+ Current release: v1.4.0 (Sprint 5 reporting & analytics increment)
6
6
 
7
7
  ---
8
8
 
@@ -427,9 +427,9 @@ The project follows an **enterprise-grade development roadmap** with 6 sprints c
427
427
  |--------|-------|--------|--------|
428
428
  | **Sprint 1** | IAM Core | 30-36pts | 🟡 Core delivered (auth hardening pending) |
429
429
  | **Sprint 2** | RBAC + Multi-Tenancy | 32-38pts | 🟡 Core delivered (RBAC hardening pending) |
430
- | **Sprint 3** | Durable Execution | 34-40pts | 🔲 Pending (Redis + Celery/Arq setup) |
431
- | **Sprint 4** | Security & Hardening | 30-36pts | 🔲 Pending (Container sandbox, audit logging) |
432
- | **Sprint 5** | Reporting & Analytics | 28-34pts | 🔲 Pending (Artifact storage, dashboards) |
430
+ | **Sprint 3** | Durable Execution | 34-40pts | ✅ Completed (Redis + Celery queue foundation) |
431
+ | **Sprint 4** | Security & Hardening | 30-36pts | 🟡 In progress (isolation boundary + breakout tests pending) |
432
+ | **Sprint 5** | Reporting & Analytics | 28-34pts | ✅ Completed (Reports APIs, trends/flakiness, retention cleanup) |
433
433
  | **Sprint 6** | Advanced Features | 24-30pts | 🔲 Pending (Webhooks, defect sync, versioning) |
434
434
 
435
435
  ---
@@ -0,0 +1,3 @@
1
+ __version__ = "1.4.0"
2
+ __api_version__ = "v1.4.0"
3
+
@@ -16,8 +16,8 @@ celery_app.conf.update(
16
16
  timezone="UTC",
17
17
  enable_utc=True,
18
18
  task_track_started=True,
19
- task_time_limit=30 * 60, # 30 minute hard time limit
20
- task_soft_time_limit=25 * 60, # 25 minute soft time limit
19
+ task_time_limit=settings.CELERY_TASK_HARD_TIMEOUT,
20
+ task_soft_time_limit=settings.CELERY_TASK_TIMEOUT,
21
21
  worker_prefetch_multiplier=4,
22
22
  worker_max_tasks_per_child=1000,
23
23
  )
@@ -8,6 +8,11 @@ class Settings(BaseSettings):
8
8
  # App
9
9
  APP_NAME: str = "ScriptGini"
10
10
  DEBUG: bool = False
11
+ CORS_ALLOWED_ORIGINS: str = "http://localhost:3000,http://127.0.0.1:3000"
12
+ RATE_LIMIT_REQUESTS: int = 120
13
+ RATE_LIMIT_WINDOW_SECONDS: int = 60
14
+ RATE_LIMIT_EXEMPT_PATHS: str = "/health,/docs,/redoc,/openapi.json,/static"
15
+ SECURITY_AUDIT_LOG_ENABLED: bool = True
11
16
 
12
17
  # Database (PostgreSQL)
13
18
  # Format: postgresql://user:password@host:port/dbname
@@ -22,6 +27,7 @@ class Settings(BaseSettings):
22
27
  CELERY_BROKER_URL: str = "redis://localhost:6379/1" # Different DB for task queue
23
28
  CELERY_RESULT_BACKEND: str = "redis://localhost:6379/2" # Different DB for results
24
29
  CELERY_TASK_TIMEOUT: int = 600 # 10 minutes default task timeout
30
+ CELERY_TASK_HARD_TIMEOUT: int = 660
25
31
  CELERY_MAX_RETRIES: int = 3
26
32
  JWT_SECRET_KEY: str = "your-secret-key-change-in-production"
27
33
  JWT_ALGORITHM: str = "HS256"
@@ -34,6 +40,9 @@ class Settings(BaseSettings):
34
40
  SCRIPT_GENERATION_TIMEOUT_SECONDS: int = 180
35
41
  PLAYWRIGHT_RUN_HEADED: bool = True
36
42
  SCRIPT_EXECUTION_TIMEOUT_SECONDS: int = 300
43
+ ARTIFACT_RETENTION_DAYS: int = 30
44
+ EXECUTION_ENV_ALLOWED_KEYS: str = "PATH,SYSTEMROOT,TEMP,TMP,HOME,USERPROFILE,PYTHONPATH,PYTHONHOME,PYTHONIOENCODING"
45
+ EXECUTION_ENV_ALLOWED_PREFIXES: str = "PLAYWRIGHT_"
37
46
  SKIP_REVIEW_FOR_OLLAMA: bool = True
38
47
  USE_LLM_INTENT_ANALYSIS: bool = True
39
48
 
@@ -71,5 +80,25 @@ class Settings(BaseSettings):
71
80
  AWS_REGION_NAME: str = "us-east-1"
72
81
  BEDROCK_MODEL_ID: str = "anthropic.claude-3-5-sonnet-20241022-v2:0"
73
82
 
83
+ @staticmethod
84
+ def _split_csv(value: str) -> list[str]:
85
+ return [entry.strip() for entry in value.split(",") if entry.strip()]
86
+
87
+ @property
88
+ def cors_allowed_origins_list(self) -> list[str]:
89
+ return self._split_csv(self.CORS_ALLOWED_ORIGINS)
90
+
91
+ @property
92
+ def rate_limit_exempt_paths_list(self) -> list[str]:
93
+ return self._split_csv(self.RATE_LIMIT_EXEMPT_PATHS)
94
+
95
+ @property
96
+ def execution_env_allowed_keys_set(self) -> set[str]:
97
+ return set(self._split_csv(self.EXECUTION_ENV_ALLOWED_KEYS))
98
+
99
+ @property
100
+ def execution_env_allowed_prefixes_tuple(self) -> tuple[str, ...]:
101
+ return tuple(self._split_csv(self.EXECUTION_ENV_ALLOWED_PREFIXES))
102
+
74
103
 
75
104
  settings = Settings()
@@ -0,0 +1,192 @@
1
+ import logging
2
+ import threading
3
+ import time
4
+ from contextlib import asynccontextmanager
5
+ from pathlib import Path
6
+ from uuid import uuid4
7
+
8
+ from fastapi import FastAPI, Request
9
+ from fastapi.responses import JSONResponse
10
+ from fastapi.responses import FileResponse
11
+ from fastapi.middleware.cors import CORSMiddleware
12
+ from fastapi.staticfiles import StaticFiles
13
+
14
+ from app import __version__
15
+ from app.config import settings
16
+ from app.llm.provider import get_llm_diagnostics
17
+ from app.routers import (
18
+ analytics,
19
+ api_key,
20
+ auth,
21
+ bulk_jobs,
22
+ demo,
23
+ execution,
24
+ organizations,
25
+ projects,
26
+ reports,
27
+ scripts,
28
+ test_cases,
29
+ )
30
+
31
+ logging.basicConfig(level=logging.DEBUG if settings.DEBUG else logging.INFO)
32
+ logger = logging.getLogger(__name__)
33
+
34
+ static_dir = Path(__file__).resolve().parent / "static"
35
+ _RATE_LIMIT_BUCKETS: dict[str, tuple[int, float]] = {}
36
+ _RATE_LIMIT_LOCK = threading.Lock()
37
+
38
+
39
+ def _client_ip(request: Request) -> str:
40
+ forwarded_for = request.headers.get("x-forwarded-for", "")
41
+ if forwarded_for:
42
+ return forwarded_for.split(",")[0].strip()
43
+ return request.client.host if request.client else "unknown"
44
+
45
+
46
+ def _is_exempt_path(path: str) -> bool:
47
+ for exempt in settings.rate_limit_exempt_paths_list:
48
+ if path == exempt or path.startswith(exempt):
49
+ return True
50
+ return False
51
+
52
+
53
+ def _check_rate_limit(key: str, now: float) -> tuple[bool, int, int]:
54
+ window_seconds = max(1, settings.RATE_LIMIT_WINDOW_SECONDS)
55
+ max_requests = max(1, settings.RATE_LIMIT_REQUESTS)
56
+ window_start = now - window_seconds
57
+ with _RATE_LIMIT_LOCK:
58
+ current_count, reset_at = _RATE_LIMIT_BUCKETS.get(key, (0, now + window_seconds))
59
+ if reset_at <= now:
60
+ current_count = 0
61
+ reset_at = now + window_seconds
62
+ current_count += 1
63
+ _RATE_LIMIT_BUCKETS[key] = (current_count, reset_at)
64
+
65
+ remaining = max(0, max_requests - current_count)
66
+ retry_after = max(0, int(reset_at - now))
67
+ return current_count <= max_requests, remaining, retry_after
68
+
69
+
70
+ def _security_audit(event: str, request: Request, status_code: int, request_id: str) -> None:
71
+ if not settings.SECURITY_AUDIT_LOG_ENABLED:
72
+ return
73
+ logger.info(
74
+ "security_audit event=%s request_id=%s method=%s path=%s status=%s ip=%s user_agent=%s",
75
+ event,
76
+ request_id,
77
+ request.method,
78
+ request.url.path,
79
+ status_code,
80
+ _client_ip(request),
81
+ request.headers.get("user-agent", "unknown"),
82
+ )
83
+
84
+
85
+ @asynccontextmanager
86
+ async def lifespan(_: FastAPI):
87
+ diagnostics = get_llm_diagnostics()
88
+ logger.info(
89
+ "Runtime LLM default: provider=%s model=%s api_key_env=%s api_key_present=%s api_key=%s",
90
+ diagnostics["provider"],
91
+ diagnostics["model"],
92
+ diagnostics["api_key_env"],
93
+ diagnostics["api_key_present"],
94
+ diagnostics["api_key_masked"],
95
+ )
96
+ yield
97
+
98
+ app = FastAPI(
99
+ title=settings.APP_NAME,
100
+ description=(
101
+ "Enterprise-grade Agentic AI system that converts functional test cases "
102
+ "into high-quality automation scripts."
103
+ ),
104
+ version=__version__,
105
+ lifespan=lifespan,
106
+ )
107
+
108
+ app.add_middleware(
109
+ CORSMiddleware,
110
+ allow_origins=settings.cors_allowed_origins_list,
111
+ allow_credentials=True,
112
+ allow_methods=["*"],
113
+ allow_headers=["*"],
114
+ )
115
+
116
+
117
+ @app.middleware("http")
118
+ async def apply_security_controls(request: Request, call_next):
119
+ request_id = request.headers.get("X-Request-ID") or str(uuid4())
120
+ request.state.request_id = request_id
121
+ path = request.url.path
122
+ now = time.time()
123
+
124
+ if not _is_exempt_path(path):
125
+ key = f"{_client_ip(request)}:{path}"
126
+ allowed, remaining, retry_after = _check_rate_limit(key, now)
127
+ if not allowed:
128
+ _security_audit("rate_limit_block", request, 429, request_id)
129
+ return JSONResponse(
130
+ status_code=429,
131
+ content={"detail": "Rate limit exceeded", "request_id": request_id},
132
+ headers={
133
+ "X-Request-ID": request_id,
134
+ "X-RateLimit-Limit": str(max(1, settings.RATE_LIMIT_REQUESTS)),
135
+ "X-RateLimit-Remaining": "0",
136
+ "Retry-After": str(retry_after),
137
+ },
138
+ )
139
+
140
+ response = await call_next(request)
141
+ response.headers["X-Request-ID"] = request_id
142
+
143
+ if not _is_exempt_path(path):
144
+ response.headers["X-RateLimit-Limit"] = str(max(1, settings.RATE_LIMIT_REQUESTS))
145
+ response.headers["X-RateLimit-Remaining"] = str(response.headers.get("X-RateLimit-Remaining", ""))
146
+ if not response.headers["X-RateLimit-Remaining"]:
147
+ key = f"{_client_ip(request)}:{path}"
148
+ with _RATE_LIMIT_LOCK:
149
+ count, _ = _RATE_LIMIT_BUCKETS.get(key, (0, now))
150
+ response.headers["X-RateLimit-Remaining"] = str(max(0, max(1, settings.RATE_LIMIT_REQUESTS) - count))
151
+
152
+ if path.startswith("/api/v1/auth") or path.startswith("/api/v1/execution"):
153
+ if response.status_code >= 400 or request.method in {"POST", "PUT", "PATCH", "DELETE"}:
154
+ _security_audit("security_sensitive_request", request, response.status_code, request_id)
155
+
156
+ return response
157
+
158
+ app.include_router(projects.router, prefix="/api/v1")
159
+ app.include_router(test_cases.router, prefix="/api/v1")
160
+ app.include_router(scripts.router, prefix="/api/v1")
161
+ app.include_router(bulk_jobs.router, prefix="/api/v1")
162
+ app.include_router(analytics.router, prefix="/api/v1")
163
+ app.include_router(analytics.insights_router, prefix="/api/v1")
164
+ app.include_router(demo.router, prefix="/api/v1")
165
+ app.include_router(auth.router, prefix="/api/v1")
166
+ app.include_router(api_key.router, prefix="/api/v1")
167
+ app.include_router(organizations.router, prefix="/api/v1")
168
+ app.include_router(execution.router, prefix="/api/v1")
169
+ app.include_router(reports.router, prefix="/api/v1")
170
+ app.mount("/static", StaticFiles(directory=static_dir), name="static")
171
+
172
+
173
+ @app.get("/api/v1/runtime/llm", tags=["Runtime"])
174
+ def runtime_llm():
175
+ default_diagnostics = get_llm_diagnostics()
176
+ return {
177
+ "default_provider": default_diagnostics["provider"],
178
+ "default_model": default_diagnostics["model"],
179
+ "provider_diagnostics": {
180
+ provider: get_llm_diagnostics(provider) for provider in ["openai", "openrouter", "gemini", "ollama", "bedrock"]
181
+ },
182
+ }
183
+
184
+
185
+ @app.get("/", include_in_schema=False)
186
+ def index():
187
+ return FileResponse(static_dir / "index.html")
188
+
189
+
190
+ @app.get("/health", tags=["Health"])
191
+ def health():
192
+ return {"status": "ok", "app": settings.APP_NAME}
@@ -0,0 +1,198 @@
1
+ from datetime import date, datetime, time, timezone
2
+
3
+ from sqlalchemy import case, func
4
+ from sqlalchemy.orm import Session
5
+ from fastapi import APIRouter, Depends, HTTPException
6
+
7
+ from app.database import get_db
8
+ from app.models.project import Project
9
+ from app.models.script_run import ScriptRun, ScriptRunStatus
10
+ from app.models.test_case import TestCase
11
+ from app.schemas.analytics import (
12
+ FlakinessItemResponse,
13
+ FlakinessResponse,
14
+ RecentFailureResponse,
15
+ RunAnalyticsResponse,
16
+ TrendPointResponse,
17
+ TrendsResponse,
18
+ )
19
+
20
+ router = APIRouter(prefix="/projects/{project_id}/analytics", tags=["Run Analytics"])
21
+ insights_router = APIRouter(prefix="/analytics", tags=["Run Analytics"])
22
+
23
+
24
+ @router.get("/runs", response_model=RunAnalyticsResponse)
25
+ def get_run_analytics(project_id: int, db: Session = Depends(get_db)):
26
+ project = db.query(Project).filter(Project.id == project_id).first()
27
+ if not project:
28
+ raise HTTPException(status_code=404, detail="Project not found")
29
+
30
+ aggregate = (
31
+ db.query(
32
+ func.count(ScriptRun.id),
33
+ func.sum(case((ScriptRun.success.is_(True), 1), else_=0)),
34
+ func.sum(case((ScriptRun.status == ScriptRunStatus.failed, 1), else_=0)),
35
+ func.sum(case((ScriptRun.status == ScriptRunStatus.timed_out, 1), else_=0)),
36
+ func.avg(ScriptRun.duration_seconds),
37
+ )
38
+ .filter(ScriptRun.project_id == project_id)
39
+ .one()
40
+ )
41
+
42
+ total_runs = int(aggregate[0] or 0)
43
+ success_runs = int(aggregate[1] or 0)
44
+ failed_runs = int(aggregate[2] or 0)
45
+ timed_out_runs = int(aggregate[3] or 0)
46
+ avg_duration = float(aggregate[4] or 0.0)
47
+
48
+ failed_items = (
49
+ db.query(ScriptRun, TestCase.title)
50
+ .outerjoin(TestCase, TestCase.id == ScriptRun.test_case_id)
51
+ .filter(ScriptRun.project_id == project_id)
52
+ .filter(ScriptRun.success.is_(False))
53
+ .order_by(ScriptRun.created_at.desc(), ScriptRun.id.desc())
54
+ .limit(10)
55
+ .all()
56
+ )
57
+
58
+ recent_failures = [
59
+ RecentFailureResponse(
60
+ run_id=run.id,
61
+ script_id=run.script_id,
62
+ test_case_id=run.test_case_id,
63
+ test_case_title=tc_title,
64
+ exit_code=run.exit_code,
65
+ duration_seconds=run.duration_seconds,
66
+ stderr_excerpt=(run.stderr or "")[:240],
67
+ created_at=run.created_at,
68
+ )
69
+ for run, tc_title in failed_items
70
+ ]
71
+
72
+ success_rate = round((success_runs / total_runs) * 100, 2) if total_runs else 0.0
73
+
74
+ return RunAnalyticsResponse(
75
+ project_id=project_id,
76
+ total_runs=total_runs,
77
+ success_runs=success_runs,
78
+ failed_runs=failed_runs,
79
+ timed_out_runs=timed_out_runs,
80
+ success_rate=success_rate,
81
+ average_duration_seconds=round(avg_duration, 2),
82
+ recent_failures=recent_failures,
83
+ )
84
+
85
+
86
+ def _resolve_datetime_range(start_date: date | None, end_date: date | None) -> tuple[datetime | None, datetime | None]:
87
+ start_dt = datetime.combine(start_date, time.min, tzinfo=timezone.utc) if start_date else None
88
+ end_dt = datetime.combine(end_date, time.max, tzinfo=timezone.utc) if end_date else None
89
+ return start_dt, end_dt
90
+
91
+
92
+ @insights_router.get("/trends", response_model=TrendsResponse)
93
+ def get_trends(
94
+ project_id: int | None = None,
95
+ start_date: date | None = None,
96
+ end_date: date | None = None,
97
+ db: Session = Depends(get_db),
98
+ ):
99
+ if project_id is not None:
100
+ project = db.query(Project).filter(Project.id == project_id).first()
101
+ if not project:
102
+ raise HTTPException(status_code=404, detail="Project not found")
103
+
104
+ start_dt, end_dt = _resolve_datetime_range(start_date, end_date)
105
+ bucket_expr = func.date(ScriptRun.created_at)
106
+ query = (
107
+ db.query(
108
+ bucket_expr.label("bucket"),
109
+ func.count(ScriptRun.id).label("total_runs"),
110
+ func.sum(case((ScriptRun.success.is_(True), 1), else_=0)).label("success_runs"),
111
+ func.avg(ScriptRun.duration_seconds).label("avg_duration"),
112
+ )
113
+ .group_by(bucket_expr)
114
+ .order_by(bucket_expr.asc())
115
+ )
116
+
117
+ if project_id is not None:
118
+ query = query.filter(ScriptRun.project_id == project_id)
119
+ if start_dt is not None:
120
+ query = query.filter(ScriptRun.created_at >= start_dt)
121
+ if end_dt is not None:
122
+ query = query.filter(ScriptRun.created_at <= end_dt)
123
+
124
+ points: list[TrendPointResponse] = []
125
+ for row in query.all():
126
+ total_runs = int(row.total_runs or 0)
127
+ success_runs = int(row.success_runs or 0)
128
+ success_rate = round((success_runs / total_runs) * 100, 2) if total_runs else 0.0
129
+ points.append(
130
+ TrendPointResponse(
131
+ bucket=str(row.bucket),
132
+ total_runs=total_runs,
133
+ success_rate=success_rate,
134
+ average_duration_seconds=round(float(row.avg_duration or 0.0), 2),
135
+ )
136
+ )
137
+
138
+ return TrendsResponse(project_id=project_id, start_date=start_dt, end_date=end_dt, points=points)
139
+
140
+
141
+ @insights_router.get("/flakiness", response_model=FlakinessResponse)
142
+ def get_flakiness(
143
+ project_id: int | None = None,
144
+ start_date: date | None = None,
145
+ end_date: date | None = None,
146
+ min_runs: int = 3,
147
+ db: Session = Depends(get_db),
148
+ ):
149
+ if project_id is not None:
150
+ project = db.query(Project).filter(Project.id == project_id).first()
151
+ if not project:
152
+ raise HTTPException(status_code=404, detail="Project not found")
153
+
154
+ min_runs = max(1, min_runs)
155
+ start_dt, end_dt = _resolve_datetime_range(start_date, end_date)
156
+
157
+ query = db.query(
158
+ ScriptRun.script_id.label("script_id"),
159
+ ScriptRun.test_case_id.label("test_case_id"),
160
+ func.count(ScriptRun.id).label("total_runs"),
161
+ func.sum(case((ScriptRun.success.is_(False), 1), else_=0)).label("failed_runs"),
162
+ func.max(case((ScriptRun.success.is_(False), ScriptRun.created_at), else_=None)).label("last_failure_at"),
163
+ ).group_by(ScriptRun.script_id, ScriptRun.test_case_id)
164
+
165
+ if project_id is not None:
166
+ query = query.filter(ScriptRun.project_id == project_id)
167
+ if start_dt is not None:
168
+ query = query.filter(ScriptRun.created_at >= start_dt)
169
+ if end_dt is not None:
170
+ query = query.filter(ScriptRun.created_at <= end_dt)
171
+
172
+ rows = query.having(func.count(ScriptRun.id) >= min_runs).all()
173
+ items: list[FlakinessItemResponse] = []
174
+ for row in rows:
175
+ total_runs = int(row.total_runs or 0)
176
+ failed_runs = int(row.failed_runs or 0)
177
+ flakiness_score = round((failed_runs / total_runs) * 100, 2) if total_runs else 0.0
178
+ confidence_score = round(min(1.0, total_runs / 10.0) * 100, 2)
179
+ items.append(
180
+ FlakinessItemResponse(
181
+ script_id=int(row.script_id),
182
+ test_case_id=int(row.test_case_id),
183
+ total_runs=total_runs,
184
+ failed_runs=failed_runs,
185
+ flakiness_score=flakiness_score,
186
+ confidence_score=confidence_score,
187
+ last_failure_at=row.last_failure_at,
188
+ )
189
+ )
190
+
191
+ items.sort(key=lambda item: (-item.flakiness_score, -item.confidence_score, -item.total_runs))
192
+ return FlakinessResponse(
193
+ project_id=project_id,
194
+ start_date=start_dt,
195
+ end_date=end_dt,
196
+ min_runs=min_runs,
197
+ items=items,
198
+ )
@@ -2,7 +2,7 @@ import logging
2
2
  import subprocess
3
3
  import sys
4
4
 
5
- from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, status
5
+ from fastapi import APIRouter, Depends, HTTPException, status
6
6
  from sqlalchemy.orm import Session
7
7
 
8
8
  from app.database import get_db
@@ -13,6 +13,7 @@ from app.models.project import Project
13
13
  from app.models.script_run import ScriptRunStatus
14
14
  from app.models.test_case import TestCase
15
15
  from app.schemas.bulk_job import BulkGenerateRequest, BulkJobResponse, BulkRunRequest
16
+ from app.tasks import process_bulk_execution_job, process_bulk_generation_job
16
17
  from app.routers.scripts import (
17
18
  _create_script_run,
18
19
  _get_project_or_404,
@@ -218,7 +219,6 @@ def _run_bulk_execution(job_id: int, request: BulkRunRequest):
218
219
  def bulk_generate_scripts(
219
220
  project_id: int,
220
221
  payload: BulkGenerateRequest,
221
- background_tasks: BackgroundTasks,
222
222
  db: Session = Depends(get_db),
223
223
  ):
224
224
  _get_project_or_404(project_id, db)
@@ -238,7 +238,14 @@ def bulk_generate_scripts(
238
238
  _refresh_job_counts(job, db)
239
239
  db.commit()
240
240
 
241
- background_tasks.add_task(_run_bulk_generation, job.id, payload)
241
+ try:
242
+ process_bulk_generation_job.delay(job.id, payload.model_dump())
243
+ except Exception as exc:
244
+ logger.exception("Failed to enqueue bulk generation job_id=%s", job.id)
245
+ job.status = BulkJobStatus.failed
246
+ db.commit()
247
+ raise HTTPException(status_code=503, detail="Failed to enqueue bulk generation") from exc
248
+
242
249
  return _serialize_bulk_job(job, db)
243
250
 
244
251
 
@@ -246,7 +253,6 @@ def bulk_generate_scripts(
246
253
  def bulk_run_scripts(
247
254
  project_id: int,
248
255
  payload: BulkRunRequest,
249
- background_tasks: BackgroundTasks,
250
256
  db: Session = Depends(get_db),
251
257
  ):
252
258
  _get_project_or_404(project_id, db)
@@ -266,7 +272,14 @@ def bulk_run_scripts(
266
272
  _refresh_job_counts(job, db)
267
273
  db.commit()
268
274
 
269
- background_tasks.add_task(_run_bulk_execution, job.id, payload)
275
+ try:
276
+ process_bulk_execution_job.delay(job.id, payload.model_dump())
277
+ except Exception as exc:
278
+ logger.exception("Failed to enqueue bulk run job_id=%s", job.id)
279
+ job.status = BulkJobStatus.failed
280
+ db.commit()
281
+ raise HTTPException(status_code=503, detail="Failed to enqueue bulk run") from exc
282
+
270
283
  return _serialize_bulk_job(job, db)
271
284
 
272
285