scriptgini 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
app/llm/provider.py ADDED
@@ -0,0 +1,192 @@
1
+ """
2
+ LLM Provider Factory for ScriptGini.
3
+
4
+ Supports: OpenAI, Ollama (local), OpenRouter, Google Gemini, AWS Bedrock.
5
+ Each provider returns a LangChain BaseChatModel so the agent layer is
6
+ provider-agnostic.
7
+ """
8
+ from __future__ import annotations
9
+
10
+ import hashlib
11
+ import logging
12
+ from typing import Literal
13
+
14
+ from langchain_core.language_models import BaseChatModel
15
+
16
+ from app.config import settings
17
+
18
# Closed set of provider identifiers accepted by get_llm()/get_llm_diagnostics().
LLMProvider = Literal["openai", "ollama", "openrouter", "gemini", "bedrock"]
# Module-level logger (standard per-module pattern).
logger = logging.getLogger(__name__)
20
+
21
+
22
def _mask_secret(secret: str) -> str:
    """Return a log-safe fingerprint of an API key.

    Produces ``<missing>`` for empty/blank values, ``****`` for short values,
    and otherwise ``abcd...wxyz(len=N,sha256=XXXXXXXX)`` — first/last four
    characters plus length and a truncated SHA-256 digest so operators can
    compare configured keys without logging them.

    Args:
        secret: The raw secret value (may be empty or None-ish falsy).

    Returns:
        A string safe to emit in logs.
    """
    value = (secret or "").strip()
    if not value:
        return "<missing>"
    # Fix: the previous threshold of 8 revealed prefix+suffix (8 chars) for
    # secrets of length 9-12, i.e. nearly the whole secret. Require enough
    # length that the 8 revealed characters are a small fraction of the key.
    if len(value) <= 12:
        return "****"
    digest = hashlib.sha256(value.encode("utf-8")).hexdigest()[:8]
    return f"{value[:4]}...{value[-4:]}(len={len(value)},sha256={digest})"
30
+
31
+
32
def _provider_secret(provider: LLMProvider) -> tuple[str | None, str]:
    """Map a provider to its API-key env-var name and configured value.

    Returns ``(None, "")`` for providers that do not require an API key
    (ollama and bedrock).
    """
    secrets: dict[str, tuple[str, str]] = {
        "openai": ("OPENAI_API_KEY", settings.OPENAI_API_KEY),
        "openrouter": ("OPENROUTER_API_KEY", settings.OPENROUTER_API_KEY),
        "gemini": ("GOOGLE_API_KEY", settings.GOOGLE_API_KEY),
    }
    return secrets.get(provider, (None, ""))
40
+
41
+
42
def _default_model_for_provider(provider: LLMProvider) -> str:
    """Return the configured default model name for *provider*.

    Any unrecognized value falls through to the Bedrock model id, mirroring
    the original if/else chain's final branch.
    """
    model_by_provider = {
        "openai": settings.OPENAI_MODEL,
        "ollama": settings.OLLAMA_MODEL,
        "openrouter": settings.OPENROUTER_MODEL,
        "gemini": settings.GEMINI_MODEL,
    }
    return model_by_provider.get(provider, settings.BEDROCK_MODEL_ID)
52
+
53
+
54
def get_llm_diagnostics(provider: LLMProvider | None = None, model: str | None = None) -> dict[str, str | bool]:
    """Build a log-safe summary of the effective provider/model/key config.

    Args:
        provider: Provider to inspect; defaults to settings.DEFAULT_LLM_PROVIDER.
        model: Model override; defaults to the provider's configured model.

    Returns:
        Dict with provider, model, the key's env-var name, whether a key is
        present, and a masked rendering of the key (never the raw secret).
    """
    chosen: LLMProvider = provider if provider else settings.DEFAULT_LLM_PROVIDER
    chosen_model = model if model else _default_model_for_provider(chosen)
    env_name, secret = _provider_secret(chosen)

    if env_name is None:
        # Key-less providers (ollama, bedrock) report placeholder values and
        # are always considered "present".
        return {
            "provider": chosen,
            "model": chosen_model,
            "api_key_env": "<not-required>",
            "api_key_present": True,
            "api_key_masked": "<not-required>",
        }

    return {
        "provider": chosen,
        "model": chosen_model,
        "api_key_env": env_name,
        "api_key_present": bool(secret.strip()),
        "api_key_masked": _mask_secret(secret),
    }
66
+
67
+
68
def get_llm(
    provider: LLMProvider | None = None,
    *,
    model: str | None = None,
    temperature: float = 0.1,
) -> BaseChatModel:
    """
    Return a LangChain chat model for the requested provider.

    Args:
        provider: One of 'openai', 'ollama', 'openrouter', 'gemini', 'bedrock'.
            Defaults to settings.DEFAULT_LLM_PROVIDER.
        model: Override the model name. Falls back to per-provider default.
        temperature: Sampling temperature (low value keeps output deterministic).

    Raises:
        ValueError: If *provider* is not one of the supported identifiers.
    """
    provider = provider or settings.DEFAULT_LLM_PROVIDER
    diagnostics = get_llm_diagnostics(provider, model)
    logger.info(
        "LLM pick: provider=%s model=%s api_key_env=%s api_key_present=%s api_key=%s",
        diagnostics["provider"],
        diagnostics["model"],
        diagnostics["api_key_env"],
        diagnostics["api_key_present"],
        diagnostics["api_key_masked"],
    )

    # Resolve the default model ONCE through the shared helper instead of
    # repeating each per-provider default inline (the previous duplication
    # could drift out of sync with _default_model_for_provider / diagnostics).
    resolved_model = model or _default_model_for_provider(provider)
    factories = {
        "openai": _openai,
        "ollama": _ollama,
        "openrouter": _openrouter,
        "gemini": _gemini,
        "bedrock": _bedrock,
    }
    try:
        factory = factories[provider]
    except KeyError:
        # `from None` keeps the traceback focused on the bad provider value.
        raise ValueError(f"Unknown LLM provider: {provider!r}") from None
    return factory(resolved_model, temperature)
106
+
107
+
108
+ # ---------------------------------------------------------------------------
109
+ # Provider implementations
110
+ # ---------------------------------------------------------------------------
111
+
112
def _openai(model: str, temperature: float) -> BaseChatModel:
    """Build a ChatOpenAI client; the API key is passed only when configured."""
    from langchain_openai import ChatOpenAI

    params: dict = {
        "model": model,
        "temperature": temperature,
        "timeout": settings.LLM_REQUEST_TIMEOUT_SECONDS,
    }
    api_key = settings.OPENAI_API_KEY
    if api_key.strip():
        params["api_key"] = api_key

    return ChatOpenAI(**params)
126
+
127
+
128
def _ollama(model: str, temperature: float) -> BaseChatModel:
    """Build a ChatOllama client pointed at the configured local server."""
    from langchain_ollama import ChatOllama

    timeout = settings.OLLAMA_REQUEST_TIMEOUT_SECONDS
    return ChatOllama(
        model=model,
        temperature=temperature,
        base_url=settings.OLLAMA_BASE_URL,
        # Cap generation length for local models.
        num_predict=settings.OLLAMA_NUM_PREDICT,
        # Apply the same timeout to both the sync and async HTTP clients.
        sync_client_kwargs={"timeout": timeout},
        async_client_kwargs={"timeout": timeout},
    )
140
+
141
+
142
def _openrouter(model: str, temperature: float) -> BaseChatModel:
    """Build a ChatOpenAI client against OpenRouter's OpenAI-compatible API."""
    from langchain_openai import ChatOpenAI

    # OpenRouter attribution headers (identify the calling application).
    attribution = {
        "HTTP-Referer": "https://scriptgini.local",
        "X-Title": "ScriptGini",
    }
    params: dict = {
        "model": model,
        "temperature": temperature,
        "base_url": settings.OPENROUTER_BASE_URL,
        "timeout": settings.LLM_REQUEST_TIMEOUT_SECONDS,
        "default_headers": attribution,
    }
    api_key = settings.OPENROUTER_API_KEY
    if api_key.strip():
        params["api_key"] = api_key

    return ChatOpenAI(**params)
162
+
163
+
164
def _gemini(model: str, temperature: float) -> BaseChatModel:
    """Build a ChatGoogleGenerativeAI client, normalizing the model id."""
    from langchain_google_genai import ChatGoogleGenerativeAI

    params: dict = {
        # The SDK expects a bare model id, not the "models/..." resource path.
        "model": model.removeprefix("models/"),
        "temperature": temperature,
    }
    if settings.GOOGLE_API_KEY.strip():
        params["google_api_key"] = settings.GOOGLE_API_KEY

    return ChatGoogleGenerativeAI(**params)
177
+
178
+
179
def _bedrock(model: str, temperature: float) -> BaseChatModel:
    """Build a ChatBedrock client backed by an explicit boto3 session."""
    import boto3
    from langchain_aws import ChatBedrock

    # Empty credential strings become None so boto3 falls back to its
    # default credential resolution chain (env vars, profiles, IAM role).
    session = boto3.Session(
        aws_access_key_id=settings.AWS_ACCESS_KEY_ID or None,
        aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY or None,
        region_name=settings.AWS_REGION_NAME,
    )
    runtime_client = session.client("bedrock-runtime")
    return ChatBedrock(
        model_id=model,
        client=runtime_client,
        model_kwargs={"temperature": temperature},
    )
app/main.py ADDED
@@ -0,0 +1,76 @@
1
+ import logging
2
+ from pathlib import Path
3
+
4
+ from fastapi import FastAPI
5
+ from fastapi.responses import FileResponse
6
+ from fastapi.middleware.cors import CORSMiddleware
7
+ from fastapi.staticfiles import StaticFiles
8
+
9
+ from app.config import settings
10
+ from app.llm.provider import get_llm_diagnostics
11
+ from app.routers import projects, test_cases, scripts, bulk_jobs, analytics, demo
12
+
13
# Configure root logging once at import time; DEBUG setting widens verbosity.
logging.basicConfig(level=logging.DEBUG if settings.DEBUG else logging.INFO)
logger = logging.getLogger(__name__)

# Directory containing the bundled single-page UI, served under /static.
static_dir = Path(__file__).resolve().parent / "static"

app = FastAPI(
    title=settings.APP_NAME,
    description=(
        "Enterprise-grade Agentic AI system that converts functional test cases "
        "into high-quality automation scripts."
    ),
    version="1.0.4",
)

# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# contradictory under the CORS spec (a wildcard origin cannot be sent with
# credentials); Starlette echoes the request Origin instead, which effectively
# permits credentialed requests from any site — confirm this is intended.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# All JSON APIs are versioned under /api/v1; static assets are mounted last.
app.include_router(projects.router, prefix="/api/v1")
app.include_router(test_cases.router, prefix="/api/v1")
app.include_router(scripts.router, prefix="/api/v1")
app.include_router(bulk_jobs.router, prefix="/api/v1")
app.include_router(analytics.router, prefix="/api/v1")
app.include_router(demo.router, prefix="/api/v1")
app.mount("/static", StaticFiles(directory=static_dir), name="static")
42
+
43
+
44
@app.on_event("startup")
def log_llm_runtime_config() -> None:
    """Log the effective default LLM configuration once at startup."""
    info = get_llm_diagnostics()
    fields = ("provider", "model", "api_key_env", "api_key_present", "api_key_masked")
    logger.info(
        "Runtime LLM default: provider=%s model=%s api_key_env=%s api_key_present=%s api_key=%s",
        *(info[name] for name in fields),
    )
55
+
56
+
57
@app.get("/api/v1/runtime/llm", tags=["Runtime"])
def runtime_llm():
    """Expose the default and per-provider LLM diagnostics (keys are masked)."""
    defaults = get_llm_diagnostics()
    per_provider = {}
    for name in ("openai", "openrouter", "gemini", "ollama", "bedrock"):
        per_provider[name] = get_llm_diagnostics(name)
    return {
        "default_provider": defaults["provider"],
        "default_model": defaults["model"],
        "provider_diagnostics": per_provider,
    }
67
+
68
+
69
@app.get("/", include_in_schema=False)
def index():
    """Serve the bundled single-page UI (excluded from the OpenAPI schema)."""
    return FileResponse(static_dir / "index.html")
72
+
73
+
74
@app.get("/health", tags=["Health"])
def health():
    """Liveness probe: always returns ok plus the configured application name."""
    return {"status": "ok", "app": settings.APP_NAME}
app/models/__init__.py ADDED
File without changes
app/models/bulk_job.py ADDED
@@ -0,0 +1,67 @@
1
+ import enum
2
+ from datetime import datetime, timezone
3
+
4
+ from sqlalchemy import DateTime, Enum as SAEnum, ForeignKey, Integer, String, Text
5
+ from sqlalchemy.orm import Mapped, mapped_column
6
+
7
+ from app.database import Base
8
+
9
+
10
class BulkJobKind(str, enum.Enum):
    """Kind of batch operation: generate scripts or run existing ones."""
    generate = "generate"
    run = "run"
13
+
14
+
15
class BulkJobStatus(str, enum.Enum):
    """Overall lifecycle state of a bulk job."""
    pending = "pending"
    running = "running"
    completed = "completed"
    failed = "failed"
20
+
21
+
22
class BulkJobItemStatus(str, enum.Enum):
    """Per-item state within a bulk job; items may also be skipped."""
    pending = "pending"
    running = "running"
    completed = "completed"
    failed = "failed"
    skipped = "skipped"
28
+
29
+
30
class BulkJob(Base):
    """A batch operation (bulk generate or bulk run) over a project's test cases."""

    __tablename__ = "bulk_jobs"

    id: Mapped[int] = mapped_column(primary_key=True, index=True)
    # Owning project; job rows are removed when the project is deleted.
    project_id: Mapped[int] = mapped_column(ForeignKey("projects.id", ondelete="CASCADE"), nullable=False, index=True)
    kind: Mapped[BulkJobKind] = mapped_column(SAEnum(BulkJobKind), nullable=False)
    status: Mapped[BulkJobStatus] = mapped_column(SAEnum(BulkJobStatus), nullable=False, default=BulkJobStatus.pending)
    # Progress counters; presumably maintained by the job runner as items finish.
    total_items: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
    completed_items: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
    failed_items: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
    skipped_items: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
    # Timezone-aware UTC timestamps; updated_at refreshes on every UPDATE.
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
    )
49
+
50
+
51
class BulkJobItem(Base):
    """One test case's entry within a bulk job, tracking its individual outcome."""

    __tablename__ = "bulk_job_items"

    id: Mapped[int] = mapped_column(primary_key=True, index=True)
    # Parent job; items are removed with the job.
    job_id: Mapped[int] = mapped_column(ForeignKey("bulk_jobs.id", ondelete="CASCADE"), nullable=False, index=True)
    test_case_id: Mapped[int] = mapped_column(ForeignKey("test_cases.id", ondelete="CASCADE"), nullable=False, index=True)
    # Associated generated script; the item survives script deletion (SET NULL).
    script_id: Mapped[int | None] = mapped_column(ForeignKey("generated_scripts.id", ondelete="SET NULL"), nullable=True, index=True)
    status: Mapped[BulkJobItemStatus] = mapped_column(SAEnum(BulkJobItemStatus), nullable=False, default=BulkJobItemStatus.pending)
    # Free-form detail (e.g. error text) for this item.
    message: Mapped[str | None] = mapped_column(Text, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
    )
@@ -0,0 +1,39 @@
1
+ import enum
2
+ from datetime import datetime, timezone
3
+
4
+ from sqlalchemy import String, Text, DateTime, ForeignKey, Enum as SAEnum, Integer
5
+ from sqlalchemy.orm import Mapped, mapped_column
6
+
7
+ from app.database import Base
8
+
9
+
10
class ScriptStatus(str, enum.Enum):
    """Lifecycle of a generated automation script."""
    pending = "pending"
    generating = "generating"
    completed = "completed"
    failed = "failed"
15
+
16
+
17
class GeneratedScript(Base):
    """An automation script produced by the LLM for a single test case."""

    __tablename__ = "generated_scripts"

    id: Mapped[int] = mapped_column(primary_key=True, index=True)
    project_id: Mapped[int] = mapped_column(ForeignKey("projects.id", ondelete="CASCADE"), nullable=False, index=True)
    test_case_id: Mapped[int] = mapped_column(ForeignKey("test_cases.id", ondelete="CASCADE"), nullable=False, index=True)
    # Target automation framework (stored as a plain string, not the enum).
    framework: Mapped[str] = mapped_column(String(100), nullable=False)
    # Provider/model used for generation, recorded for traceability.
    llm_provider: Mapped[str] = mapped_column(String(100), nullable=False)
    llm_model: Mapped[str] = mapped_column(String(255), nullable=False)
    # Null until generation completes successfully.
    script_content: Mapped[str | None] = mapped_column(Text, nullable=True)
    status: Mapped[ScriptStatus] = mapped_column(
        SAEnum(ScriptStatus), default=ScriptStatus.pending, nullable=False
    )
    error_message: Mapped[str | None] = mapped_column(Text, nullable=True)
    # Total tokens consumed by the generation call, when the provider reports it.
    token_usage: Mapped[int | None] = mapped_column(Integer, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
    )
app/models/project.py ADDED
@@ -0,0 +1,46 @@
1
+ import enum
2
+ from datetime import datetime, timezone
3
+
4
+ from sqlalchemy import String, Text, DateTime, Enum as SAEnum
5
+ from sqlalchemy.orm import Mapped, mapped_column
6
+
7
+ from app.database import Base
8
+
9
+
10
class TestFramework(str, enum.Enum):
    """Supported automation-script target frameworks."""
    playwright_python = "playwright_python"
    selenium_python = "selenium_python"
    uft_vbscript = "uft_vbscript"
    cypress_js = "cypress_js"
15
+
16
+
17
class SelectorPreference(str, enum.Enum):
    """Preferred element-locator strategy for generated scripts."""
    role = "role"
    label = "label"
    testid = "testid"
    css = "css"
    xpath = "xpath"
23
+
24
+
25
class Project(Base):
    """An application-under-test project grouping test cases and defaults."""

    __tablename__ = "projects"

    id: Mapped[int] = mapped_column(primary_key=True, index=True)
    name: Mapped[str] = mapped_column(String(255), nullable=False)
    description: Mapped[str | None] = mapped_column(Text, nullable=True)
    # Base URL of the application under test.
    aut_base_url: Mapped[str] = mapped_column(String(2048), nullable=False)
    default_framework: Mapped[TestFramework] = mapped_column(
        SAEnum(TestFramework), default=TestFramework.playwright_python, nullable=False
    )
    selector_preference: Mapped[SelectorPreference] = mapped_column(
        SAEnum(SelectorPreference), default=SelectorPreference.role, nullable=False
    )
    # Free-text notes about authentication (e.g. login flow hints for the agent).
    auth_hints: Mapped[str | None] = mapped_column(Text, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
    )
@@ -0,0 +1,32 @@
1
+ import enum
2
+ from datetime import datetime, timezone
3
+
4
+ from sqlalchemy import Boolean, DateTime, Enum as SAEnum, Float, ForeignKey, Integer, String, Text
5
+ from sqlalchemy.orm import Mapped, mapped_column
6
+
7
+ from app.database import Base
8
+
9
+
10
class ScriptRunStatus(str, enum.Enum):
    """Terminal outcome of a script execution attempt."""
    completed = "completed"
    failed = "failed"
    timed_out = "timed_out"
14
+
15
+
16
class ScriptRun(Base):
    """A single execution record of a generated script, with captured output."""

    __tablename__ = "script_runs"

    id: Mapped[int] = mapped_column(primary_key=True, index=True)
    script_id: Mapped[int] = mapped_column(ForeignKey("generated_scripts.id", ondelete="CASCADE"), nullable=False, index=True)
    project_id: Mapped[int] = mapped_column(ForeignKey("projects.id", ondelete="CASCADE"), nullable=False, index=True)
    test_case_id: Mapped[int] = mapped_column(ForeignKey("test_cases.id", ondelete="CASCADE"), nullable=False, index=True)
    status: Mapped[ScriptRunStatus] = mapped_column(SAEnum(ScriptRunStatus), nullable=False)
    # Denormalized pass/fail flag (status alone doesn't imply test success).
    success: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False)
    exit_code: Mapped[int] = mapped_column(Integer, nullable=False)
    # Captured process output; stored in full, excerpted by the analytics API.
    stdout: Mapped[str] = mapped_column(Text, nullable=False, default="")
    stderr: Mapped[str] = mapped_column(Text, nullable=False, default="")
    duration_seconds: Mapped[float] = mapped_column(Float, nullable=False)
    # Command line used to launch the run, for reproducibility.
    command: Mapped[str] = mapped_column(String(1024), nullable=False)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
    )
@@ -0,0 +1,34 @@
1
+ import enum
2
+ from datetime import datetime, timezone
3
+
4
+ from sqlalchemy import String, Text, DateTime, ForeignKey, Enum as SAEnum
5
+ from sqlalchemy.orm import Mapped, mapped_column
6
+
7
+ from app.database import Base
8
+
9
+
10
class TestCaseFormat(str, enum.Enum):
    """Authoring format of a test case: numbered steps or Gherkin-style BDD."""
    step_based = "step_based"
    bdd = "bdd"
13
+
14
+
15
class TestCase(Base):
    """A functional test case belonging to a project, used as LLM input."""

    __tablename__ = "test_cases"

    id: Mapped[int] = mapped_column(primary_key=True, index=True)
    project_id: Mapped[int] = mapped_column(ForeignKey("projects.id", ondelete="CASCADE"), nullable=False, index=True)
    title: Mapped[str] = mapped_column(String(512), nullable=False)
    format: Mapped[TestCaseFormat] = mapped_column(
        SAEnum(TestCaseFormat), default=TestCaseFormat.step_based, nullable=False
    )
    # The test case body (steps or BDD scenario text).
    content: Mapped[str] = mapped_column(Text, nullable=False)
    preconditions: Mapped[str | None] = mapped_column(Text, nullable=True)
    # Optional hints about the data the test should use.
    test_data_hints: Mapped[str | None] = mapped_column(Text, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
    )
File without changes
@@ -0,0 +1,73 @@
1
+ from sqlalchemy import case, func
2
+ from sqlalchemy.orm import Session
3
+ from fastapi import APIRouter, Depends, HTTPException
4
+
5
+ from app.database import get_db
6
+ from app.models.project import Project
7
+ from app.models.script_run import ScriptRun, ScriptRunStatus
8
+ from app.models.test_case import TestCase
9
+ from app.schemas.analytics import RunAnalyticsResponse, RecentFailureResponse
10
+
11
# Per-project analytics endpoints, nested under the project resource.
router = APIRouter(prefix="/projects/{project_id}/analytics", tags=["Run Analytics"])
12
+
13
+
14
@router.get("/runs", response_model=RunAnalyticsResponse)
def get_run_analytics(project_id: int, db: Session = Depends(get_db)):
    """Aggregate run statistics plus the 10 most recent failures for a project.

    Raises:
        HTTPException: 404 when the project does not exist.
    """
    project = db.query(Project).filter(Project.id == project_id).first()
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # One aggregate query: total runs, successes, failed, timed-out, mean duration.
    aggregate = (
        db.query(
            func.count(ScriptRun.id),
            func.sum(case((ScriptRun.success.is_(True), 1), else_=0)),
            func.sum(case((ScriptRun.status == ScriptRunStatus.failed, 1), else_=0)),
            func.sum(case((ScriptRun.status == ScriptRunStatus.timed_out, 1), else_=0)),
            func.avg(ScriptRun.duration_seconds),
        )
        .filter(ScriptRun.project_id == project_id)
        .one()
    )

    # SUM/AVG yield NULL (None) when there are no matching rows — hence `or 0`.
    total_runs = int(aggregate[0] or 0)
    success_runs = int(aggregate[1] or 0)
    failed_runs = int(aggregate[2] or 0)
    timed_out_runs = int(aggregate[3] or 0)
    avg_duration = float(aggregate[4] or 0.0)

    # Latest 10 unsuccessful runs; outer join keeps runs whose test case title
    # is unavailable (tc_title will be None in that case).
    failed_items = (
        db.query(ScriptRun, TestCase.title)
        .outerjoin(TestCase, TestCase.id == ScriptRun.test_case_id)
        .filter(ScriptRun.project_id == project_id)
        .filter(ScriptRun.success.is_(False))
        .order_by(ScriptRun.created_at.desc(), ScriptRun.id.desc())
        .limit(10)
        .all()
    )

    recent_failures = [
        RecentFailureResponse(
            run_id=run.id,
            script_id=run.script_id,
            test_case_id=run.test_case_id,
            test_case_title=tc_title,
            exit_code=run.exit_code,
            duration_seconds=run.duration_seconds,
            # Only a short stderr excerpt is returned to keep the payload small.
            stderr_excerpt=(run.stderr or "")[:240],
            created_at=run.created_at,
        )
        for run, tc_title in failed_items
    ]

    # Percentage with 2 decimals; zero when there are no runs at all.
    success_rate = round((success_runs / total_runs) * 100, 2) if total_runs else 0.0

    return RunAnalyticsResponse(
        project_id=project_id,
        total_runs=total_runs,
        success_runs=success_runs,
        failed_runs=failed_runs,
        timed_out_runs=timed_out_runs,
        success_rate=success_rate,
        average_duration_seconds=round(avg_duration, 2),
        recent_failures=recent_failures,
    )