scriptgini 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
app/routers/scripts.py ADDED
@@ -0,0 +1,549 @@
1
+ import ast
2
+ from concurrent.futures import ThreadPoolExecutor, TimeoutError as FutureTimeoutError
3
+ import logging
4
+ import os
5
+ import shlex
6
+ import socket
7
+ import subprocess
8
+ import sys
9
+ import tempfile
10
+ import time
11
+ from pathlib import Path
12
+ from urllib.parse import urlparse
13
+
14
+ from fastapi import APIRouter, Depends, HTTPException, status, BackgroundTasks
15
+ from sqlalchemy.orm import Session
16
+
17
+ from app.database import get_db
18
+ from app.config import settings
19
+ from app.models.project import Project
20
+ from app.models.test_case import TestCase
21
+ from app.models.generated_script import GeneratedScript, ScriptStatus
22
+ from app.models.script_run import ScriptRun, ScriptRunStatus
23
+ from app.schemas.generated_script import GenerateScriptRequest, GeneratedScriptResponse, ScriptRunResponse
24
+ from app.agents.script_gini_agent import run_agent
25
+ from app.llm.provider import LLMProvider, get_llm_diagnostics
26
+ from app.services.git_export import export_generated_script
27
+
28
logger = logging.getLogger(__name__)

# Import roots that generated scripts may never pull in; enforced by
# _validate_script_safety before any script is executed.
_BLOCKED_IMPORT_ROOTS = {
    "subprocess",
    "socket",
    "shutil",
    "requests",
    "httpx",
    "ftplib",
    "paramiko",
}
# Builtins that enable arbitrary code execution; direct calls are rejected.
_BLOCKED_BUILTIN_CALLS = {"exec", "eval", "compile", "__import__"}
# Captured stdout/stderr from a script run are truncated to this many chars.
_MAX_OUTPUT_CHARS = 20_000
41
+
42
+
43
def _run_agent_with_deadline(**kwargs) -> dict:
    """Run the generation agent in a worker thread under a hard deadline.

    Raises TimeoutError when the agent does not finish within
    settings.SCRIPT_GENERATION_TIMEOUT_SECONDS.
    """
    pool = ThreadPoolExecutor(max_workers=1)
    pending = pool.submit(run_agent, **kwargs)
    try:
        return pending.result(timeout=settings.SCRIPT_GENERATION_TIMEOUT_SECONDS)
    except FutureTimeoutError as exc:
        pending.cancel()
        raise TimeoutError(
            "Script generation exceeded "
            f"{settings.SCRIPT_GENERATION_TIMEOUT_SECONDS}s deadline. "
            "Try a smaller model or increase SCRIPT_GENERATION_TIMEOUT_SECONDS in .env."
        ) from exc
    finally:
        # Never block the caller; abandon the worker if it is still running.
        pool.shutdown(wait=False, cancel_futures=True)
57
+
58
# All script endpoints are nested under a project + test-case pair.
router = APIRouter(
    prefix="/projects/{project_id}/test-cases/{tc_id}/scripts",
    tags=["Scripts"],
)
62
+
63
+
64
+ def _looks_like_connection_error(message: str | None) -> bool:
65
+ if not message:
66
+ return False
67
+ lowered = message.lower()
68
+ indicators = (
69
+ "connection error",
70
+ "api connection",
71
+ "connection refused",
72
+ "failed to connect",
73
+ "max retries exceeded",
74
+ "name resolution",
75
+ "temporary failure in name resolution",
76
+ "timed out",
77
+ )
78
+ return any(token in lowered for token in indicators)
79
+
80
+
81
def _is_ollama_reachable() -> bool:
    """Return True when a TCP connection to the configured Ollama URL succeeds."""
    target = urlparse(settings.OLLAMA_BASE_URL)
    address = (target.hostname or "localhost", target.port or 11434)
    try:
        conn = socket.create_connection(address, timeout=1.5)
    except OSError:
        return False
    with conn:
        return True
90
+
91
+
92
def _provider_readiness(provider: LLMProvider) -> tuple[bool, str]:
    """Check whether the given LLM provider is usable right now.

    Returns (ready, human-readable detail). Cloud providers need only their
    credential/config setting; Ollama is probed over TCP.
    """
    # (setting value, message when missing, message when present)
    credential_checks = {
        "openai": (
            settings.OPENAI_API_KEY,
            "OpenAI API key is missing. Set OPENAI_API_KEY in .env.",
            "OpenAI provider is configured.",
        ),
        "openrouter": (
            settings.OPENROUTER_API_KEY,
            "OpenRouter API key is missing. Set OPENROUTER_API_KEY in .env.",
            "OpenRouter provider is configured.",
        ),
        "gemini": (
            settings.GOOGLE_API_KEY,
            "Google API key is missing. Set GOOGLE_API_KEY in .env.",
            "Gemini provider is configured.",
        ),
        "bedrock": (
            settings.AWS_REGION_NAME,
            "AWS region is missing. Set AWS_REGION_NAME in .env.",
            "Bedrock provider appears configured.",
        ),
    }

    if provider in credential_checks:
        value, missing_detail, ready_detail = credential_checks[provider]
        if not value.strip():
            return False, missing_detail
        return True, ready_detail

    if provider == "ollama":
        if _is_ollama_reachable():
            return True, "Ollama service is reachable."
        return False, (
            "Ollama is not reachable at "
            f"{settings.OLLAMA_BASE_URL}. Start Ollama (for example: 'ollama serve') or choose a cloud provider with API key."
        )

    return False, f"Unknown provider: {provider}"
122
+
123
+
124
@router.get("/providers/{provider}/ready")
def provider_ready(project_id: int, tc_id: int, provider: LLMProvider, db: Session = Depends(get_db)):
    """Report whether the chosen LLM provider is ready to generate scripts."""
    # Validate the URL path refers to a real project/test case before probing.
    _get_project_or_404(project_id, db)
    _get_tc_or_404(project_id, tc_id, db)
    ok, detail = _provider_readiness(provider)
    return {"ready": ok, "detail": detail}
130
+
131
+
132
def _get_project_or_404(project_id: int, db: Session) -> Project:
    """Fetch a project by id or raise HTTP 404."""
    project = db.query(Project).filter(Project.id == project_id).first()
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")
    return project
137
+
138
+
139
def _get_tc_or_404(project_id: int, tc_id: int, db: Session) -> TestCase:
    """Fetch a test case scoped to its owning project or raise HTTP 404."""
    tc = db.query(TestCase).filter(TestCase.project_id == project_id, TestCase.id == tc_id).first()
    if not tc:
        raise HTTPException(status_code=404, detail="Test case not found")
    return tc
144
+
145
+
146
def _get_script_or_404(project_id: int, tc_id: int, script_id: int, db: Session) -> GeneratedScript:
    """Fetch a generated script scoped to project and test case, or raise 404."""
    script = db.query(GeneratedScript).filter(
        GeneratedScript.id == script_id,
        GeneratedScript.project_id == project_id,
        GeneratedScript.test_case_id == tc_id,
    ).first()
    if not script:
        raise HTTPException(status_code=404, detail="Script not found")
    return script
155
+
156
+
157
def _run_python_script(script_content: str) -> dict:
    """Execute a generated Python script in a throwaway sandbox directory.

    Raises ValueError (via _validate_script_safety) for blocked imports or
    builtins, and subprocess.TimeoutExpired on execution deadline. Returns a
    dict with success flag, exit code, truncated output, duration and command.
    """
    _validate_script_safety(script_content)

    started = time.perf_counter()
    with tempfile.TemporaryDirectory(prefix="scriptgini-run-") as workdir:
        target = Path(workdir) / "generated_scenario.py"
        target.write_text(script_content, encoding="utf-8")
        argv = _build_script_command(target, script_content)

        completed = subprocess.run(
            argv,
            capture_output=True,
            text=True,
            cwd=workdir,
            timeout=settings.SCRIPT_EXECUTION_TIMEOUT_SECONDS,
            env=_build_restricted_env(),
        )

        elapsed = time.perf_counter() - started
        return {
            "success": completed.returncode == 0,
            "exit_code": completed.returncode,
            # Clamp captured output so huge logs never bloat the DB row.
            "stdout": completed.stdout[:_MAX_OUTPUT_CHARS],
            "stderr": completed.stderr[:_MAX_OUTPUT_CHARS],
            "duration_seconds": round(elapsed, 2),
            "command": shlex.join(argv),
        }
184
+
185
+
186
def _build_script_command(script_path: Path, script_content: str) -> list[str]:
    """Choose the interpreter invocation for a generated script.

    pytest-style Playwright tests run under pytest (optionally headed);
    anything else runs as a plain script in isolated mode (-I).
    """
    if not _uses_pytest_playwright(script_content):
        return [sys.executable, "-I", str(script_path)]
    argv = [sys.executable, "-m", "pytest", script_path.name, "-s", "--browser", "chromium"]
    if settings.PLAYWRIGHT_RUN_HEADED:
        argv.append("--headed")
    return argv
193
+
194
+
195
+ def _uses_pytest_playwright(script_content: str) -> bool:
196
+ try:
197
+ tree = ast.parse(script_content)
198
+ except SyntaxError:
199
+ return False
200
+
201
+ for node in tree.body:
202
+ if isinstance(node, ast.FunctionDef) and node.name.startswith("test_"):
203
+ arg_names = {arg.arg for arg in node.args.args}
204
+ if {"page", "browser", "context"} & arg_names:
205
+ return True
206
+ return False
207
+
208
+
209
def _build_restricted_env() -> dict:
    """Build a minimal environment for the sandboxed script process.

    Forwards only an allow-list of variables (plus PLAYWRIGHT_*/PYTHON*
    prefixed ones) and forces the headless flag from settings.
    """
    keep = {"PATH", "SYSTEMROOT", "TEMP", "TMP", "HOME", "USERPROFILE"}
    keep_prefixes = ("PLAYWRIGHT_", "PYTHON")
    env = {
        name: value
        for name, value in os.environ.items()
        if name in keep or name.startswith(keep_prefixes)
    }
    env["PLAYWRIGHT_HEADLESS"] = "0" if settings.PLAYWRIGHT_RUN_HEADED else "1"
    return env
218
+
219
+
220
def _validate_script_safety(script_content: str) -> None:
    """Reject generated scripts that use blocked imports or dangerous builtins.

    Raises ValueError when the script does not parse or trips the deny-lists
    (_BLOCKED_IMPORT_ROOTS / _BLOCKED_BUILTIN_CALLS).
    """
    try:
        tree = ast.parse(script_content)
    except SyntaxError as exc:
        raise ValueError(f"Generated script has syntax issues: {exc}") from exc

    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                root = alias.name.partition(".")[0]
                if root in _BLOCKED_IMPORT_ROOTS:
                    raise ValueError(f"Unsafe import detected: {root}")
        elif isinstance(node, ast.ImportFrom):
            # node.module is None for relative imports ("from . import x").
            module_root = (node.module or "").partition(".")[0]
            if module_root in _BLOCKED_IMPORT_ROOTS:
                raise ValueError(f"Unsafe import detected: {module_root}")
        elif isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
            if node.func.id in _BLOCKED_BUILTIN_CALLS:
                raise ValueError(f"Unsafe builtin call detected: {node.func.id}")
239
+
240
+
241
def _create_script_run(db: Session, script: GeneratedScript, run_result: dict, status: ScriptRunStatus) -> ScriptRun:
    """Persist one execution attempt of a script and return the refreshed row."""
    run_record = ScriptRun(
        script_id=script.id,
        project_id=script.project_id,
        test_case_id=script.test_case_id,
        status=status,
        success=run_result["success"],
        exit_code=run_result["exit_code"],
        # Output may be absent in synthesized error results; store "".
        stdout=run_result.get("stdout") or "",
        stderr=run_result.get("stderr") or "",
        duration_seconds=run_result["duration_seconds"],
        command=run_result["command"],
    )
    db.add(run_record)
    db.commit()
    db.refresh(run_record)
    return run_record
258
+
259
+
260
def _run_generation(script_id: int, project_id: int | Project, tc_id: int | TestCase, request_data: dict | object):
    """Background task: invoke the LangGraph agent and persist the result.

    Runs outside the request cycle with its own DB session. On any failure
    the script record is marked failed with the error message; the task
    never raises.
    """
    # Imported lazily so the task gets a session factory without widening
    # module-level imports.
    from app.database import SessionLocal

    # Callers may pass ORM objects or plain ids/models; normalize to
    # primitives because request-scoped ORM instances are detached here.
    resolved_project_id = project_id.id if isinstance(project_id, Project) else int(project_id)
    resolved_tc_id = tc_id.id if isinstance(tc_id, TestCase) else int(tc_id)
    resolved_request_data = request_data.model_dump() if hasattr(request_data, "model_dump") else request_data

    db = SessionLocal()
    try:
        script_record = db.query(GeneratedScript).filter(GeneratedScript.id == script_id).first()
        if not script_record:
            # Record was deleted before the task ran; nothing to update.
            return

        project = db.query(Project).filter(Project.id == resolved_project_id).first()
        tc = db.query(TestCase).filter(TestCase.id == resolved_tc_id, TestCase.project_id == resolved_project_id).first()
        if not project or not tc:
            script_record.status = ScriptStatus.failed
            script_record.error_message = "Project or test case not found"
            db.commit()
            return

        # Mark in-progress so pollers see the transition.
        script_record.status = ScriptStatus.generating
        db.commit()

        request_framework = resolved_request_data.get("framework")
        framework = request_framework or project.default_framework.value
        llm_provider = resolved_request_data["llm_provider"]
        llm_model = resolved_request_data.get("llm_model")
        diagnostics = get_llm_diagnostics(llm_provider, llm_model)
        logger.info(
            "Script generation start: script_id=%s provider=%s model=%s api_key_env=%s api_key_present=%s api_key=%s",
            script_id,
            diagnostics["provider"],
            diagnostics["model"],
            diagnostics["api_key_env"],
            diagnostics["api_key_present"],
            diagnostics["api_key_masked"],
        )
        result = _run_agent_with_deadline(
            test_case_title=tc.title,
            test_case_content=tc.content,
            preconditions=tc.preconditions or "",
            test_data_hints=tc.test_data_hints or "",
            aut_base_url=project.aut_base_url,
            framework=framework,
            auth_hints=project.auth_hints or "",
            llm_provider=llm_provider,
            llm_model=llm_model,
        )

        # Fallback 1: a pinned Gemini model name may not exist; retry once
        # with the generic "latest" alias.
        if result.get("error") and llm_provider == "gemini" and "model was not found" in str(result.get("error")).lower():
            logger.warning(
                "Gemini model was not found for script_id=%s with model=%s. Retrying once with gemini-flash-latest.",
                script_id,
                llm_model or settings.GEMINI_MODEL,
            )
            fallback_result = _run_agent_with_deadline(
                test_case_title=tc.title,
                test_case_content=tc.content,
                preconditions=tc.preconditions or "",
                test_data_hints=tc.test_data_hints or "",
                aut_base_url=project.aut_base_url,
                framework=framework,
                auth_hints=project.auth_hints or "",
                llm_provider="gemini",
                llm_model="gemini-flash-latest",
            )
            if fallback_result.get("error"):
                # Keep both failures visible in one message.
                result["error"] = (
                    f"{result.get('error')} | Gemini fallback failed: {fallback_result.get('error')}"
                )
            else:
                result = fallback_result
                script_record.llm_model = "gemini-flash-latest"

        # Fallback 2: on connection-looking errors from a cloud provider,
        # retry once against local Ollama.
        if result.get("error") and _looks_like_connection_error(result.get("error")) and llm_provider != "ollama":
            logger.warning(
                "Primary provider '%s' failed with connection error for script_id=%s. Retrying once with Ollama.",
                llm_provider,
                script_id,
            )
            try:
                fallback_diagnostics = get_llm_diagnostics("ollama", None)
                logger.info(
                    "Fallback provider pick: script_id=%s provider=%s model=%s api_key_env=%s api_key_present=%s api_key=%s",
                    script_id,
                    fallback_diagnostics["provider"],
                    fallback_diagnostics["model"],
                    fallback_diagnostics["api_key_env"],
                    fallback_diagnostics["api_key_present"],
                    fallback_diagnostics["api_key_masked"],
                )
                fallback_result = _run_agent_with_deadline(
                    test_case_title=tc.title,
                    test_case_content=tc.content,
                    preconditions=tc.preconditions or "",
                    test_data_hints=tc.test_data_hints or "",
                    aut_base_url=project.aut_base_url,
                    framework=framework,
                    auth_hints=project.auth_hints or "",
                    llm_provider="ollama",
                    llm_model=None,
                )
                if fallback_result.get("error"):
                    result["error"] = (
                        f"{result.get('error')} | Ollama fallback failed: {fallback_result.get('error')}"
                    )
                else:
                    result = fallback_result
                    # Record which provider actually produced the script.
                    script_record.llm_provider = "ollama"
                    script_record.llm_model = ""
            except Exception as fallback_exc:
                result["error"] = f"{result.get('error')} | Ollama fallback exception: {fallback_exc}"

        script_record.script_content = result["script"]
        script_record.error_message = result.get("error")
        script_record.token_usage = result.get("token_usage")
        script_record.status = ScriptStatus.failed if result.get("error") else ScriptStatus.completed
        db.commit()
    except Exception as exc:
        logger.exception("Script generation failed for script_id=%s", script_id)
        # Re-query: the earlier reference may be stale after the failure.
        script_record = db.query(GeneratedScript).filter(GeneratedScript.id == script_id).first()
        if script_record:
            script_record.status = ScriptStatus.failed
            script_record.error_message = str(exc)
            db.commit()
    finally:
        db.close()
389
+
390
+
391
@router.post("/generate", response_model=GeneratedScriptResponse, status_code=status.HTTP_202_ACCEPTED)
def generate_script(
    project_id: int,
    tc_id: int,
    payload: GenerateScriptRequest,
    background_tasks: BackgroundTasks,
    db: Session = Depends(get_db),
):
    """Kick off async script generation. Poll GET /scripts/{id} for the result."""
    project = _get_project_or_404(project_id, db)
    _get_tc_or_404(project_id, tc_id, db)

    # Request framework (if any) overrides the project default.
    framework = (payload.framework or project.default_framework).value
    script_record = GeneratedScript(
        project_id=project_id,
        test_case_id=tc_id,
        framework=framework,
        llm_provider=payload.llm_provider,
        llm_model=payload.llm_model or "",
        status=ScriptStatus.pending,
    )
    db.add(script_record)
    db.commit()
    db.refresh(script_record)

    # Pass primitives, not ORM objects: the task opens its own session.
    background_tasks.add_task(
        _run_generation,
        script_record.id,
        project_id,
        tc_id,
        payload.model_dump(),
    )
    return script_record
424
+
425
+
426
@router.get("/", response_model=list[GeneratedScriptResponse])
def list_scripts(
    project_id: int,
    tc_id: int,
    skip: int = 0,
    limit: int = 50,
    db: Session = Depends(get_db),
):
    """List generated scripts for a test case, newest first (paginated)."""
    _get_project_or_404(project_id, db)
    _get_tc_or_404(project_id, tc_id, db)
    # Filtering by test_case_id alone is sufficient: the tc lookup above
    # already guarantees the test case belongs to this project.
    return (
        db.query(GeneratedScript)
        .filter(GeneratedScript.test_case_id == tc_id)
        .order_by(GeneratedScript.created_at.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )
444
+
445
+
446
@router.get("/{script_id}", response_model=GeneratedScriptResponse)
def get_script(project_id: int, tc_id: int, script_id: int, db: Session = Depends(get_db)):
    """Fetch a single generated script (also the polling endpoint for generation)."""
    _get_project_or_404(project_id, db)
    _get_tc_or_404(project_id, tc_id, db)
    return _get_script_or_404(project_id, tc_id, script_id, db)
451
+
452
+
453
@router.get("/{script_id}/runs", response_model=list[ScriptRunResponse])
def list_script_runs(project_id: int, tc_id: int, script_id: int, db: Session = Depends(get_db)):
    """List execution runs for a script, newest first (id desc breaks ties)."""
    _get_project_or_404(project_id, db)
    _get_tc_or_404(project_id, tc_id, db)
    _get_script_or_404(project_id, tc_id, script_id, db)
    return (
        db.query(ScriptRun)
        .filter(ScriptRun.script_id == script_id)
        .order_by(ScriptRun.created_at.desc(), ScriptRun.id.desc())
        .all()
    )
464
+
465
+
466
@router.post("/{script_id}/run", response_model=ScriptRunResponse)
def run_script(project_id: int, tc_id: int, script_id: int, db: Session = Depends(get_db)):
    """Execute a completed playwright_python script and record the outcome.

    Safety-validation failures and execution timeouts are recorded as
    failed/timed-out runs rather than surfaced as HTTP errors.
    """
    # Fix: the project object was assigned but never used; the call is kept
    # purely for path validation.
    _get_project_or_404(project_id, db)
    _get_tc_or_404(project_id, tc_id, db)
    script = _get_script_or_404(project_id, tc_id, script_id, db)

    if script.framework != "playwright_python":
        raise HTTPException(
            status_code=400,
            detail="Only playwright_python scripts can be executed from the UI",
        )

    if script.status != ScriptStatus.completed or not script.script_content:
        raise HTTPException(status_code=400, detail="Script is not ready to run")

    try:
        run_result = _run_python_script(script.script_content)
        run_status = ScriptRunStatus.completed if run_result["success"] else ScriptRunStatus.failed
        return _create_script_run(db, script, run_result, run_status)
    except ValueError as exc:
        # Safety validation rejected the script; record it as a failed run.
        run_result = {
            "success": False,
            "exit_code": 2,
            "stdout": "",
            "stderr": str(exc),
            "duration_seconds": 0.0,
            "command": f"{sys.executable} generated_scenario.py",
        }
        return _create_script_run(db, script, run_result, ScriptRunStatus.failed)
    except subprocess.TimeoutExpired as exc:
        run_result = {
            "success": False,
            # 124 is the conventional "timed out" exit code (GNU timeout).
            "exit_code": 124,
            "stdout": exc.stdout or "",
            "stderr": (exc.stderr or "") + f"\nScript execution timed out after {exc.timeout} seconds",
            "duration_seconds": float(exc.timeout),
            "command": f"{sys.executable} generated_scenario.py",
        }
        return _create_script_run(db, script, run_result, ScriptRunStatus.timed_out)
505
+
506
+
507
@router.post("/{script_id}/github-export")
def github_export_script(project_id: int, tc_id: int, script_id: int, db: Session = Depends(get_db)):
    """Export a completed script to the configured Git repository.

    Returns export metadata on success; 400 when the script is not ready or
    export is disabled, 500 when the export service fails.
    """
    project = _get_project_or_404(project_id, db)
    tc = _get_tc_or_404(project_id, tc_id, db)
    script = _get_script_or_404(project_id, tc_id, script_id, db)

    if script.status != ScriptStatus.completed or not script.script_content:
        raise HTTPException(status_code=400, detail="Script must be completed before GitHub export")

    if not settings.AUTO_EXPORT_GIT_ENABLED:
        raise HTTPException(status_code=400, detail="GitHub export is disabled. Set AUTO_EXPORT_GIT_ENABLED=true in .env")

    try:
        export_path = export_generated_script(
            project_name=project.name,
            test_case_title=tc.title,
            script_id=script.id,
            framework=script.framework,
            llm_provider=script.llm_provider,
            llm_model=script.llm_model or "runtime-default",
            script_content=script.script_content,
        )
    except Exception as exc:
        logger.exception("GitHub export failed for script_id=%s", script_id)
        raise HTTPException(status_code=500, detail=f"GitHub export failed: {exc}") from exc

    if not export_path:
        raise HTTPException(status_code=500, detail="GitHub export did not produce an output path")

    return {
        "exported": True,
        "path": export_path,
        "repository": settings.AUTO_EXPORT_GIT_REPO_URL,
        "branch": settings.AUTO_EXPORT_GIT_BRANCH,
    }
542
+
543
+
544
@router.delete("/{script_id}", status_code=status.HTTP_204_NO_CONTENT)
def delete_script(project_id: int, tc_id: int, script_id: int, db: Session = Depends(get_db)):
    """Delete a generated script; returns 204 on success."""
    _get_project_or_404(project_id, db)
    # Consistency fix: every sibling route validates the test case as well;
    # without this, a bad tc_id only surfaced as a generic "Script not found".
    _get_tc_or_404(project_id, tc_id, db)
    script = _get_script_or_404(project_id, tc_id, script_id, db)
    db.delete(script)
    db.commit()
@@ -0,0 +1,64 @@
1
+ from fastapi import APIRouter, Depends, HTTPException, status
2
+ from sqlalchemy.orm import Session
3
+
4
+ from app.database import get_db
5
+ from app.models.project import Project
6
+ from app.models.test_case import TestCase
7
+ from app.schemas.test_case import TestCaseCreate, TestCaseUpdate, TestCaseResponse
8
+
9
# Test-case CRUD endpoints, scoped under their owning project.
router = APIRouter(prefix="/projects/{project_id}/test-cases", tags=["Test Cases"])
10
+
11
+
12
def _get_project_or_404(project_id: int, db: Session) -> Project:
    """Fetch a project by id or raise HTTP 404."""
    project = db.query(Project).filter(Project.id == project_id).first()
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")
    return project
17
+
18
+
19
@router.post("/", response_model=TestCaseResponse, status_code=status.HTTP_201_CREATED)
def create_test_case(project_id: int, payload: TestCaseCreate, db: Session = Depends(get_db)):
    """Create a test case under the given project; returns the new row."""
    _get_project_or_404(project_id, db)
    tc = TestCase(project_id=project_id, **payload.model_dump())
    db.add(tc)
    db.commit()
    db.refresh(tc)
    return tc
27
+
28
+
29
@router.get("/", response_model=list[TestCaseResponse])
def list_test_cases(project_id: int, skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    """List a project's test cases with simple offset/limit pagination."""
    _get_project_or_404(project_id, db)
    return db.query(TestCase).filter(TestCase.project_id == project_id).offset(skip).limit(limit).all()
33
+
34
+
35
@router.get("/{tc_id}", response_model=TestCaseResponse)
def get_test_case(project_id: int, tc_id: int, db: Session = Depends(get_db)):
    """Fetch one test case scoped to its project, or 404."""
    _get_project_or_404(project_id, db)
    tc = db.query(TestCase).filter(TestCase.project_id == project_id, TestCase.id == tc_id).first()
    if not tc:
        raise HTTPException(status_code=404, detail="Test case not found")
    return tc
42
+
43
+
44
@router.patch("/{tc_id}", response_model=TestCaseResponse)
def update_test_case(project_id: int, tc_id: int, payload: TestCaseUpdate, db: Session = Depends(get_db)):
    """Partially update a test case; only fields present in the request change.

    Fix: use exclude_unset (the documented PATCH pattern) instead of
    exclude_none, which made it impossible for a client to explicitly clear
    a nullable field by sending null.
    """
    _get_project_or_404(project_id, db)
    tc = db.query(TestCase).filter(TestCase.project_id == project_id, TestCase.id == tc_id).first()
    if not tc:
        raise HTTPException(status_code=404, detail="Test case not found")
    for field, value in payload.model_dump(exclude_unset=True).items():
        setattr(tc, field, value)
    db.commit()
    db.refresh(tc)
    return tc
55
+
56
+
57
@router.delete("/{tc_id}", status_code=status.HTTP_204_NO_CONTENT)
def delete_test_case(project_id: int, tc_id: int, db: Session = Depends(get_db)):
    """Delete a test case scoped to its project; 204 on success, 404 if missing."""
    _get_project_or_404(project_id, db)
    tc = db.query(TestCase).filter(TestCase.project_id == project_id, TestCase.id == tc_id).first()
    if not tc:
        raise HTTPException(status_code=404, detail="Test case not found")
    db.delete(tc)
    db.commit()
File without changes
@@ -0,0 +1,27 @@
1
+ from datetime import datetime
2
+
3
+ from pydantic import BaseModel, ConfigDict
4
+
5
+
6
class RecentFailureResponse(BaseModel):
    """One recent failed/timed-out script run, for analytics views."""

    run_id: int
    script_id: int
    test_case_id: int
    # Optional: may be absent if no title is available — TODO confirm caller.
    test_case_title: str | None = None
    exit_code: int
    duration_seconds: float
    stderr_excerpt: str
    created_at: datetime

    # Allow construction directly from ORM rows.
    model_config = ConfigDict(from_attributes=True)
17
+
18
+
19
class RunAnalyticsResponse(BaseModel):
    """Aggregated script-run statistics for one project."""

    project_id: int
    total_runs: int
    success_runs: int
    failed_runs: int
    timed_out_runs: int
    success_rate: float
    average_duration_seconds: float
    recent_failures: list[RecentFailureResponse]
@@ -0,0 +1,48 @@
1
+ from datetime import datetime
2
+
3
+ from pydantic import BaseModel, Field
4
+
5
+ from app.config import settings
6
+ from app.llm.provider import LLMProvider
7
+ from app.models.bulk_job import BulkJobKind, BulkJobStatus, BulkJobItemStatus
8
+ from app.models.project import TestFramework
9
+
10
+
11
class BulkGenerateRequest(BaseModel):
    """Request body for bulk script generation across a project's test cases."""

    # Defaults to the server-wide provider configured in settings.
    llm_provider: LLMProvider = settings.DEFAULT_LLM_PROVIDER
    llm_model: str | None = Field(None, description="Override the default model for the chosen provider")
    framework: TestFramework | None = Field(None, description="Override the project's default framework")
    # None means "process all test cases in the project".
    test_case_ids: list[int] | None = Field(None, description="Optional subset of test case IDs to process")
16
+
17
+
18
class BulkRunRequest(BaseModel):
    """Request body for bulk script execution; None runs all test cases."""

    test_case_ids: list[int] | None = Field(None, description="Optional subset of test case IDs to process")
20
+
21
+
22
class BulkJobItemResponse(BaseModel):
    """Per-test-case status line inside a bulk job."""

    id: int
    job_id: int
    test_case_id: int
    # Set once a script was created for this item; None otherwise.
    script_id: int | None
    status: BulkJobItemStatus
    message: str | None
    created_at: datetime
    updated_at: datetime

    # Allow construction directly from ORM rows.
    model_config = {"from_attributes": True}
33
+
34
+
35
class BulkJobResponse(BaseModel):
    """Summary of a bulk generate/run job, including per-test-case items."""

    id: int
    project_id: int
    kind: BulkJobKind
    status: BulkJobStatus
    total_items: int
    completed_items: int
    failed_items: int
    skipped_items: int
    created_at: datetime
    updated_at: datetime
    # default_factory is the documented pydantic idiom for mutable defaults;
    # equivalent to `= []` here (pydantic copies defaults) but explicit.
    items: list[BulkJobItemResponse] = Field(default_factory=list)

    # Allow construction directly from ORM rows.
    model_config = {"from_attributes": True}