scriptgini 1.3.0__tar.gz → 1.3.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66) hide show
  1. {scriptgini-1.3.0 → scriptgini-1.3.1}/PKG-INFO +4 -4
  2. {scriptgini-1.3.0 → scriptgini-1.3.1}/README.md +3 -3
  3. scriptgini-1.3.1/app/__init__.py +3 -0
  4. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/celery_app.py +2 -2
  5. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/config.py +28 -0
  6. scriptgini-1.3.1/app/main.py +178 -0
  7. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/routers/bulk_jobs.py +18 -5
  8. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/routers/scripts.py +19 -11
  9. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/tasks.py +73 -4
  10. {scriptgini-1.3.0 → scriptgini-1.3.1}/pyproject.toml +1 -1
  11. {scriptgini-1.3.0 → scriptgini-1.3.1}/scriptgini.egg-info/PKG-INFO +4 -4
  12. {scriptgini-1.3.0 → scriptgini-1.3.1}/tests/test_api.py +132 -0
  13. {scriptgini-1.3.0 → scriptgini-1.3.1}/tests/test_coverage.py +63 -15
  14. {scriptgini-1.3.0 → scriptgini-1.3.1}/tests/test_infra_services_coverage.py +119 -2
  15. scriptgini-1.3.0/app/__init__.py +0 -3
  16. scriptgini-1.3.0/app/main.py +0 -84
  17. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/agents/__init__.py +0 -0
  18. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/agents/prompts.py +0 -0
  19. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/agents/script_gini_agent.py +0 -0
  20. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/cache.py +0 -0
  21. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/database.py +0 -0
  22. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/llm/__init__.py +0 -0
  23. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/llm/provider.py +0 -0
  24. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/models/__init__.py +0 -0
  25. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/models/api_key.py +0 -0
  26. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/models/bulk_job.py +0 -0
  27. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/models/execution_job.py +0 -0
  28. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/models/generated_script.py +0 -0
  29. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/models/membership.py +0 -0
  30. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/models/organization.py +0 -0
  31. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/models/project.py +0 -0
  32. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/models/script_run.py +0 -0
  33. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/models/test_case.py +0 -0
  34. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/models/user.py +0 -0
  35. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/routers/__init__.py +0 -0
  36. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/routers/analytics.py +0 -0
  37. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/routers/api_key.py +0 -0
  38. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/routers/auth.py +0 -0
  39. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/routers/demo.py +0 -0
  40. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/routers/execution.py +0 -0
  41. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/routers/organizations.py +0 -0
  42. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/routers/projects.py +0 -0
  43. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/routers/test_cases.py +0 -0
  44. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/schemas/__init__.py +0 -0
  45. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/schemas/analytics.py +0 -0
  46. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/schemas/api_key.py +0 -0
  47. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/schemas/auth.py +0 -0
  48. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/schemas/bulk_job.py +0 -0
  49. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/schemas/execution.py +0 -0
  50. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/schemas/generated_script.py +0 -0
  51. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/schemas/membership.py +0 -0
  52. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/schemas/organization.py +0 -0
  53. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/schemas/project.py +0 -0
  54. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/schemas/test_case.py +0 -0
  55. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/services/api_key.py +0 -0
  56. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/services/auth.py +0 -0
  57. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/services/auth_dependencies.py +0 -0
  58. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/services/git_export.py +0 -0
  59. {scriptgini-1.3.0 → scriptgini-1.3.1}/app/services/rbac.py +0 -0
  60. {scriptgini-1.3.0 → scriptgini-1.3.1}/scriptgini.egg-info/SOURCES.txt +0 -0
  61. {scriptgini-1.3.0 → scriptgini-1.3.1}/scriptgini.egg-info/dependency_links.txt +0 -0
  62. {scriptgini-1.3.0 → scriptgini-1.3.1}/scriptgini.egg-info/top_level.txt +0 -0
  63. {scriptgini-1.3.0 → scriptgini-1.3.1}/setup.cfg +0 -0
  64. {scriptgini-1.3.0 → scriptgini-1.3.1}/tests/test_auth.py +0 -0
  65. {scriptgini-1.3.0 → scriptgini-1.3.1}/tests/test_sprint2_rbac.py +0 -0
  66. {scriptgini-1.3.0 → scriptgini-1.3.1}/tests/test_sprint3_execution.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: scriptgini
3
- Version: 1.3.0
3
+ Version: 1.3.1
4
4
  Summary: Agentic AI system that converts functional test cases into automation test scripts.
5
5
  Author: ScriptGini Team
6
6
  License: Proprietary
@@ -16,7 +16,7 @@ Description-Content-Type: text/markdown
16
16
 
17
17
  > **Enterprise-grade Agentic AI system that converts functional test cases into high-quality, review-ready automation test scripts.**
18
18
 
19
- Current release: v1.3.0 (Sprint 3)
19
+ Current release: v1.3.1 (Sprint 4 hardening increment)
20
20
 
21
21
  ---
22
22
 
@@ -441,8 +441,8 @@ The project follows an **enterprise-grade development roadmap** with 6 sprints c
441
441
  |--------|-------|--------|--------|
442
442
  | **Sprint 1** | IAM Core | 30-36pts | 🟡 Core delivered (auth hardening pending) |
443
443
  | **Sprint 2** | RBAC + Multi-Tenancy | 32-38pts | 🟡 Core delivered (RBAC hardening pending) |
444
- | **Sprint 3** | Durable Execution | 34-40pts | 🔲 Pending (Redis + Celery/Arq setup) |
445
- | **Sprint 4** | Security & Hardening | 30-36pts | 🔲 Pending (Container sandbox, audit logging) |
444
+ | **Sprint 3** | Durable Execution | 34-40pts | ✅ Completed (Redis + Celery queue foundation) |
445
+ | **Sprint 4** | Security & Hardening | 30-36pts | 🟡 In progress (isolation boundary + breakout tests pending) |
446
446
  | **Sprint 5** | Reporting & Analytics | 28-34pts | 🔲 Pending (Artifact storage, dashboards) |
447
447
  | **Sprint 6** | Advanced Features | 24-30pts | 🔲 Pending (Webhooks, defect sync, versioning) |
448
448
 
@@ -2,7 +2,7 @@
2
2
 
3
3
  > **Enterprise-grade Agentic AI system that converts functional test cases into high-quality, review-ready automation test scripts.**
4
4
 
5
- Current release: v1.3.0 (Sprint 3)
5
+ Current release: v1.3.1 (Sprint 4 hardening increment)
6
6
 
7
7
  ---
8
8
 
@@ -427,8 +427,8 @@ The project follows an **enterprise-grade development roadmap** with 6 sprints c
427
427
  |--------|-------|--------|--------|
428
428
  | **Sprint 1** | IAM Core | 30-36pts | 🟡 Core delivered (auth hardening pending) |
429
429
  | **Sprint 2** | RBAC + Multi-Tenancy | 32-38pts | 🟡 Core delivered (RBAC hardening pending) |
430
- | **Sprint 3** | Durable Execution | 34-40pts | 🔲 Pending (Redis + Celery/Arq setup) |
431
- | **Sprint 4** | Security & Hardening | 30-36pts | 🔲 Pending (Container sandbox, audit logging) |
430
+ | **Sprint 3** | Durable Execution | 34-40pts | ✅ Completed (Redis + Celery queue foundation) |
431
+ | **Sprint 4** | Security & Hardening | 30-36pts | 🟡 In progress (isolation boundary + breakout tests pending) |
432
432
  | **Sprint 5** | Reporting & Analytics | 28-34pts | 🔲 Pending (Artifact storage, dashboards) |
433
433
  | **Sprint 6** | Advanced Features | 24-30pts | 🔲 Pending (Webhooks, defect sync, versioning) |
434
434
 
@@ -0,0 +1,3 @@
1
+ __version__ = "1.3.1"
2
+ __api_version__ = "v1.3.1"
3
+
@@ -16,8 +16,8 @@ celery_app.conf.update(
16
16
  timezone="UTC",
17
17
  enable_utc=True,
18
18
  task_track_started=True,
19
- task_time_limit=30 * 60, # 30 minute hard time limit
20
- task_soft_time_limit=25 * 60, # 25 minute soft time limit
19
+ task_time_limit=settings.CELERY_TASK_HARD_TIMEOUT,
20
+ task_soft_time_limit=settings.CELERY_TASK_TIMEOUT,
21
21
  worker_prefetch_multiplier=4,
22
22
  worker_max_tasks_per_child=1000,
23
23
  )
@@ -8,6 +8,11 @@ class Settings(BaseSettings):
8
8
  # App
9
9
  APP_NAME: str = "ScriptGini"
10
10
  DEBUG: bool = False
11
+ CORS_ALLOWED_ORIGINS: str = "http://localhost:3000,http://127.0.0.1:3000"
12
+ RATE_LIMIT_REQUESTS: int = 120
13
+ RATE_LIMIT_WINDOW_SECONDS: int = 60
14
+ RATE_LIMIT_EXEMPT_PATHS: str = "/health,/docs,/redoc,/openapi.json,/static"
15
+ SECURITY_AUDIT_LOG_ENABLED: bool = True
11
16
 
12
17
  # Database (PostgreSQL)
13
18
  # Format: postgresql://user:password@host:port/dbname
@@ -22,6 +27,7 @@ class Settings(BaseSettings):
22
27
  CELERY_BROKER_URL: str = "redis://localhost:6379/1" # Different DB for task queue
23
28
  CELERY_RESULT_BACKEND: str = "redis://localhost:6379/2" # Different DB for results
24
29
  CELERY_TASK_TIMEOUT: int = 600 # 10 minutes default task timeout
30
+ CELERY_TASK_HARD_TIMEOUT: int = 660
25
31
  CELERY_MAX_RETRIES: int = 3
26
32
  JWT_SECRET_KEY: str = "your-secret-key-change-in-production"
27
33
  JWT_ALGORITHM: str = "HS256"
@@ -34,6 +40,8 @@ class Settings(BaseSettings):
34
40
  SCRIPT_GENERATION_TIMEOUT_SECONDS: int = 180
35
41
  PLAYWRIGHT_RUN_HEADED: bool = True
36
42
  SCRIPT_EXECUTION_TIMEOUT_SECONDS: int = 300
43
+ EXECUTION_ENV_ALLOWED_KEYS: str = "PATH,SYSTEMROOT,TEMP,TMP,HOME,USERPROFILE,PYTHONPATH,PYTHONHOME,PYTHONIOENCODING"
44
+ EXECUTION_ENV_ALLOWED_PREFIXES: str = "PLAYWRIGHT_"
37
45
  SKIP_REVIEW_FOR_OLLAMA: bool = True
38
46
  USE_LLM_INTENT_ANALYSIS: bool = True
39
47
 
@@ -71,5 +79,25 @@ class Settings(BaseSettings):
71
79
  AWS_REGION_NAME: str = "us-east-1"
72
80
  BEDROCK_MODEL_ID: str = "anthropic.claude-3-5-sonnet-20241022-v2:0"
73
81
 
82
+ @staticmethod
83
+ def _split_csv(value: str) -> list[str]:
84
+ return [entry.strip() for entry in value.split(",") if entry.strip()]
85
+
86
+ @property
87
+ def cors_allowed_origins_list(self) -> list[str]:
88
+ return self._split_csv(self.CORS_ALLOWED_ORIGINS)
89
+
90
+ @property
91
+ def rate_limit_exempt_paths_list(self) -> list[str]:
92
+ return self._split_csv(self.RATE_LIMIT_EXEMPT_PATHS)
93
+
94
+ @property
95
+ def execution_env_allowed_keys_set(self) -> set[str]:
96
+ return set(self._split_csv(self.EXECUTION_ENV_ALLOWED_KEYS))
97
+
98
+ @property
99
+ def execution_env_allowed_prefixes_tuple(self) -> tuple[str, ...]:
100
+ return tuple(self._split_csv(self.EXECUTION_ENV_ALLOWED_PREFIXES))
101
+
74
102
 
75
103
  settings = Settings()
@@ -0,0 +1,178 @@
1
+ import logging
2
+ import threading
3
+ import time
4
+ from contextlib import asynccontextmanager
5
+ from pathlib import Path
6
+ from uuid import uuid4
7
+
8
+ from fastapi import FastAPI, Request
9
+ from fastapi.responses import JSONResponse
10
+ from fastapi.responses import FileResponse
11
+ from fastapi.middleware.cors import CORSMiddleware
12
+ from fastapi.staticfiles import StaticFiles
13
+
14
+ from app import __version__
15
+ from app.config import settings
16
+ from app.llm.provider import get_llm_diagnostics
17
+ from app.routers import projects, test_cases, scripts, bulk_jobs, analytics, demo, auth, api_key, organizations, execution
18
+
19
+ logging.basicConfig(level=logging.DEBUG if settings.DEBUG else logging.INFO)
20
+ logger = logging.getLogger(__name__)
21
+
22
+ static_dir = Path(__file__).resolve().parent / "static"
23
+ _RATE_LIMIT_BUCKETS: dict[str, tuple[int, float]] = {}
24
+ _RATE_LIMIT_LOCK = threading.Lock()
25
+
26
+
27
+ def _client_ip(request: Request) -> str:
28
+ forwarded_for = request.headers.get("x-forwarded-for", "")
29
+ if forwarded_for:
30
+ return forwarded_for.split(",")[0].strip()
31
+ return request.client.host if request.client else "unknown"
32
+
33
+
34
+ def _is_exempt_path(path: str) -> bool:
35
+ for exempt in settings.rate_limit_exempt_paths_list:
36
+ if path == exempt or path.startswith(exempt):
37
+ return True
38
+ return False
39
+
40
+
41
+ def _check_rate_limit(key: str, now: float) -> tuple[bool, int, int]:
42
+ window_seconds = max(1, settings.RATE_LIMIT_WINDOW_SECONDS)
43
+ max_requests = max(1, settings.RATE_LIMIT_REQUESTS)
44
+ window_start = now - window_seconds
45
+ with _RATE_LIMIT_LOCK:
46
+ current_count, reset_at = _RATE_LIMIT_BUCKETS.get(key, (0, now + window_seconds))
47
+ if reset_at <= now:
48
+ current_count = 0
49
+ reset_at = now + window_seconds
50
+ current_count += 1
51
+ _RATE_LIMIT_BUCKETS[key] = (current_count, reset_at)
52
+
53
+ remaining = max(0, max_requests - current_count)
54
+ retry_after = max(0, int(reset_at - now))
55
+ return current_count <= max_requests, remaining, retry_after
56
+
57
+
58
+ def _security_audit(event: str, request: Request, status_code: int, request_id: str) -> None:
59
+ if not settings.SECURITY_AUDIT_LOG_ENABLED:
60
+ return
61
+ logger.info(
62
+ "security_audit event=%s request_id=%s method=%s path=%s status=%s ip=%s user_agent=%s",
63
+ event,
64
+ request_id,
65
+ request.method,
66
+ request.url.path,
67
+ status_code,
68
+ _client_ip(request),
69
+ request.headers.get("user-agent", "unknown"),
70
+ )
71
+
72
+
73
+ @asynccontextmanager
74
+ async def lifespan(_: FastAPI):
75
+ diagnostics = get_llm_diagnostics()
76
+ logger.info(
77
+ "Runtime LLM default: provider=%s model=%s api_key_env=%s api_key_present=%s api_key=%s",
78
+ diagnostics["provider"],
79
+ diagnostics["model"],
80
+ diagnostics["api_key_env"],
81
+ diagnostics["api_key_present"],
82
+ diagnostics["api_key_masked"],
83
+ )
84
+ yield
85
+
86
+ app = FastAPI(
87
+ title=settings.APP_NAME,
88
+ description=(
89
+ "Enterprise-grade Agentic AI system that converts functional test cases "
90
+ "into high-quality automation scripts."
91
+ ),
92
+ version=__version__,
93
+ lifespan=lifespan,
94
+ )
95
+
96
+ app.add_middleware(
97
+ CORSMiddleware,
98
+ allow_origins=settings.cors_allowed_origins_list,
99
+ allow_credentials=True,
100
+ allow_methods=["*"],
101
+ allow_headers=["*"],
102
+ )
103
+
104
+
105
+ @app.middleware("http")
106
+ async def apply_security_controls(request: Request, call_next):
107
+ request_id = request.headers.get("X-Request-ID") or str(uuid4())
108
+ request.state.request_id = request_id
109
+ path = request.url.path
110
+ now = time.time()
111
+
112
+ if not _is_exempt_path(path):
113
+ key = f"{_client_ip(request)}:{path}"
114
+ allowed, remaining, retry_after = _check_rate_limit(key, now)
115
+ if not allowed:
116
+ _security_audit("rate_limit_block", request, 429, request_id)
117
+ return JSONResponse(
118
+ status_code=429,
119
+ content={"detail": "Rate limit exceeded", "request_id": request_id},
120
+ headers={
121
+ "X-Request-ID": request_id,
122
+ "X-RateLimit-Limit": str(max(1, settings.RATE_LIMIT_REQUESTS)),
123
+ "X-RateLimit-Remaining": "0",
124
+ "Retry-After": str(retry_after),
125
+ },
126
+ )
127
+
128
+ response = await call_next(request)
129
+ response.headers["X-Request-ID"] = request_id
130
+
131
+ if not _is_exempt_path(path):
132
+ response.headers["X-RateLimit-Limit"] = str(max(1, settings.RATE_LIMIT_REQUESTS))
133
+ response.headers["X-RateLimit-Remaining"] = str(response.headers.get("X-RateLimit-Remaining", ""))
134
+ if not response.headers["X-RateLimit-Remaining"]:
135
+ key = f"{_client_ip(request)}:{path}"
136
+ with _RATE_LIMIT_LOCK:
137
+ count, _ = _RATE_LIMIT_BUCKETS.get(key, (0, now))
138
+ response.headers["X-RateLimit-Remaining"] = str(max(0, max(1, settings.RATE_LIMIT_REQUESTS) - count))
139
+
140
+ if path.startswith("/api/v1/auth") or path.startswith("/api/v1/execution"):
141
+ if response.status_code >= 400 or request.method in {"POST", "PUT", "PATCH", "DELETE"}:
142
+ _security_audit("security_sensitive_request", request, response.status_code, request_id)
143
+
144
+ return response
145
+
146
+ app.include_router(projects.router, prefix="/api/v1")
147
+ app.include_router(test_cases.router, prefix="/api/v1")
148
+ app.include_router(scripts.router, prefix="/api/v1")
149
+ app.include_router(bulk_jobs.router, prefix="/api/v1")
150
+ app.include_router(analytics.router, prefix="/api/v1")
151
+ app.include_router(demo.router, prefix="/api/v1")
152
+ app.include_router(auth.router, prefix="/api/v1")
153
+ app.include_router(api_key.router, prefix="/api/v1")
154
+ app.include_router(organizations.router, prefix="/api/v1")
155
+ app.include_router(execution.router, prefix="/api/v1")
156
+ app.mount("/static", StaticFiles(directory=static_dir), name="static")
157
+
158
+
159
+ @app.get("/api/v1/runtime/llm", tags=["Runtime"])
160
+ def runtime_llm():
161
+ default_diagnostics = get_llm_diagnostics()
162
+ return {
163
+ "default_provider": default_diagnostics["provider"],
164
+ "default_model": default_diagnostics["model"],
165
+ "provider_diagnostics": {
166
+ provider: get_llm_diagnostics(provider) for provider in ["openai", "openrouter", "gemini", "ollama", "bedrock"]
167
+ },
168
+ }
169
+
170
+
171
+ @app.get("/", include_in_schema=False)
172
+ def index():
173
+ return FileResponse(static_dir / "index.html")
174
+
175
+
176
+ @app.get("/health", tags=["Health"])
177
+ def health():
178
+ return {"status": "ok", "app": settings.APP_NAME}
@@ -2,7 +2,7 @@ import logging
2
2
  import subprocess
3
3
  import sys
4
4
 
5
- from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, status
5
+ from fastapi import APIRouter, Depends, HTTPException, status
6
6
  from sqlalchemy.orm import Session
7
7
 
8
8
  from app.database import get_db
@@ -13,6 +13,7 @@ from app.models.project import Project
13
13
  from app.models.script_run import ScriptRunStatus
14
14
  from app.models.test_case import TestCase
15
15
  from app.schemas.bulk_job import BulkGenerateRequest, BulkJobResponse, BulkRunRequest
16
+ from app.tasks import process_bulk_execution_job, process_bulk_generation_job
16
17
  from app.routers.scripts import (
17
18
  _create_script_run,
18
19
  _get_project_or_404,
@@ -218,7 +219,6 @@ def _run_bulk_execution(job_id: int, request: BulkRunRequest):
218
219
  def bulk_generate_scripts(
219
220
  project_id: int,
220
221
  payload: BulkGenerateRequest,
221
- background_tasks: BackgroundTasks,
222
222
  db: Session = Depends(get_db),
223
223
  ):
224
224
  _get_project_or_404(project_id, db)
@@ -238,7 +238,14 @@ def bulk_generate_scripts(
238
238
  _refresh_job_counts(job, db)
239
239
  db.commit()
240
240
 
241
- background_tasks.add_task(_run_bulk_generation, job.id, payload)
241
+ try:
242
+ process_bulk_generation_job.delay(job.id, payload.model_dump())
243
+ except Exception as exc:
244
+ logger.exception("Failed to enqueue bulk generation job_id=%s", job.id)
245
+ job.status = BulkJobStatus.failed
246
+ db.commit()
247
+ raise HTTPException(status_code=503, detail="Failed to enqueue bulk generation") from exc
248
+
242
249
  return _serialize_bulk_job(job, db)
243
250
 
244
251
 
@@ -246,7 +253,6 @@ def bulk_generate_scripts(
246
253
  def bulk_run_scripts(
247
254
  project_id: int,
248
255
  payload: BulkRunRequest,
249
- background_tasks: BackgroundTasks,
250
256
  db: Session = Depends(get_db),
251
257
  ):
252
258
  _get_project_or_404(project_id, db)
@@ -266,7 +272,14 @@ def bulk_run_scripts(
266
272
  _refresh_job_counts(job, db)
267
273
  db.commit()
268
274
 
269
- background_tasks.add_task(_run_bulk_execution, job.id, payload)
275
+ try:
276
+ process_bulk_execution_job.delay(job.id, payload.model_dump())
277
+ except Exception as exc:
278
+ logger.exception("Failed to enqueue bulk run job_id=%s", job.id)
279
+ job.status = BulkJobStatus.failed
280
+ db.commit()
281
+ raise HTTPException(status_code=503, detail="Failed to enqueue bulk run") from exc
282
+
270
283
  return _serialize_bulk_job(job, db)
271
284
 
272
285
 
@@ -11,7 +11,7 @@ import time
11
11
  from pathlib import Path
12
12
  from urllib.parse import urlparse
13
13
 
14
- from fastapi import APIRouter, Depends, HTTPException, status, BackgroundTasks
14
+ from fastapi import APIRouter, Depends, HTTPException, status
15
15
  from sqlalchemy.orm import Session
16
16
 
17
17
  from app.database import get_db
@@ -24,6 +24,7 @@ from app.schemas.generated_script import GenerateScriptRequest, GeneratedScriptR
24
24
  from app.agents.script_gini_agent import run_agent
25
25
  from app.llm.provider import LLMProvider, get_llm_diagnostics
26
26
  from app.services.git_export import export_generated_script
27
+ from app.tasks import process_script_generation_job
27
28
 
28
29
  logger = logging.getLogger(__name__)
29
30
 
@@ -207,13 +208,14 @@ def _uses_pytest_playwright(script_content: str) -> bool:
207
208
 
208
209
 
209
210
  def _build_restricted_env() -> dict:
210
- allowed_exact = {"PATH", "SYSTEMROOT", "TEMP", "TMP", "HOME", "USERPROFILE"}
211
- allowed_prefixes = ("PLAYWRIGHT_", "PYTHON")
211
+ allowed_exact = settings.execution_env_allowed_keys_set
212
+ allowed_prefixes = settings.execution_env_allowed_prefixes_tuple
212
213
  env = {}
213
214
  for key, value in os.environ.items():
214
215
  if key in allowed_exact or key.startswith(allowed_prefixes):
215
216
  env[key] = value
216
217
  env["PLAYWRIGHT_HEADLESS"] = "0" if settings.PLAYWRIGHT_RUN_HEADED else "1"
218
+ env["PYTHONNOUSERSITE"] = "1"
217
219
  return env
218
220
 
219
221
 
@@ -393,7 +395,6 @@ def generate_script(
393
395
  project_id: int,
394
396
  tc_id: int,
395
397
  payload: GenerateScriptRequest,
396
- background_tasks: BackgroundTasks,
397
398
  db: Session = Depends(get_db),
398
399
  ):
399
400
  """Kick off async script generation. Poll GET /scripts/{id} for the result."""
@@ -413,13 +414,20 @@ def generate_script(
413
414
  db.commit()
414
415
  db.refresh(script_record)
415
416
 
416
- background_tasks.add_task(
417
- _run_generation,
418
- script_record.id,
419
- project_id,
420
- tc_id,
421
- payload.model_dump(),
422
- )
417
+ try:
418
+ process_script_generation_job.delay(
419
+ script_record.id,
420
+ project_id,
421
+ tc_id,
422
+ payload.model_dump(),
423
+ )
424
+ except Exception as exc:
425
+ logger.exception("Failed to enqueue generation task for script_id=%s", script_record.id)
426
+ script_record.status = ScriptStatus.failed
427
+ script_record.error_message = f"Queue enqueue failed: {exc}"
428
+ db.commit()
429
+ raise HTTPException(status_code=503, detail="Failed to enqueue script generation") from exc
430
+
423
431
  return script_record
424
432
 
425
433
 
@@ -16,6 +16,67 @@ from app.models.execution_job import ExecutionJob, ExecutionJobStatus, can_trans
16
16
  logger = logging.getLogger(__name__)
17
17
 
18
18
 
19
+ def _retry_countdown(retries: int) -> int:
20
+ return min(300, 5 ** (retries + 1))
21
+
22
+
23
+ def _log_dead_letter(task_name: str, entity_id: int, exc: Exception) -> None:
24
+ logger.error("dead_letter task=%s entity_id=%s error=%s", task_name, entity_id, exc)
25
+
26
+
27
+ @shared_task(bind=True, max_retries=settings.CELERY_MAX_RETRIES)
28
+ def process_script_generation_job(
29
+ self,
30
+ script_id: int,
31
+ project_id: int,
32
+ test_case_id: int,
33
+ request_data: dict,
34
+ ):
35
+ """Queue-backed script generation worker task."""
36
+ try:
37
+ from app.routers.scripts import _run_generation
38
+
39
+ _run_generation(script_id, project_id, test_case_id, request_data)
40
+ return {"status": "completed", "script_id": script_id}
41
+ except Exception as exc:
42
+ if self.request.retries >= self.max_retries:
43
+ _log_dead_letter("process_script_generation_job", script_id, exc)
44
+ raise
45
+ raise self.retry(exc=exc, countdown=_retry_countdown(self.request.retries))
46
+
47
+
48
+ @shared_task(bind=True, max_retries=settings.CELERY_MAX_RETRIES)
49
+ def process_bulk_generation_job(self, bulk_job_id: int, request_data: dict):
50
+ """Queue-backed bulk generation worker task."""
51
+ try:
52
+ from app.routers.bulk_jobs import _run_bulk_generation
53
+ from app.schemas.bulk_job import BulkGenerateRequest
54
+
55
+ _run_bulk_generation(bulk_job_id, BulkGenerateRequest.model_validate(request_data))
56
+ return {"status": "completed", "bulk_job_id": bulk_job_id}
57
+ except Exception as exc:
58
+ if self.request.retries >= self.max_retries:
59
+ _log_dead_letter("process_bulk_generation_job", bulk_job_id, exc)
60
+ raise
61
+ raise self.retry(exc=exc, countdown=_retry_countdown(self.request.retries))
62
+
63
+
64
+ @shared_task(bind=True, max_retries=settings.CELERY_MAX_RETRIES)
65
+ def process_bulk_execution_job(self, bulk_job_id: int, request_data: dict):
66
+ """Queue-backed bulk execution worker task."""
67
+ try:
68
+ from app.routers.bulk_jobs import _run_bulk_execution
69
+ from app.schemas.bulk_job import BulkRunRequest
70
+
71
+ _run_bulk_execution(bulk_job_id, BulkRunRequest.model_validate(request_data))
72
+ return {"status": "completed", "bulk_job_id": bulk_job_id}
73
+ except Exception as exc:
74
+ if self.request.retries >= self.max_retries:
75
+ _log_dead_letter("process_bulk_execution_job", bulk_job_id, exc)
76
+ raise
77
+ raise self.retry(exc=exc, countdown=_retry_countdown(self.request.retries))
78
+
79
+
19
80
  @shared_task(bind=True, max_retries=settings.CELERY_MAX_RETRIES)
20
81
  def generate_test_script(self, test_case_id: int, project_id: int):
21
82
  """
@@ -42,8 +103,10 @@ def generate_test_script(self, test_case_id: int, project_id: int):
42
103
  }
43
104
  except Exception as exc:
44
105
  logger.error(f"Error generating script: {exc}")
45
- # Retry with exponential backoff (5s, 25s, 125s)
46
- raise self.retry(exc=exc, countdown=5 ** self.request.retries)
106
+ if self.request.retries >= self.max_retries:
107
+ _log_dead_letter("generate_test_script", test_case_id, exc)
108
+ raise
109
+ raise self.retry(exc=exc, countdown=_retry_countdown(self.request.retries))
47
110
  finally:
48
111
  db.close()
49
112
 
@@ -127,7 +190,10 @@ def execute_script(self, script_id: int, execution_env: dict | None = None, exec
127
190
  ExecutionJobStatus.failed,
128
191
  error_message=str(exc),
129
192
  )
130
- raise self.retry(exc=exc, countdown=5 ** self.request.retries)
193
+ if self.request.retries >= self.max_retries:
194
+ _log_dead_letter("execute_script", script_id, exc)
195
+ raise
196
+ raise self.retry(exc=exc, countdown=_retry_countdown(self.request.retries))
131
197
  finally:
132
198
  db.close()
133
199
 
@@ -158,7 +224,10 @@ def bulk_job_processor(self, bulk_job_id: int):
158
224
  }
159
225
  except Exception as exc:
160
226
  logger.error(f"Error processing bulk job: {exc}")
161
- raise self.retry(exc=exc, countdown=10 ** self.request.retries)
227
+ if self.request.retries >= self.max_retries:
228
+ _log_dead_letter("bulk_job_processor", bulk_job_id, exc)
229
+ raise
230
+ raise self.retry(exc=exc, countdown=min(300, 10 ** (self.request.retries + 1)))
162
231
  finally:
163
232
  db.close()
164
233
 
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "scriptgini"
7
- version = "1.3.0"
7
+ version = "1.3.1"
8
8
  description = "Agentic AI system that converts functional test cases into automation test scripts."
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.11"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: scriptgini
3
- Version: 1.3.0
3
+ Version: 1.3.1
4
4
  Summary: Agentic AI system that converts functional test cases into automation test scripts.
5
5
  Author: ScriptGini Team
6
6
  License: Proprietary
@@ -16,7 +16,7 @@ Description-Content-Type: text/markdown
16
16
 
17
17
  > **Enterprise-grade Agentic AI system that converts functional test cases into high-quality, review-ready automation test scripts.**
18
18
 
19
- Current release: v1.3.0 (Sprint 3)
19
+ Current release: v1.3.1 (Sprint 4 hardening increment)
20
20
 
21
21
  ---
22
22
 
@@ -441,8 +441,8 @@ The project follows an **enterprise-grade development roadmap** with 6 sprints c
441
441
  |--------|-------|--------|--------|
442
442
  | **Sprint 1** | IAM Core | 30-36pts | 🟡 Core delivered (auth hardening pending) |
443
443
  | **Sprint 2** | RBAC + Multi-Tenancy | 32-38pts | 🟡 Core delivered (RBAC hardening pending) |
444
- | **Sprint 3** | Durable Execution | 34-40pts | 🔲 Pending (Redis + Celery/Arq setup) |
445
- | **Sprint 4** | Security & Hardening | 30-36pts | 🔲 Pending (Container sandbox, audit logging) |
444
+ | **Sprint 3** | Durable Execution | 34-40pts | ✅ Completed (Redis + Celery queue foundation) |
445
+ | **Sprint 4** | Security & Hardening | 30-36pts | 🟡 In progress (isolation boundary + breakout tests pending) |
446
446
  | **Sprint 5** | Reporting & Analytics | 28-34pts | 🔲 Pending (Artifact storage, dashboards) |
447
447
  | **Sprint 6** | Advanced Features | 24-30pts | 🔲 Pending (Webhooks, defect sync, versioning) |
448
448