validra 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,142 @@
1
+ import json
2
+ import logging
3
+
4
+ from fastapi import APIRouter, Request
5
+ from fastapi.responses import StreamingResponse
6
+
7
+ from app.api.schemas.requests import TestRequest
8
+ from app.engine.orchestrator import Orchestrator
9
+
10
logger = logging.getLogger(__name__)

# Shared router for the test-generation/execution endpoint; mounted by
# app.main.create_app() via app.include_router(generation.router).
router = APIRouter(tags=["Execution"])
13
+
14
+
15
def _sse(event: dict, **dumps_kwargs) -> str:
    """Serialize *event* as a single Server-Sent-Events `data:` frame."""
    return f"data: {json.dumps(event, **dumps_kwargs)}\n\n"


def _error_response(message: str) -> StreamingResponse:
    """Return a one-shot SSE stream carrying a single error event.

    NOTE(review): errors are reported in-band over HTTP 200 rather than a
    4xx status so clients already consuming the stream can render them —
    confirm consumers rely on this before changing it.
    """

    def stream():
        yield _sse({"phase": "error", "message": message})

    return StreamingResponse(stream(), media_type="text/event-stream")


@router.post("/generateAndRun", summary="Generates Test Cases and Run")
def generate_and_run(request: TestRequest, req: Request):
    """
    Generate and execute automated API tests, streaming results via SSE.

    Events emitted:
    - `{"phase": "warming_up"}` — stream opened, work about to start
    - `{"phase": "generating"}` — LLM is generating test cases
    - `{"phase": "executing"|"validating", "progress": N, "total": T}` — step progress
    - `{"phase": "result", "progress": N, "total": T, "result": {...}}` — one test completed
    - `{"phase": "done", "summary": {...}}` — all tests finished
    - `{"phase": "error", "message": "..."}` — unrecoverable failure

    ## Test Types

    ### FUZZ
    Generates invalid, unexpected, or edge-case payloads to test input validation.

    ### AUTH
    Mutates headers to test authentication and authorization scenarios
    (missing tokens, expired credentials, malformed values).

    ### PEN
    Simulates penetration test payloads: injection, privilege escalation,
    parameter pollution, ID tampering, encoding tricks.

    ## Provider Config
    Pass `provider` and optionally `provider_config` to select and configure
    the LLM backend per request. Unset fields fall back to environment defaults.
    Unknown keys in `provider_config` are rejected with an in-stream error event.
    """
    # Shared singletons built once at startup (see app.main.lifespan).
    plugin_registry = req.app.state.plugin_registry
    provider_registry = req.app.state.provider_registry
    executor = req.app.state.executor
    validator = req.app.state.validator

    try:
        plugin = plugin_registry.get(request.test_type.upper())
    except KeyError as e:
        return _error_response(str(e))

    try:
        provider = provider_registry.get(request.provider.lower())
    except KeyError as e:
        return _error_response(str(e))

    try:
        # Per-request overrides win over the provider's declared defaults;
        # config_class rejects unknown/invalid keys.
        merged = {**provider.default_config().model_dump(), **(request.provider_config or {})}
        provider_config = provider.config_class(**merged)
    except Exception as e:
        return _error_response(f"Invalid provider_config: {e}")

    orchestrator = Orchestrator(plugin, executor, validator, provider, provider_config)

    # Only the fields the plugin needs for generation.
    safe_input = {
        "payload": request.payload,
        "headers": request.headers,
        "meta": request.payload_meta or {},
    }

    # Only the fields the orchestrator needs for execution/validation.
    safe_request = {
        "endpoint": request.endpoint,
        "method": request.method,
        "headers": request.headers,
        "validate": request.validate,
        "meta": request.payload_meta or {},
    }

    def event_stream():
        # Phase 1: Warming up (instant feedback to the client)
        yield _sse({"phase": "warming_up"})

        # Phase 2: Generate all test cases (single LLM call)
        yield _sse({"phase": "generating"})

        try:
            # Defensive truncation in case the plugin over-generates.
            tests = orchestrator.generate(safe_input, request.max_cases)[: request.max_cases]
        except Exception as e:
            logger.exception("Error generating test cases")
            yield _sse({"phase": "error", "message": f"Error generating test cases: {e}"})
            return

        if not tests:
            yield _sse({"phase": "error", "message": "No test cases were generated. LLM may be unavailable or returned invalid output."})
            return

        # Phase 3+4: Execute and validate each test, yielding granular step events
        results = []
        try:
            for step in orchestrator.run_stream(safe_request, tests):
                event = step["event"]
                if event in ("executing", "validating"):
                    yield _sse({"phase": event, "progress": step["progress"], "total": step["total"]})
                elif event == "result":
                    results.append(step["result"])
                    # default=str: result payloads may carry non-JSON types.
                    yield _sse({"phase": "result", "progress": step["progress"], "total": step["total"], "result": step["result"]}, default=str)
        except Exception as e:
            logger.exception("Error executing test cases")
            yield _sse({"phase": "error", "message": f"Error executing test cases: {e}"})
            return

        # Phase 5: Final summary
        success_count = sum(1 for r in results if r["success"])
        total_duration = sum(r["duration_ms"] for r in results)
        summary = {
            "total": len(results),
            "success": success_count,
            "failed": len(results) - success_count,
            "total_duration_ms": total_duration,
        }
        yield _sse({"phase": "done", "summary": summary})

    return StreamingResponse(
        event_stream(),
        media_type="text/event-stream",
        headers={
            # Keep proxies (e.g. nginx) from buffering the event stream.
            "Cache-Control": "no-cache",
            "X-Accel-Buffering": "no",
        },
    )
@@ -0,0 +1,34 @@
1
+ from fastapi import APIRouter, HTTPException, Request
2
+
3
+ from app.api.schemas.requests import ValidateRequest
4
+
5
# Router for the standalone /validate endpoint; mounted by app.main.create_app().
router = APIRouter(tags=["Validation"])
6
+
7
+
8
@router.post("/validate", summary="Validate Response from /generateAndRun")
def validate(request: ValidateRequest, req: Request):
    """Re-validate a single test/response pair with the selected LLM provider.

    Returns `{"validation": <verdict>}`. Unknown providers and invalid
    `provider_config` yield HTTP 400; validator failures yield HTTP 500.
    """
    provider_registry = req.app.state.provider_registry
    validator = req.app.state.validator

    try:
        provider = provider_registry.get(request.provider.lower())
    except KeyError as e:
        # Chain the lookup error so tracebacks keep the original cause (B904).
        raise HTTPException(status_code=400, detail=str(e)) from e

    try:
        # Per-request overrides win over the provider's declared defaults.
        merged = {**provider.default_config().model_dump(), **(request.provider_config or {})}
        provider_config = provider.config_class(**merged)
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Invalid provider_config: {e}") from e

    try:
        result = validator.validate(
            test=request.test,
            response=request.response,
            meta=request.meta or {},
            provider=provider,
            provider_config=provider_config,
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Validation error: {str(e)}") from e
    return {"validation": result}
@@ -0,0 +1,159 @@
1
+ from pydantic import BaseModel, Field
2
+ from typing import Optional, Dict, Any
3
+
4
+
5
# OpenAPI description shared by the `provider_config` field on both request
# models below. NOTE(review): keep in sync with each provider's config_class
# defaults — these values are documentation only, not enforced here.
_PROVIDER_CONFIG_DESCRIPTION = """
Per-request provider overrides. Fields depend on the selected provider:

**ollama**
- `model` (str) — default: `llama3:8b-instruct-q4_0`
- `temperature` (float) — default: `0.7`
- `max_tokens` (int) — default: `700`
- `top_p` (float) — default: `0.9`
- `url` (str) — default: `http://localhost:11434/api/generate`
- `timeout` (int, seconds) — default: `300`

**openai**
- `model` (str) — default: `gpt-4o`
- `temperature` (float) — default: `0.7`
- `max_tokens` (int) — default: `700`
- `timeout` (int, seconds) — default: `60`
- `api_key` (str) — required
- `base_url` (str) — default: `https://api.openai.com/v1/chat/completions`

**anthropic**
- `model` (str) — default: `claude-sonnet-4-6`
- `temperature` (float) — default: `0.7`
- `max_tokens` (int) — default: `700`
- `timeout` (int, seconds) — default: `60`
- `api_key` (str) — required
- `base_url` (str) — default: `https://api.anthropic.com/v1/messages`
- `anthropic_version` (str) — default: `2023-06-01`
""".strip()
33
+
34
+
35
class TestRequest(BaseModel):
    """Request body for POST /generateAndRun."""

    endpoint: str  # target URL the generated tests are executed against
    method: str  # HTTP method, e.g. "POST" or "GET"
    # default_factory instead of a mutable literal default — the idiomatic
    # pydantic way to declare per-instance mutable defaults.
    headers: dict = Field(default_factory=dict)
    payload: dict  # example payload the plugin mutates into test cases
    # Optional per-field constraint hints, e.g. {"userId": "numeric [1-999]"}.
    payload_meta: Optional[Dict[str, Any]] = None
    test_type: str  # plugin key: FUZZ | AUTH | PEN (matched case-insensitively)
    max_cases: int = Field(
        default=10,
        ge=3,
        le=100,
        description="Maximum number of test cases to generate (3-100)",
    )
    # NOTE(review): this field name shadows pydantic's BaseModel.validate;
    # renaming would break the public API schema, so it is kept as-is.
    validate: bool = True
    provider: str = Field(
        default="ollama",
        description="LLM provider: ollama | openai | anthropic",
    )
    provider_config: Optional[Dict[str, Any]] = Field(
        default=None,
        description=_PROVIDER_CONFIG_DESCRIPTION,
    )

    # One Swagger example per supported provider.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {
                    "endpoint": "https://jsonplaceholder.typicode.com/posts",
                    "method": "POST",
                    "headers": {"Content-Type": "application/json"},
                    "payload": {"title": "Validra Test", "body": "Testing fuzzy payload generation", "userId": 30},
                    "payload_meta": {
                        "body": "required, alphanumeric [1-50]",
                        "title": "optional, alphanumeric [1-50]",
                        "userId": "numeric [1-999]",
                    },
                    "test_type": "FUZZ",
                    "max_cases": 10,
                    "validate": True,
                    "provider": "ollama",
                    "provider_config": {
                        "model": "llama3:8b-instruct-q4_0",
                        "temperature": 0.5,
                        "top_p": 0.9,
                        "max_tokens": 700,
                        "timeout": 300,
                        "url": "http://localhost:11434/api/generate",
                    },
                },
                {
                    "endpoint": "https://jsonplaceholder.typicode.com/posts",
                    "method": "POST",
                    "headers": {"Content-Type": "application/json"},
                    "payload": {"title": "Validra Test", "body": "Testing fuzzy payload generation", "userId": 30},
                    "test_type": "FUZZ",
                    "max_cases": 10,
                    "validate": True,
                    "provider": "openai",
                    "provider_config": {
                        "api_key": "sk-...",
                        "model": "gpt-4o",
                        "temperature": 0.7,
                        "max_tokens": 700,
                        "timeout": 60,
                    },
                },
                {
                    "endpoint": "https://jsonplaceholder.typicode.com/posts",
                    "method": "POST",
                    "headers": {"Content-Type": "application/json"},
                    "payload": {"title": "Validra Test", "body": "Testing fuzzy payload generation", "userId": 30},
                    "test_type": "FUZZ",
                    "max_cases": 10,
                    "validate": True,
                    "provider": "anthropic",
                    "provider_config": {
                        "api_key": "sk-ant-...",
                        "model": "claude-sonnet-4-6",
                        "temperature": 0.7,
                        "max_tokens": 700,
                        "timeout": 60,
                    },
                },
            ]
        }
    }
121
+
122
+
123
class ValidateRequest(BaseModel):
    """Request body for POST /validate.

    Re-validates one test/response pair previously produced by /generateAndRun.
    """

    test: Dict[str, Any]  # the generated test case (id, description, payload, ...)
    response: Dict[str, Any]  # observed HTTP response, e.g. {"status_code": ..., "body": ...}
    meta: Optional[Dict[str, Any]] = None  # optional per-field constraint hints
    provider: str = Field(
        default="ollama",
        description="LLM provider: ollama | openai | anthropic",
    )
    provider_config: Optional[Dict[str, Any]] = Field(
        default=None,
        description=_PROVIDER_CONFIG_DESCRIPTION,
    )

    # Swagger example mirroring a /generateAndRun result event.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {
                    "test": {
                        "id": "tc-001",
                        "description": "Body too long",
                        "payload": {
                            "title": "Validra Test",
                            "body": "Testing fuzzy payload generation",
                            "userId": 30,
                        },
                    },
                    "response": {"status_code": 201, "body": {"id": 101}},
                    "meta": {
                        "body": "required, alphanumeric [1-50]",
                        "title": "optional, alphanumeric [1-50]",
                        "userId": "numeric [1-999]",
                    },
                    "provider": "ollama",
                }
            ]
        }
    }
@@ -0,0 +1,30 @@
1
+ from pydantic import BaseModel
2
+ from typing import Any, Dict, List, Optional
3
+
4
+
5
class ValidationResult(BaseModel):
    """LLM verdict for a single executed test case."""

    # NOTE(review): "dstatus" looks like a typo for "status" — confirm against
    # the validator's output keys; renaming would change the public API schema.
    dstatus: str
    reason: str  # human-readable justification for the verdict
    confidence: float
9
+
10
+
11
class TestResult(BaseModel):
    """One executed test case with its request, response and verdict."""

    id: str  # sequential id, e.g. "tc-001"
    description: Optional[str]
    request: Dict[str, Any]  # {"headers": ..., "body": ...} as sent
    response: Dict[str, Any]  # {"status_code": ..., "body": ...} or {"status_code": ..., "error": ...}
    success: bool  # True when the response status code was 2xx
    duration_ms: int
    validation: Optional[ValidationResult]  # None when validation is disabled
19
+
20
+
21
class Summary(BaseModel):
    """Aggregate counts over one test run."""

    total: int
    success: int
    failed: int  # total - success
    total_duration_ms: int
26
+
27
+
28
class GenerationResponse(BaseModel):
    """Full (non-streaming) result of a generate-and-run cycle."""

    tests: List[TestResult]
    summary: Summary
app/cli.py ADDED
@@ -0,0 +1,5 @@
1
+ import uvicorn
2
+
3
+
4
def serve(host: str = "0.0.0.0", port: int = 8000, reload: bool = False) -> None:
    """Run the Validra API under uvicorn.

    Defaults preserve the original behavior (bind all interfaces on :8000,
    no autoreload); callers may now override any of them.

    NOTE(review): 0.0.0.0 exposes the service on every interface — confirm
    this is intended outside containerized deployments.
    """
    uvicorn.run("app.main:app", host=host, port=port, reload=reload)
app/config/settings.py ADDED
@@ -0,0 +1,21 @@
1
+ from pydantic_settings import BaseSettings
2
+ from typing import Optional
3
+
4
+
5
class Settings(BaseSettings):
    """Environment-driven application settings (also reads `.env` if present)."""

    default_provider: str = "ollama"
    executor_timeout: int = 60  # seconds; per-request HTTP timeout in Executor

    # Deployment-specific (changes between local/docker)
    ollama_url: str = "http://localhost:11434/api/generate"

    # Secrets — no defaults, must be set to use that provider
    openai_api_key: Optional[str] = None
    anthropic_api_key: Optional[str] = None

    class Config:
        env_file = ".env"
        env_file_encoding = "utf-8"


# Module-level singleton; imported where needed (e.g. lazily in Executor.execute).
settings = Settings()
app/engine/executor.py ADDED
@@ -0,0 +1,33 @@
1
+ import requests
2
+
3
+
4
class Executor:
    """Executes a single HTTP test request and normalizes the outcome.

    Always returns a plain dict:
      success -> {"status_code": int, "body": dict | str}
      failure -> {"status_code": int, "error": str}
    so callers never have to handle `requests` exceptions themselves.
    """

    def execute(self, request: dict, payload: dict, headers: dict = None) -> dict:
        """Send *payload* to request["endpoint"] using request["method"].

        GET sends the payload as query parameters; POST/PUT/PATCH/DELETE send
        it as a JSON body (a generalization — the original supported only
        POST and GET). Timeouts map to 408, connection failures to 503.
        """
        # Imported lazily to avoid settings side effects at module import time.
        from app.config.settings import settings

        url = request["endpoint"]
        # .upper() also accepts lowercase method names, which previously failed.
        method = request.get("method", "POST").upper()
        timeout = settings.executor_timeout

        try:
            if method == "GET":
                response = requests.get(url, params=payload, headers=headers, timeout=timeout)
            elif method in ("POST", "PUT", "PATCH", "DELETE"):
                response = requests.request(method, url, json=payload, headers=headers, timeout=timeout)
            else:
                raise Exception(f"Unsupported HTTP method: {method}")

            try:
                body = response.json()
            except Exception:
                # Non-JSON responses are preserved verbatim as text.
                body = response.text

            return {"status_code": response.status_code, "body": body}

        except requests.exceptions.Timeout:
            return {"status_code": 408, "error": "timeout"}
        except requests.exceptions.ConnectionError:
            return {"status_code": 503, "error": "connection_error"}
        except Exception as e:
            # Also catches the "Unsupported HTTP method" raise above.
            return {"status_code": 500, "error": str(e)}
@@ -0,0 +1,136 @@
1
+ import time
2
+
3
+ from app.engine.executor import Executor
4
+ from app.validator.base import BaseValidator
5
+
6
+
7
class Orchestrator:
    """Coordinates a plugin (generation), executor (HTTP) and validator (LLM)."""

    def __init__(
        self,
        plugin,
        executor: Executor,
        validator: BaseValidator,
        provider,
        provider_config,
    ):
        self.plugin = plugin
        self.executor = executor
        self.validator = validator
        self.provider = provider
        self.provider_config = provider_config

    def generate(self, payload: dict, max_cases: int) -> list:
        """Ask the plugin to produce up to *max_cases* test cases for *payload*."""
        return self.plugin.generate(
            example=payload,
            previous_cases=[],
            max_cases=max_cases,
            meta=payload.get("meta", {}),
            provider=self.provider,
            provider_config=self.provider_config,
        )

    def run_stream(self, request: dict, tests: list):
        """Generator yielding typed step events for each test.

        Event shapes:
        {"event": "executing", "progress": N, "total": T}
        {"event": "validating", "progress": N, "total": T}
        {"event": "result", "progress": N, "total": T, "result": {...}}
        """
        validate_enabled = request.get("validate", True)
        meta = request.get("meta", {})
        total = len(tests)

        for idx, test in enumerate(tests, start=1):
            payload = test.get("payload", {})
            test_headers = test.get("headers")

            yield {"event": "executing", "progress": idx, "total": total}

            start = time.time()
            response = self.executor.execute(request, payload, headers=test_headers)
            duration = int((time.time() - start) * 1000)
            # 2xx counts as success; executor error dicts carry their own code,
            # and a missing code defaults to 500.
            success = 200 <= response.get("status_code", 500) < 300

            validation_result = None
            if validate_enabled:
                yield {"event": "validating", "progress": idx, "total": total}
                validation_result = self.validator.validate(
                    test=test,
                    response=response,
                    meta=meta,
                    provider=self.provider,
                    provider_config=self.provider_config,
                )

            yield {
                "event": "result",
                "progress": idx,
                "total": total,
                "result": {
                    "id": f"tc-{idx:03}",
                    "description": test.get("description"),
                    "request": {
                        # Per-test headers (if any) take precedence over the
                        # request-level defaults.
                        "headers": test_headers if test_headers is not None else request.get("headers", {}),
                        "body": payload,
                    },
                    "response": response,
                    "success": success,
                    "duration_ms": duration,
                    "validation": validation_result,
                },
            }

    def run(self, request: dict, tests: list) -> dict:
        """Execute all *tests* synchronously; return {"tests": [...], "summary": {...}}.

        Implemented on top of run_stream() so the streaming and batch code
        paths cannot drift apart (previously this method duplicated the
        entire execution loop).
        """
        enriched_tests = [
            step["result"]
            for step in self.run_stream(request, tests)
            if step["event"] == "result"
        ]
        success_count = sum(1 for t in enriched_tests if t["success"])
        return {
            "tests": enriched_tests,
            "summary": {
                "total": len(enriched_tests),
                "success": success_count,
                "failed": len(enriched_tests) - success_count,
                "total_duration_ms": sum(t["duration_ms"] for t in enriched_tests),
            },
        }
app/main.py ADDED
@@ -0,0 +1,95 @@
1
+ import logging
2
+ import traceback
3
+ from contextlib import asynccontextmanager
4
+ from pathlib import Path
5
+
6
+ from fastapi import FastAPI, Request
7
+ from fastapi.middleware.cors import CORSMiddleware
8
+ from fastapi.openapi.docs import get_swagger_ui_html
9
+ from fastapi.responses import FileResponse, JSONResponse
10
+ from fastapi.staticfiles import StaticFiles
11
+
12
# Application logger; the unhandled-exception handler in create_app logs through it.
logger = logging.getLogger("validra")
13
+
14
+ from app.api.routes import generation, validation
15
+ from app.engine.executor import Executor
16
+ from app.plugins.fuzz.plugin import FuzzPlugin
17
+ from app.plugins.pen.plugin import PenTestPlugin
18
+ from app.plugins.registry import PluginRegistry
19
+ from app.plugins.security.plugin import SecurityPlugin
20
+ from app.providers.anthropic.provider import AnthropicProvider
21
+ from app.providers.ollama.provider import OllamaProvider
22
+ from app.providers.openai.provider import OpenAIProvider
23
+ from app.providers.registry import ProviderRegistry
24
+ from app.validator.llm_validator import LLMValidator
25
+
26
+
27
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Build all shared singletons once at startup and park them on app.state.

    Route handlers retrieve these via `request.app.state`, so nothing here is
    re-created per request.
    """
    # ── Plugins ──────────────────────────────────────────────────────────────
    plugin_registry = PluginRegistry()
    plugin_registry.register("FUZZ", FuzzPlugin())
    plugin_registry.register("AUTH", SecurityPlugin())
    plugin_registry.register("PEN", PenTestPlugin())
    app.state.plugin_registry = plugin_registry

    # ── Providers ────────────────────────────────────────────────────────────
    provider_registry = ProviderRegistry()
    provider_registry.register("ollama", OllamaProvider())
    provider_registry.register("openai", OpenAIProvider())
    provider_registry.register("anthropic", AnthropicProvider())
    app.state.provider_registry = provider_registry

    # ── Shared singletons ────────────────────────────────────────────────────
    app.state.executor = Executor()
    app.state.validator = LLMValidator()

    yield
48
+
49
+
50
def create_app() -> FastAPI:
    """Build and configure the Validra FastAPI application."""
    app = FastAPI(
        title="Validra",
        swagger_ui_parameters={"defaultModelsExpandDepth": -1},
        docs_url=None,  # replaced by the custom /docs route below
        redoc_url=None,
        lifespan=lifespan,
    )

    # Dev frontend origin only; widen deliberately if other origins are needed.
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["http://localhost:3000"],
        allow_methods=["*"],
        allow_headers=["*"],
    )

    _static_dir = Path(__file__).parent / "static"
    app.mount("/static", StaticFiles(directory=str(_static_dir)), name="static")
    app.include_router(generation.router)
    app.include_router(validation.router)

    @app.get("/docs", include_in_schema=False)
    async def custom_swagger_ui_html():
        # Custom Swagger page so the schema-models section can be hidden.
        return get_swagger_ui_html(
            openapi_url=app.openapi_url,
            title="Validra",
            swagger_favicon_url="/favicon.ico",
            swagger_ui_parameters={
                "defaultModelsExpandDepth": -1,
                "defaultModelExpandDepth": -1,
            },
        )

    @app.get("/favicon.ico", include_in_schema=False)
    async def favicon():
        return FileResponse(str(Path(__file__).parent / "static" / "favicon.ico"))

    @app.exception_handler(Exception)
    async def unhandled_exception_handler(_req: Request, exc: Exception):
        # Log the full traceback server-side, but return only a generic
        # message: echoing str(exc) to clients can leak internal details
        # (paths, credentials embedded in error strings, SQL, ...).
        logger.error("Unhandled exception:\n%s", traceback.format_exc())
        return JSONResponse(status_code=500, content={"detail": "Internal server error"})

    return app
93
+
94
+
95
# ASGI entry point (referenced as "app.main:app" by app.cli.serve).
app = create_app()
app/plugins/base.py ADDED
@@ -0,0 +1,18 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import Optional
3
+
4
+
5
class BasePlugin(ABC):
    """Interface for test-case generator plugins (FUZZ / AUTH / PEN)."""

    name: str = "base"  # registry display name; subclasses override

    @abstractmethod
    def generate(
        self,
        example: dict,
        previous_cases: list,
        max_cases: int,
        meta: Optional[dict],
        provider,
        provider_config,
    ) -> list:
        """Return up to *max_cases* test-case dicts derived from *example*.

        Each dict is expected to carry at least a "payload"; "description"
        and "headers" are optional (see Orchestrator.run_stream, which reads
        exactly those keys).
        """
        raise NotImplementedError