scriptgini 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,50 @@
1
+ from datetime import datetime
2
+ from pydantic import BaseModel, Field
3
+
4
+ from app.models.generated_script import ScriptStatus
5
+ from app.models.project import TestFramework
6
+ from app.models.script_run import ScriptRunStatus
7
+ from app.config import settings
8
+ from app.llm.provider import LLMProvider
9
+
10
+
11
class GenerateScriptRequest(BaseModel):
    # Request payload for kicking off script generation for a test case.
    # The provider falls back to the server-wide default; model and framework
    # are optional per-request overrides.
    llm_provider: LLMProvider = settings.DEFAULT_LLM_PROVIDER
    llm_model: str | None = Field(None, description="Override the default model for the chosen provider")
    framework: TestFramework | None = Field(None, description="Override the project's default framework")
17
+
18
+
19
class GeneratedScriptResponse(BaseModel):
    # API representation of a stored generated-script record.
    id: int
    project_id: int
    test_case_id: int
    framework: str
    llm_provider: str
    llm_model: str
    # Nullable fields: content/error presumably depend on generation outcome
    # (see `status`) — confirm against the writer of this record.
    script_content: str | None
    status: ScriptStatus
    error_message: str | None
    token_usage: int | None
    created_at: datetime
    updated_at: datetime

    # Allow building the response directly from ORM objects.
    model_config = {"from_attributes": True}
34
+
35
+
36
class ScriptRunResponse(BaseModel):
    # API representation of a single persisted script-execution record.
    id: int
    script_id: int
    project_id: int
    test_case_id: int
    status: ScriptRunStatus
    success: bool
    exit_code: int
    stdout: str
    stderr: str
    duration_seconds: float
    command: str
    created_at: datetime

    # Allow building the response directly from ORM objects.
    model_config = {"from_attributes": True}
app/schemas/project.py ADDED
@@ -0,0 +1,36 @@
1
+ from datetime import datetime
2
+ from pydantic import BaseModel, HttpUrl, Field
3
+
4
+ from app.models.project import TestFramework, SelectorPreference
5
+
6
+
7
class ProjectCreate(BaseModel):
    # Payload for registering a new project / Application Under Test (AUT).
    name: str = Field(..., max_length=255)
    description: str | None = None
    # NOTE(review): kept as plain `str` even though the module imports
    # HttpUrl — switching would tighten validation; confirm before changing.
    aut_base_url: str = Field(..., description="Base URL of the Application Under Test")
    default_framework: TestFramework = TestFramework.playwright_python
    selector_preference: SelectorPreference = SelectorPreference.role
    auth_hints: str | None = None
14
+
15
+
16
class ProjectUpdate(BaseModel):
    # Partial-update payload: every field is optional; only supplied
    # fields are applied by the caller.
    name: str | None = Field(None, max_length=255)
    description: str | None = None
    aut_base_url: str | None = None
    default_framework: TestFramework | None = None
    selector_preference: SelectorPreference | None = None
    auth_hints: str | None = None
23
+
24
+
25
class ProjectResponse(BaseModel):
    # API representation of a stored project record.
    id: int
    name: str
    description: str | None
    aut_base_url: str
    default_framework: TestFramework
    selector_preference: SelectorPreference
    auth_hints: str | None
    created_at: datetime
    updated_at: datetime

    # Allow building the response directly from ORM objects.
    model_config = {"from_attributes": True}
@@ -0,0 +1,34 @@
1
+ from datetime import datetime
2
+ from pydantic import BaseModel, Field
3
+
4
+ from app.models.test_case import TestCaseFormat
5
+
6
+
7
class TestCaseCreate(BaseModel):
    # Payload for adding a functional test case to a project.
    title: str = Field(..., max_length=512)
    format: TestCaseFormat = TestCaseFormat.step_based
    content: str = Field(..., description="Full test case text (steps or BDD scenarios)")
    preconditions: str | None = None
    test_data_hints: str | None = None
13
+
14
+
15
class TestCaseUpdate(BaseModel):
    # Partial-update payload: every field is optional; only supplied
    # fields are applied by the caller.
    title: str | None = Field(None, max_length=512)
    format: TestCaseFormat | None = None
    content: str | None = None
    preconditions: str | None = None
    test_data_hints: str | None = None
21
+
22
+
23
class TestCaseResponse(BaseModel):
    # API representation of a stored test-case record.
    id: int
    project_id: int
    title: str
    format: TestCaseFormat
    content: str
    preconditions: str | None
    test_data_hints: str | None
    created_at: datetime
    updated_at: datetime

    # Allow building the response directly from ORM objects.
    model_config = {"from_attributes": True}
@@ -0,0 +1,133 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import logging
5
+ import subprocess
6
+ import threading
7
+ from datetime import datetime, timezone
8
+ from pathlib import Path
9
+ import re
10
+
11
+ from app.config import settings
12
+
13
+ logger = logging.getLogger(__name__)
14
+ _export_lock = threading.Lock()
15
+
16
+
17
def export_generated_script(
    *,
    project_name: str,
    test_case_title: str,
    script_id: int,
    framework: str,
    llm_provider: str,
    llm_model: str,
    script_content: str,
) -> str | None:
    """Export a generated script (plus a JSON metadata sidecar) to the
    configured git repository, then commit and push.

    Returns the repo-relative POSIX path of the exported script, or ``None``
    when the export feature is disabled or the repo URL is not configured.
    Git failures propagate as ``RuntimeError`` from ``_git_run``.
    """
    if not settings.AUTO_EXPORT_GIT_ENABLED:
        return None
    if not settings.AUTO_EXPORT_GIT_REPO_URL.strip():
        logger.warning("Automatic git export is enabled but AUTO_EXPORT_GIT_REPO_URL is empty.")
        return None

    # Serialize exports: the local checkout directory is shared process-wide.
    with _export_lock:
        repo_dir = _prepare_repo_checkout()
        rel_path = _build_relative_script_path(project_name, test_case_title, script_id)
        script_file = repo_dir / rel_path
        script_file.parent.mkdir(parents=True, exist_ok=True)
        script_file.write_text(script_content, encoding="utf-8")

        # Sidecar metadata file next to the script (same stem, .json suffix).
        sidecar = script_file.with_suffix(".json")
        sidecar.write_text(
            json.dumps(
                {
                    "script_id": script_id,
                    "project_name": project_name,
                    "test_case_title": test_case_title,
                    "framework": framework,
                    "llm_provider": llm_provider,
                    "llm_model": llm_model,
                    "exported_at": datetime.now(timezone.utc).isoformat(),
                    "script_path": rel_path.as_posix(),
                },
                indent=2,
            ),
            encoding="utf-8",
        )

        _git_run(["add", rel_path.as_posix(), sidecar.relative_to(repo_dir).as_posix()], cwd=repo_dir)
        if not _repo_has_changes(repo_dir):
            # Identical content is already committed — nothing to push.
            return rel_path.as_posix()

        commit_title = f"Add generated script {script_id} for {_slugify(test_case_title)}"
        commit_body = (
            f"Project: {project_name}\n"
            f"Test case: {test_case_title}\n"
            f"Framework: {framework}\n"
            f"Provider: {llm_provider}\n"
            f"Model: {llm_model or 'runtime default'}"
        )
        # Per-invocation author identity so no global git config is required.
        _git_run(
            [
                "-c", f"user.name={settings.AUTO_EXPORT_GIT_USER_NAME}",
                "-c", f"user.email={settings.AUTO_EXPORT_GIT_USER_EMAIL}",
                "commit", "-m", commit_title, "-m", commit_body,
            ],
            cwd=repo_dir,
        )
        _git_run(["push", "origin", settings.AUTO_EXPORT_GIT_BRANCH], cwd=repo_dir)
        logger.info(
            "Exported generated script to sandbox repo: path=%s branch=%s",
            rel_path.as_posix(),
            settings.AUTO_EXPORT_GIT_BRANCH,
        )
        return rel_path.as_posix()
86
+
87
+
88
def _prepare_repo_checkout() -> Path:
    """Ensure a local checkout of the export repo exists on the configured
    branch and is up to date; return the checkout directory."""
    repo_dir = Path(settings.AUTO_EXPORT_GIT_LOCAL_PATH).expanduser().resolve()
    repo_dir.parent.mkdir(parents=True, exist_ok=True)

    if (repo_dir / ".git").exists():
        # Existing clone: refresh the remote URL, then fast-forward the branch.
        for args in (
            ["remote", "set-url", "origin", settings.AUTO_EXPORT_GIT_REPO_URL],
            ["fetch", "origin", settings.AUTO_EXPORT_GIT_BRANCH],
            ["checkout", settings.AUTO_EXPORT_GIT_BRANCH],
            ["pull", "--ff-only", "origin", settings.AUTO_EXPORT_GIT_BRANCH],
        ):
            _git_run(args, cwd=repo_dir)
        return repo_dir

    # No checkout yet: clone only the export branch.
    _git_run(
        [
            "clone",
            "--branch", settings.AUTO_EXPORT_GIT_BRANCH,
            "--single-branch",
            settings.AUTO_EXPORT_GIT_REPO_URL,
            str(repo_dir),
        ],
        cwd=repo_dir.parent,
    )
    return repo_dir
111
+
112
+
113
def _build_relative_script_path(project_name: str, test_case_title: str, script_id: int) -> Path:
    """Repo-relative path: generated-scripts/<project-slug>/<title-slug>-<id>.py"""
    filename = f"{_slugify(test_case_title)}-{script_id}.py"
    return Path("generated-scripts", _slugify(project_name), filename)
115
+
116
+
117
def _repo_has_changes(repo_dir: Path) -> bool:
    """Return True when ``git status --short`` reports any pending change."""
    status_output = _git_run(["status", "--short"], cwd=repo_dir).stdout
    return status_output.strip() != ""
120
+
121
+
122
def _git_run(args: list[str], *, cwd: Path) -> subprocess.CompletedProcess[str]:
    """Run a git subcommand in *cwd*, capturing text output.

    Raises RuntimeError on a non-zero exit status, preferring git's own
    stderr/stdout text as the message.
    """
    command = ["git", *args]
    result = subprocess.run(command, cwd=cwd, capture_output=True, text=True, check=False)
    if result.returncode == 0:
        return result
    detail = result.stderr.strip() or result.stdout.strip()
    raise RuntimeError(detail or f"git command failed: {' '.join(command)}")
128
+
129
+
130
+ def _slugify(value: str) -> str:
131
+ normalized = re.sub(r"[^a-zA-Z0-9]+", "-", value.strip().lower())
132
+ collapsed = re.sub(r"-+", "-", normalized).strip("-")
133
+ return collapsed or "untitled"
@@ -0,0 +1,381 @@
1
+ Metadata-Version: 2.4
2
+ Name: scriptgini
3
+ Version: 0.1.2
4
+ Summary: Agentic AI system that converts functional test cases into automation test scripts.
5
+ Author: ScriptGini Team
6
+ License: Proprietary
7
+ Classifier: Programming Language :: Python :: 3
8
+ Classifier: Programming Language :: Python :: 3 :: Only
9
+ Classifier: Programming Language :: Python :: 3.11
10
+ Classifier: Programming Language :: Python :: 3.12
11
+ Classifier: Framework :: FastAPI
12
+ Requires-Python: >=3.11
13
+ Description-Content-Type: text/markdown
14
+
15
+ # ScriptGini
16
+
17
+ > **Enterprise-grade Agentic AI system that converts functional test cases into high-quality, review-ready automation test scripts.**
18
+
19
+ ---
20
+
21
+ ## What is ScriptGini?
22
+
23
+ ScriptGini is an AI-powered test automation engine built for Quality Engineering teams. You feed it a functional test case and an Application Under Test (AUT) URL — it returns a production-ready automation script in your chosen framework, generated by a multi-step LangGraph agent that reasons about test intent before writing a single line of code.
24
+
25
+ ---
26
+
27
+ ## Features
28
+
29
+ - **Agentic 3-node LangGraph pipeline** — Intent analysis → Script generation → Quality review
30
+ - **Multi-provider LLM support** — OpenAI, Ollama (local), OpenRouter, Google Gemini, AWS Bedrock
31
+ - **Framework-agnostic output** — Playwright Python, Selenium Python, UFT VBScript, Cypress JS
32
+ - **Intelligent selector strategy** — Role → Label → data-testid → CSS → XPath (last resort)
33
+ - **Project & AUT management** — Store multiple projects, each with its own base URL and defaults
34
+ - **Full test case history** — Every generated script is stored in SQLite with status and token usage
35
+ - **Execution history persistence** — Every run is stored in `script_runs` with stdout/stderr, exit code, and duration
36
+ - **Hardened execution sandbox** — Script runs use isolated Python mode, static safety validation, and restricted environment variables
37
+ - **Bulk job orchestration** — Project-level bulk generate and bulk run with pollable job status
38
+ - **Run analytics dashboard** — Project-level pass/fail/timeout metrics and recent failure feed
39
+ - **Richer test case intake** — Import `.txt`, `.md`, `.json`, `.csv`, `.feature`, `.yml/.yaml`, and `.xlsx`
40
+ - **Import preview mapping** — Preview parsed scenarios in the UI before creating a project workspace
41
+ - **REST API** — FastAPI with auto-generated Swagger UI
42
+ - **Alembic migrations** — Safe, versioned schema management over SQLite
43
+
44
+ ---
45
+
46
+ ## Tech Stack
47
+
48
+ | Layer | Technology |
49
+ |---|---|
50
+ | API | FastAPI + Uvicorn |
51
+ | Agentic AI | LangGraph + LangChain |
52
+ | LLM Providers | OpenAI, Ollama, OpenRouter, Gemini, Bedrock |
53
+ | Database | SQLite |
54
+ | ORM | SQLAlchemy 2.0 |
55
+ | Migrations | Alembic |
56
+ | Config | Pydantic Settings (.env) |
57
+
58
+ ---
59
+
60
+ ## Quick Start
61
+
62
+ ### Windows
63
+
64
+ ```bat
65
+ start.bat
66
+ ```
67
+
68
+ ### Linux / macOS
69
+
70
+ ```bash
71
+ chmod +x start.sh
72
+ ./start.sh
73
+ ```
74
+
75
+ The script will:
76
+ 1. Create a Python virtual environment
77
+ 2. Install all dependencies
78
+ 3. Copy `.env.example` → `.env` if missing (edit it before re-running)
79
+ 4. Run Alembic migrations
80
+ 5. Start the server and open Swagger UI in your browser
81
+
82
+ To load a ready-made sample workspace, use the `Load Demo Project` button in the web UI or call `POST /api/v1/demo/load`.
83
+
84
+ ---
85
+
86
+ ## Configuration
87
+
88
+ Copy `.env.example` to `.env` and fill in the values you need:
89
+
90
+ ```bash
91
+ cp .env.example .env
92
+ ```
93
+
94
+ ```env
95
+ # Choose your default provider
96
+ DEFAULT_LLM_PROVIDER=openrouter # openai | ollama | openrouter | gemini | bedrock
97
+
98
+ # OpenAI
99
+ OPENAI_API_KEY=your_openai_api_key_here
100
+
101
+ # Ollama (local — no key needed)
102
+ OLLAMA_BASE_URL=http://localhost:11434
103
+ OLLAMA_MODEL=llama3
104
+ OLLAMA_NUM_PREDICT=700
105
+
106
+ # Generation latency controls
107
+ LLM_REQUEST_TIMEOUT_SECONDS=45
108
+ SCRIPT_GENERATION_TIMEOUT_SECONDS=180
109
+ SKIP_REVIEW_FOR_OLLAMA=true
110
+
111
+ # OpenRouter
112
+ OPENROUTER_API_KEY=your_openrouter_api_key_here
113
+ OPENROUTER_MODEL=openai/gpt-4o
114
+
115
+ # Google Gemini
116
+ GOOGLE_API_KEY=your_google_api_key_here
117
+
118
+ # AWS Bedrock
119
+ AWS_ACCESS_KEY_ID=your_aws_access_key_id
120
+ AWS_SECRET_ACCESS_KEY=your_aws_secret_access_key
121
+ AWS_REGION_NAME=us-east-1
122
+ ```
123
+
124
+ > `.env` is git-ignored. Never commit real API keys.
125
+
126
+ If local generation feels slow, reduce `OLLAMA_NUM_PREDICT`, keep `SKIP_REVIEW_FOR_OLLAMA=true`, or switch to a smaller/faster Ollama model.
127
+
128
+ ---
129
+
130
+ ## API Reference
131
+
132
+ Once running, visit:
133
+
134
+ | URL | Description |
135
+ |---|---|
136
+ | `http://localhost:8000/docs` | Swagger UI (interactive) |
137
+ | `http://localhost:8000/redoc` | ReDoc |
138
+ | `http://localhost:8000/health` | Health check |
139
+
140
+ ### Core Workflow
141
+
142
+ #### 1. Create a Project (AUT)
143
+
144
+ ```http
145
+ POST /api/v1/projects/
146
+ ```
147
+
148
+ ```json
149
+ {
150
+ "name": "My Web App",
151
+ "aut_base_url": "https://example.com",
152
+ "default_framework": "playwright_python",
153
+ "selector_preference": "role",
154
+ "auth_hints": "Login with admin/admin on /login"
155
+ }
156
+ ```
157
+
158
+ #### 2. Add a Test Case
159
+
160
+ ```http
161
+ POST /api/v1/projects/{project_id}/test-cases/
162
+ ```
163
+
164
+ ```json
165
+ {
166
+ "title": "TC-001 Successful Login",
167
+ "format": "step_based",
168
+ "content": "Step 1: Navigate to /login\nStep 2: Enter username 'admin'\nStep 3: Enter password 'admin123'\nStep 4: Click Login button\nExpected: User is redirected to /dashboard and sees 'Welcome' message",
169
+ "preconditions": "User account exists in the system",
170
+ "test_data_hints": "username=admin, password=admin123"
171
+ }
172
+ ```
173
+
174
+ #### 3. Generate a Script
175
+
176
+ ```http
177
+ POST /api/v1/projects/{project_id}/test-cases/{tc_id}/scripts/generate
178
+ ```
179
+
180
+ ```json
181
+ {
182
+ "llm_provider": "openrouter",
183
+ "llm_model": "openai/gpt-4o",
184
+ "framework": "playwright_python"
185
+ }
186
+ ```
187
+
188
+ Returns `202 Accepted` immediately. The agent runs in the background.
189
+
190
+ #### 4. Poll for the Result
191
+
192
+ ```http
193
+ GET /api/v1/projects/{project_id}/test-cases/{tc_id}/scripts/{script_id}
194
+ ```
195
+
196
+ Status values: `pending` → `generating` → `completed` | `failed`
197
+
198
+ #### 5. Run a Generated Playwright Script
199
+
200
+ ```http
201
+ POST /api/v1/projects/{project_id}/test-cases/{tc_id}/scripts/{script_id}/run
202
+ ```
203
+
204
+ Returns a persisted run record with:
205
+ - `status` (`completed` | `failed` | `timed_out`)
206
+ - `stdout`, `stderr`
207
+ - `exit_code`, `duration_seconds`
208
+
209
+ Execution safeguards:
210
+ - Script content is statically validated before execution.
211
+ - Unsafe imports and unsafe builtin calls are rejected and persisted as failed runs.
212
+ - Runtime uses Python isolated mode with a restricted environment.
213
+
214
+ #### 6. List Script Run History
215
+
216
+ ```http
217
+ GET /api/v1/projects/{project_id}/test-cases/{tc_id}/scripts/{script_id}/runs
218
+ ```
219
+
220
+ #### 7. Bulk Generate Scripts (Project-level)
221
+
222
+ ```http
223
+ POST /api/v1/projects/{project_id}/scripts/bulk-generate
224
+ ```
225
+
226
+ ```json
227
+ {
228
+ "llm_provider": "openrouter",
229
+ "llm_model": "openai/gpt-4o",
230
+ "framework": "playwright_python",
231
+ "test_case_ids": [1, 2, 3]
232
+ }
233
+ ```
234
+
235
+ #### 8. Bulk Run Latest Completed Scripts
236
+
237
+ ```http
238
+ POST /api/v1/projects/{project_id}/scripts/bulk-run
239
+ ```
240
+
241
+ #### 9. Poll Bulk Job Status
242
+
243
+ ```http
244
+ GET /api/v1/projects/{project_id}/scripts/bulk-jobs/{job_id}
245
+ ```
246
+
247
+ #### 10. Get Run Analytics (Project-level)
248
+
249
+ ```http
250
+ GET /api/v1/projects/{project_id}/analytics/runs
251
+ ```
252
+
253
+ Returns aggregate execution metrics and latest failure details.
254
+
255
+ ---
256
+
257
+ ## LangGraph Agent Pipeline
258
+
259
+ ```
260
+ ┌─────────────────┐ ┌──────────────────┐ ┌───────────────┐
261
+ │ parse_intent │────▶│ generate_script │────▶│ review_script │
262
+ │ │ │ │ │ │
263
+ │ Extracts: │ │ Produces full │ │ QA checks: │
264
+ │ • Business goal │ │ framework- │ │ • Assertions │
265
+ │ • Actions list │ │ specific script │ │ • TODO markers│
266
+ │ • Assertions │ │ │ │ • Rewrites if │
267
+ │ • Preconditions │ │ │ │ needed │
268
+ └─────────────────┘ └──────────────────┘ └───────────────┘
269
+ ```
270
+
271
+ ---
272
+
273
+ ## Supported Frameworks
274
+
275
+ | Key | Framework |
276
+ |---|---|
277
+ | `playwright_python` | Playwright for Python (default) |
278
+ | `selenium_python` | Selenium WebDriver Python |
279
+ | `uft_vbscript` | UFT / QTP VBScript |
280
+ | `cypress_js` | Cypress JavaScript |
281
+
282
+ ---
283
+
284
+ ## Project Structure
285
+
286
+ ```
287
+ scriptgini/
288
+ ├── app/
289
+ │ ├── main.py # FastAPI application
290
+ │ ├── config.py # Settings loaded from .env
291
+ │ ├── database.py # SQLAlchemy engine + session
292
+ │ ├── models/
293
+ │ │ ├── project.py # Project / AUT model
294
+ │ │ ├── test_case.py # Test case model
295
+ │ │ └── generated_script.py # Script history model
296
+ │ ├── schemas/ # Pydantic request/response schemas
297
+ │ ├── routers/
298
+ │ │ ├── projects.py # CRUD — projects
299
+ │ │ ├── test_cases.py # CRUD — test cases
300
+ │ │ └── scripts.py # Generate + history
301
+ │ ├── agents/
302
+ │ │ ├── script_gini_agent.py # LangGraph graph definition
303
+ │ │ └── prompts.py # All prompt templates
304
+ │ └── llm/
305
+ │ └── provider.py # LLM provider factory
306
+ ├── alembic/ # Database migration scripts
307
+ ├── alembic.ini
308
+ ├── requirements.txt
309
+ ├── .env.example # Template — copy to .env
310
+ ├── start.bat # Windows launcher
311
+ └── start.sh # Linux / macOS launcher
312
+ ```
313
+
314
+ ---
315
+
316
+ ## Database Migrations
317
+
318
+ Migrations are handled automatically by `start.bat` / `start.sh`.
319
+
320
+ To run manually:
321
+
322
+ ```bash
323
+ # Apply all pending migrations
324
+ alembic upgrade head
325
+
326
+ # Create a new migration after model changes
327
+ alembic revision --autogenerate -m "description"
328
+
329
+ # Rollback one step
330
+ alembic downgrade -1
331
+ ```
332
+
333
+ ---
334
+
335
+ ## Quality Gate Policy
336
+
337
+ Every check-in is expected to pass:
338
+
339
+ 1. Unit tests
340
+ 2. **100%** coverage on `app/`
341
+ 3. `pip-audit`
342
+ 4. Trivy filesystem scan
343
+
344
+ Local commands:
345
+
346
+ ```bat
347
+ test.bat
348
+ audit.bat
349
+ trivy.bat
350
+ ```
351
+
352
+ ```bash
353
+ ./test.sh
354
+ ./audit.sh
355
+ ./trivy.sh
356
+ ```
357
+
358
+ A CI gate is configured in `.github/workflows/quality-gate.yml` to enforce the same checks on push/PR.
359
+
360
+ ---
361
+
362
+ ## Adding a New LLM Provider
363
+
364
+ 1. Add config keys to `app/config.py`
365
+ 2. Add a new `_provider()` function in `app/llm/provider.py`
366
+ 3. Register it in `get_llm()` and the `LLMProvider` type alias
367
+ 4. Add the corresponding key to `.env.example`
368
+
369
+ ---
370
+
371
+ ## Security Notes
372
+
373
+ - `.env` is git-ignored — never commit API keys
374
+ - The API has no authentication by default — add an API key middleware before exposing to a network
375
+ - UI validation only — the agent never makes live requests to the AUT
376
+
377
+ ---
378
+
379
+ ## License
380
+
381
+ MIT
@@ -0,0 +1,33 @@
1
+ app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ app/config.py,sha256=qbX0xAkMzNNrvwA561j_BU0YZgAm2r6zv0NnTvvKkPY,1844
3
+ app/database.py,sha256=NxpKEIn4PHAItidfouLa-Nm1PF1xOwwzDpwtA_f8Y3c,466
4
+ app/main.py,sha256=s6jS8N4tyHOjUY1NuQ8z4W5L2vw1kDNphdjq1i8z_cA,2346
5
+ app/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ app/agents/prompts.py,sha256=BC4uPlzMS5-i3ehLb7h0Tvlh5rHF5gz7GvFBK3xFIRI,6047
7
+ app/agents/script_gini_agent.py,sha256=yBo8U4m-GsD1SXrwAVrg7y5W1w8gjdyOgqCW5DT69vY,12386
8
+ app/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
+ app/llm/provider.py,sha256=y96eeywS24w-f_fj5hzwPtMAIqobkziJZW2Qy2aN08U,6327
10
+ app/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
11
+ app/models/bulk_job.py,sha256=oV_iwl0uEdtosjTuu07mOc2N10j4trNw0g--AhFF5WM,2729
12
+ app/models/generated_script.py,sha256=V1sPcxWq4MrZxGj6-CXYRdWMZR0QkJBe8tV8NRUFK6I,1641
13
+ app/models/project.py,sha256=JKMLMoA-Vm-VpNedGSTAvnnfU4nHTrEPLOTAPfRseh0,1571
14
+ app/models/script_run.py,sha256=GFwhYT49Ec1KVd9M4Cu8t_PONYtcw7Prfny1ugWWE8I,1515
15
+ app/models/test_case.py,sha256=NuXNXHPx2rLTrH8DFH3Mpwk3fYAhHZxIOzl0lbk-M5M,1285
16
+ app/routers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
17
+ app/routers/analytics.py,sha256=D-upxhtKiWJs2Ml_m6SSkyYckk8wdmAygGZ23HBVpXQ,2636
18
+ app/routers/bulk_jobs.py,sha256=VQWls1jYVXfUQqMplMKuxZWFvGImGhIAKl60EOqEKo4,10910
19
+ app/routers/demo.py,sha256=9QzBP51SmFkrqrp5YmC_CVrDtU5AMt1Mw6hpy4iwH4c,2862
20
+ app/routers/projects.py,sha256=RvYW6PQETM9KnEww-TOzVlkMYihjZNdUOwbHiLE3h5w,1947
21
+ app/routers/scripts.py,sha256=Lw4fknzDobsMuc1UI5pIUoJMgarSFwgNDOCxq1Dcer8,21255
22
+ app/routers/test_cases.py,sha256=6cTZubpR8r3Jbzc_oUiZwU86V-SgUfy1kg7fqKgHz9o,2639
23
+ app/schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
24
+ app/schemas/analytics.py,sha256=hUqLfWQwzUBeAujvi3j4LdVeC7IFmnvOshWHaCgirLA,618
25
+ app/schemas/bulk_job.py,sha256=8e2IXfBwikdSvcVvHdFL7opzg860bKOadv1FOAqM3Bw,1444
26
+ app/schemas/generated_script.py,sha256=eQLhdTtbOy-iqAmCArcXT7KK5l7-X7D_TwJRFQZs2ns,1312
27
+ app/schemas/project.py,sha256=isfQN-KxmhdshWvOl29bnOI1A1cIYikBXEVHBRN6Lfw,1134
28
+ app/schemas/test_case.py,sha256=1H7IHB_w1IS7SAmJ5HjQqU-1Xvi1bJ9N346sxeYZaxg,938
29
+ app/services/git_export.py,sha256=o009qiCjeoKMdi5rMLmCgXUTbDhh6hXVl16ElDj2ekY,4777
30
+ scriptgini-0.1.2.dist-info/METADATA,sha256=f5nuxu9y9mM2ckdWczGCDpHvCivq4jU3HVlGodUd1FY,10800
31
+ scriptgini-0.1.2.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
32
+ scriptgini-0.1.2.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
33
+ scriptgini-0.1.2.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (82.0.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1 @@
1
+ app