scriptgini-0.1.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
app/__init__.py ADDED
File without changes
app/agents/__init__.py ADDED
File without changes
app/agents/prompts.py ADDED
@@ -0,0 +1,147 @@
+ """
+ Prompt templates used by the ScriptGini LangGraph agent.
+ """
+
+ SYSTEM_PROMPT = """You are SCRIPT GINI, an enterprise-grade Test Script Generation Engine.
+
+ Your primary responsibility is to convert FUNCTIONAL TEST CASES into
+ HIGH-QUALITY, REVIEW-READY AUTOMATION TEST SCRIPTS using the supplied
+ APPLICATION UNDER TEST (AUT) URL as execution context.
+
+ CORE PRINCIPLES:
+ 1. Understand WHAT is being tested before writing HOW.
+ 2. Reason using generic actions (navigate, click, fill, select, assert, wait),
+    then convert to the requested framework syntax in the final output.
+ 3. Generate executable test scripts ONLY — no project scaffolding, no CI config.
+ 4. Place TODO comments where selectors or data are ambiguous.
+ 5. DO NOT hallucinate APIs, DB calls, or backend validations. UI only.
+
+ SELECTOR PRIORITY (follow strictly):
+ 1. Role-based / accessible selectors
+ 2. Label-based selectors
+ 3. data-testid / data-qa attributes
+ 4. Stable CSS selectors
+ 5. XPath — LAST RESORT only
+
+ ASSERTIONS must validate visible UI state: text, visibility, URL, page title.
+ """
+
+ INTENT_ANALYSIS_PROMPT = """\
+ Analyze the following functional test case and extract:
+ 1. Business intent — what user flow / feature is being tested
+ 2. Preconditions — what must be true before the test starts
+ 3. Key actions — list of user interactions (navigate, click, fill, select, etc.)
+ 4. Assertions — what outcomes need to be verified
+
+ AUT Base URL: {aut_base_url}
+
+ Test Case:
+ {test_case_content}
+
+ Preconditions provided: {preconditions}
+ Test data hints: {test_data_hints}
+
+ Respond in structured JSON with keys: business_intent, preconditions, actions, assertions.
+ """
+
+ SCRIPT_GENERATION_PROMPT = """\
+ Generate a complete, executable automation test script for the following test case.
+
+ Framework: {framework}
+ AUT Base URL: {aut_base_url}
+ Authentication hints: {auth_hints}
+
+ Test Case Title: {test_case_title}
+ Business Intent: {business_intent}
+ Preconditions: {preconditions}
+ Key Actions: {actions}
+ Expected Assertions: {assertions}
+
+ RULES (NON-NEGOTIABLE):
+ 1. STEP FIDELITY: Implement EVERY action as an explicit framework call, in exact order.
+    Add a comment before each step: # Step N: <original action text>
+ 2. LOCATOR PRIORITY (follow strictly):
+    role-based > label-based > placeholder > visible text > data-testid/data-qa > CSS > XPath (last resort)
+ 3. LOCATOR PROVENANCE: Infer locator names ONLY from the provided actions and assertions.
+    Never invent UI labels, placeholders, or test-ids not present in the input.
+    If you must assume, add an # ASSUMPTION: comment explaining why.
+ 4. ASSERTION TRACEABILITY: Every expected assertion must map to ≥1 framework assertion call.
+    Add a comment above each assertion: # Assert: <expected outcome text>
+ 5. NO PSEUDO-CODE: Output runnable code only. No placeholders, no stub functions.
+ 6. ARRANGE / ACT / ASSERT: Structure the script with a comment separating each phase.
+ 7. TEST DATA: If test data hints are provided use them; if not, add # No test data required.
+ 8. The script must be directly runnable — complete imports, no missing setup.
+ 9. Follow {framework} best practices and idiomatic style.
+
+ Output ONLY the script code. No explanations, no markdown fences.
+ """
+
+ PLAYWRIGHT_SCRIPT_GENERATION_PROMPT = """\
+ Generate a complete, runnable Python Playwright (sync API) + Pytest script for the following test case.
+
+ AUT Base URL: {aut_base_url}
+ Authentication hints: {auth_hints}
+
+ Test Case Title: {test_case_title}
+ Business Intent: {business_intent}
+ Preconditions: {preconditions}
+ Key Actions: {actions}
+ Expected Assertions: {assertions}
+
+ MANDATORY FRAMEWORK RULES:
+ - Use pytest-playwright built-in fixtures: page, context, browser (sync API only).
+ - Use Playwright sync API exclusively (playwright.sync_api). Never use async/await.
+ - All assertions MUST use Playwright expect() — never bare Python assert for UI state.
+ - Name the test function: test_<slugified_title>() (lowercase, underscores, max 60 chars).
+ - Add a docstring containing: Title, Business Intent, Preconditions.
+
+ LOCATOR PRIORITY (follow strictly):
+ 1. page.get_by_role(role, name="...")
+ 2. page.get_by_label("...")
+ 3. page.get_by_placeholder("...")
+ 4. page.get_by_text("...")
+ 5. page.get_by_test_id("...") — only if explicitly justified by the input
+ 6. page.locator("css=...") — last resort only
+ Never invent labels, roles, or test-ids not present in the input.
+ If you must assume a locator name, add: # ASSUMPTION: <reason>
+
+ STEP FIDELITY (NON-NEGOTIABLE):
+ - Implement EVERY action in exact order.
+ - Add a comment before each action: # Step N: <original action text>
+ - For navigation: use page.goto(base_url + path). Define base_url as a local variable.
+ - Never skip or summarise steps.
+
+ ASSERTION TRACEABILITY (NON-NEGOTIABLE):
+ - Every expected assertion must produce ≥1 expect() call.
+ - Add a comment above each assertion: # Assert: <expected outcome text>
+ - Validate visible UI state: text content, element visibility, URL, page title.
+ - If the assertion says "no error shown": assert the error locator is hidden or not present.
+
+ ARRANGE / ACT / ASSERT structure:
+ - Separate the three phases with # --- Arrange ---, # --- Act ---, # --- Assert --- comments.
+
+ TEST DATA:
+ - If test data hints are provided, define them as named constants at the top of the function.
+ - If no test data: add # No test data required.
+ - Never hardcode credentials inline without a named constant.
+
+ The script must be directly runnable with: pytest <filename>.py
+ Output ONLY the script code. No explanations, no markdown fences.
+ """
+
+ REVIEW_PROMPT = """\
+ Review the following {framework} test script for quality issues:
+
+ 1. Are all assertions present and meaningful?
+ 2. Are there any hardcoded credentials or sensitive data? (flag them)
+ 3. Are TODO comments placed wherever assumptions were made?
+ 4. Is the script readable and logically structured?
+
+ If there are issues, rewrite the script and return the improved version.
+ If the script is acceptable, return it unchanged.
+
+ Script:
+ {script}
+
+ Output ONLY the final script code. No explanations, no markdown fences.
+ """
@@ -0,0 +1,342 @@
+ """
+ ScriptGini LangGraph Agent.
+
+ Graph nodes:
+ 1. parse_intent    — Analyse test case; extract business intent, actions, assertions
+ 2. generate_script — Produce framework-specific test script
+ 3. review_script   — Quality-check and optionally rewrite the script
+
+ State flows linearly: parse_intent → generate_script → review_script → END
+ """
+ from __future__ import annotations
+
+ import json
+ import logging
+ import re
+ from typing import TypedDict
+
+ from langchain_core.messages import HumanMessage, SystemMessage
+ from langgraph.graph import StateGraph, END
+
+ from app.agents.prompts import (
+     SYSTEM_PROMPT,
+     INTENT_ANALYSIS_PROMPT,
+     SCRIPT_GENERATION_PROMPT,
+     PLAYWRIGHT_SCRIPT_GENERATION_PROMPT,
+     REVIEW_PROMPT,
+ )
+ from app.config import settings
+ from app.llm.provider import get_llm, LLMProvider
+
+ logger = logging.getLogger(__name__)
+
+
+ # ---------------------------------------------------------------------------
+ # Agent State
+ # ---------------------------------------------------------------------------
+
+ class AgentState(TypedDict):
+     # Inputs
+     test_case_title: str
+     test_case_content: str
+     preconditions: str
+     test_data_hints: str
+     aut_base_url: str
+     framework: str
+     auth_hints: str
+     llm_provider: str
+     llm_model: str | None
+
+     # Intermediate
+     business_intent: str
+     actions: str
+     assertions: str
+     parsed_preconditions: str
+
+     # Output
+     script: str
+     error: str | None
+     token_usage: int
+
+
+ # ---------------------------------------------------------------------------
+ # Node implementations
+ # ---------------------------------------------------------------------------
+
+ def _llm_invoke(state: AgentState, system: str, human: str) -> tuple[str, int]:
+     """Helper: call the configured LLM and return (content, token_count)."""
+     llm = get_llm(
+         provider=state["llm_provider"],  # type: ignore[arg-type]
+         model=state.get("llm_model"),
+     )
+     messages = [SystemMessage(content=system), HumanMessage(content=human)]
+
+     max_attempts = 1
+     if state.get("llm_provider") == "ollama":
+         max_attempts += settings.OLLAMA_TIMEOUT_RETRIES
+
+     last_error: Exception | None = None
+     for attempt in range(1, max_attempts + 1):
+         try:
+             response = llm.invoke(messages)
+             break
+         except Exception as exc:  # pragma: no cover - depends on provider/network behavior
+             if state.get("llm_provider") == "gemini" and "not_found" in str(exc).lower():
+                 raise ValueError(
+                     "Gemini model was not found for this API version/method. "
+                     "Update GEMINI_MODEL in .env (for example: gemini-2.0-flash) "
+                     "or set llm_model in the request payload to a currently available model."
+                 ) from exc
+             is_timeout = "timeout" in str(exc).lower() or exc.__class__.__name__.endswith("Timeout")
+             if state.get("llm_provider") != "ollama" or not is_timeout or attempt >= max_attempts:
+                 raise
+             last_error = exc
+             logger.warning("Ollama timeout during LLM invoke (attempt %s/%s). Retrying once.", attempt, max_attempts)
+     else:
+         # Defensive guard: loop exhausted without a response or raised exception.
+         raise RuntimeError("LLM invocation failed unexpectedly")
+
+     usage = 0
+     if hasattr(response, "usage_metadata") and response.usage_metadata:
+         usage = response.usage_metadata.get("total_tokens", 0)
+     return response.content, usage
+
+
+ def _normalize_case_line(value: str) -> str:
+     return re.sub(r"^[-*\d\.)\s]+", "", value.strip())
+
+
+ def _extract_local_intent(state: AgentState) -> dict:
+     """Fast path: derive intent/actions/assertions heuristically without an LLM round-trip."""
+     raw_content = state.get("test_case_content", "") or ""
+     lines = [_normalize_case_line(line) for line in raw_content.splitlines() if line.strip()]
+
+     actions: list[str] = []
+     assertions: list[str] = []
+     extracted_preconditions: list[str] = []
+
+     for line in lines:
+         lowered = line.lower()
+
+         if lowered.startswith("precondition"):
+             extracted_preconditions.append(line)
+             continue
+         if lowered.startswith("given"):
+             extracted_preconditions.append(line)
+             continue
+         if lowered.startswith("step") or lowered.startswith("when") or lowered.startswith("and"):
+             actions.append(line)
+             continue
+         if lowered.startswith("expected") or lowered.startswith("then") or lowered.startswith("assert"):
+             assertions.append(line)
+             continue
+
+     # Fall back to raw, non-empty lines so downstream generation still gets structured hints.
+     if not actions:
+         actions = [segment.strip() for segment in re.split(r"[\r\n]+", raw_content) if segment.strip()][:8]
+     if not assertions:
+         assertions = [line for line in lines if "expect" in line.lower() or "should" in line.lower()][:6]
+
+     business_intent = state.get("test_case_title", "").strip() or (lines[0] if lines else "")
+     preconditions = state.get("preconditions", "").strip()
+     if extracted_preconditions:
+         merged = [item for item in [preconditions, *extracted_preconditions] if item]
+         preconditions = "\n".join(dict.fromkeys(merged))
+
+     return {
+         "business_intent": business_intent,
+         "parsed_preconditions": preconditions,
+         "actions": json.dumps(actions, indent=2),
+         "assertions": json.dumps(assertions, indent=2),
+         "error": None,
+     }
+
+
+ def _extract_llm_intent(state: AgentState) -> dict:
+     prompt = INTENT_ANALYSIS_PROMPT.format(
+         aut_base_url=state["aut_base_url"],
+         test_case_content=state["test_case_content"],
+         preconditions=state.get("preconditions") or "None provided",
+         test_data_hints=state.get("test_data_hints") or "None provided",
+     )
+     content, tokens = _llm_invoke(state, SYSTEM_PROMPT, prompt)
+
+     # Try to parse JSON; fall back to using the raw text
+     try:
+         parsed = json.loads(content)
+     except json.JSONDecodeError:
+         # LLM may wrap JSON in markdown fences — strip them
+         clean = content.strip().removeprefix("```json").removeprefix("```").removesuffix("```").strip()
+         try:
+             parsed = json.loads(clean)
+         except json.JSONDecodeError:
+             parsed = {
+                 "business_intent": content,
+                 "preconditions": state.get("preconditions", ""),
+                 "actions": [],
+                 "assertions": [],
+             }
+
+     return {
+         "business_intent": parsed.get("business_intent", ""),
+         "parsed_preconditions": str(parsed.get("preconditions", state.get("preconditions", ""))),
+         "actions": json.dumps(parsed.get("actions", []), indent=2) if isinstance(parsed.get("actions"), list) else str(parsed.get("actions", "")),
+         "assertions": json.dumps(parsed.get("assertions", []), indent=2) if isinstance(parsed.get("assertions"), list) else str(parsed.get("assertions", "")),
+         "token_usage": state.get("token_usage", 0) + tokens,
+         "error": None,
+     }
+
+
+ def node_parse_intent(state: AgentState) -> dict:
+     """Node 1 — Parse the test case and extract structured intent."""
+     try:
+         if settings.USE_LLM_INTENT_ANALYSIS:
+             return _extract_llm_intent(state)
+         return _extract_local_intent(state)
+     except Exception as exc:
+         logger.exception("parse_intent node failed")
+         return {"error": str(exc)}
+
+
+ def node_generate_script(state: AgentState) -> dict:
+     """Node 2 — Generate the test script from parsed intent."""
+     if state.get("error"):
+         return {}
+
+     if state["framework"] == "playwright_python":
+         prompt = PLAYWRIGHT_SCRIPT_GENERATION_PROMPT.format(
+             aut_base_url=state["aut_base_url"],
+             auth_hints=state.get("auth_hints") or "None provided",
+             test_case_title=state["test_case_title"],
+             business_intent=state.get("business_intent", ""),
+             preconditions=state.get("parsed_preconditions", ""),
+             actions=state.get("actions", ""),
+             assertions=state.get("assertions", ""),
+         )
+     else:
+         prompt = SCRIPT_GENERATION_PROMPT.format(
+             framework=state["framework"],
+             aut_base_url=state["aut_base_url"],
+             auth_hints=state.get("auth_hints") or "None provided",
+             test_case_title=state["test_case_title"],
+             business_intent=state.get("business_intent", ""),
+             preconditions=state.get("parsed_preconditions", ""),
+             actions=state.get("actions", ""),
+             assertions=state.get("assertions", ""),
+         )
+     try:
+         content, tokens = _llm_invoke(state, SYSTEM_PROMPT, prompt)
+         return {
+             "script": content,
+             "token_usage": state.get("token_usage", 0) + tokens,
+         }
+     except Exception as exc:
+         logger.exception("generate_script node failed")
+         error_message = str(exc)
+         if state.get("llm_provider") == "ollama" and "timeout" in error_message.lower():
+             error_message = (
+                 f"{error_message}. Ollama timed out while generating script. "
+                 "Try a smaller model, reduce OLLAMA_NUM_PREDICT, or increase OLLAMA_REQUEST_TIMEOUT_SECONDS in .env."
+             )
+         return {"error": error_message}
+
+
+ def node_review_script(state: AgentState) -> dict:
+     """Node 3 — Quality review; rewrite if issues found."""
+     if state.get("error") or not state.get("script"):
+         return {}
+
+     if settings.SKIP_REVIEW_FOR_OLLAMA and state.get("llm_provider") == "ollama":
+         return {}
+
+     prompt = REVIEW_PROMPT.format(
+         framework=state["framework"],
+         script=state["script"],
+     )
+     try:
+         content, tokens = _llm_invoke(state, SYSTEM_PROMPT, prompt)
+         return {
+             "script": content,
+             "token_usage": state.get("token_usage", 0) + tokens,
+         }
+     except Exception as exc:
+         logger.exception("review_script node failed")
+         return {"error": str(exc)}
+
+
+ # ---------------------------------------------------------------------------
+ # Graph definition
+ # ---------------------------------------------------------------------------
+
+ def build_graph():
+     # compile() returns a runnable CompiledStateGraph, not a StateGraph, so the return annotation is omitted.
+     graph = StateGraph(AgentState)
+
+     graph.add_node("parse_intent", node_parse_intent)
+     graph.add_node("generate_script", node_generate_script)
+     graph.add_node("review_script", node_review_script)
+
+     graph.set_entry_point("parse_intent")
+     graph.add_edge("parse_intent", "generate_script")
+     graph.add_edge("generate_script", "review_script")
+     graph.add_edge("review_script", END)
+
+     return graph.compile()
+
+
+ # Singleton compiled graph
+ _app = None
+
+
+ def get_agent():
+     global _app
+     if _app is None:
+         _app = build_graph()
+     return _app
+
+
+ # ---------------------------------------------------------------------------
+ # Public entry point
+ # ---------------------------------------------------------------------------
+
+ def run_agent(
+     *,
+     test_case_title: str,
+     test_case_content: str,
+     preconditions: str = "",
+     test_data_hints: str = "",
+     aut_base_url: str,
+     framework: str = "playwright_python",
+     auth_hints: str = "",
+     llm_provider: LLMProvider = "openai",
+     llm_model: str | None = None,
+ ) -> dict:
+     """
+     Execute the ScriptGini agent and return a result dict with keys:
+     script, error, token_usage
+     """
+     initial_state: AgentState = {
+         "test_case_title": test_case_title,
+         "test_case_content": test_case_content,
+         "preconditions": preconditions,
+         "test_data_hints": test_data_hints,
+         "aut_base_url": aut_base_url,
+         "framework": framework,
+         "auth_hints": auth_hints,
+         "llm_provider": llm_provider,
+         "llm_model": llm_model,
+         # intermediate / output — initialise to empty
+         "business_intent": "",
+         "actions": "",
+         "assertions": "",
+         "parsed_preconditions": "",
+         "script": "",
+         "error": None,
+         "token_usage": 0,
+     }
+     result = get_agent().invoke(initial_state)
+     return {
+         "script": result.get("script", ""),
+         "error": result.get("error"),
+         "token_usage": result.get("token_usage", 0),
+     }
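
As a usage sketch, the public entry point can be driven like this; the import path is an assumption (the file name for this hunk is not shown in the diff) and all argument values are made up for illustration:

    from app.agents.agent import run_agent  # module path assumed, not confirmed by this diff

    result = run_agent(
        test_case_title="Login with valid credentials",
        test_case_content="Step 1: Go to /login\nStep 2: Enter username\nExpected: Dashboard is shown",
        aut_base_url="https://demo.example.com",
        framework="playwright_python",
        llm_provider="openai",
    )
    if result["error"]:
        print("generation failed:", result["error"])
    else:
        print(result["script"])
    print("tokens used:", result["token_usage"])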
app/config.py ADDED
@@ -0,0 +1,59 @@
+ from pydantic_settings import BaseSettings, SettingsConfigDict
+ from typing import Literal
+
+
+ class Settings(BaseSettings):
+     model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8", extra="ignore")
+
+     # App
+     APP_NAME: str = "ScriptGini"
+     DEBUG: bool = False
+
+     # Database
+     DATABASE_URL: str = "sqlite:///./scriptgini.db"
+
+     # Default LLM provider
+     DEFAULT_LLM_PROVIDER: Literal["openai", "ollama", "openrouter", "gemini", "bedrock"] = "openai"
+     LLM_REQUEST_TIMEOUT_SECONDS: float = 45.0
+     SCRIPT_GENERATION_TIMEOUT_SECONDS: int = 180
+     PLAYWRIGHT_RUN_HEADED: bool = True
+     SCRIPT_EXECUTION_TIMEOUT_SECONDS: int = 300
+     SKIP_REVIEW_FOR_OLLAMA: bool = True
+     USE_LLM_INTENT_ANALYSIS: bool = True
+
+     # Automatic Git export
+     AUTO_EXPORT_GIT_ENABLED: bool = False
+     AUTO_EXPORT_GIT_REPO_URL: str = ""
+     AUTO_EXPORT_GIT_BRANCH: str = "main"
+     AUTO_EXPORT_GIT_LOCAL_PATH: str = "~/.scriptgini/scriptgini-sandbox"
+     AUTO_EXPORT_GIT_USER_NAME: str = "ScriptGini"
+     AUTO_EXPORT_GIT_USER_EMAIL: str = "scriptgini@local"
+
+     # OpenAI
+     OPENAI_API_KEY: str = ""
+     OPENAI_MODEL: str = "gpt-4o"
+
+     # Ollama
+     OLLAMA_BASE_URL: str = "http://localhost:11434"
+     OLLAMA_MODEL: str = "llama3"
+     OLLAMA_NUM_PREDICT: int = 700
+     OLLAMA_REQUEST_TIMEOUT_SECONDS: float = 180.0
+     OLLAMA_TIMEOUT_RETRIES: int = 1
+
+     # OpenRouter
+     OPENROUTER_API_KEY: str = ""
+     OPENROUTER_MODEL: str = "openai/gpt-4o"
+     OPENROUTER_BASE_URL: str = "https://openrouter.ai/api/v1"
+
+     # Google Gemini
+     GOOGLE_API_KEY: str = ""
+     GEMINI_MODEL: str = "gemini-2.5-flash"
+
+     # AWS Bedrock
+     AWS_ACCESS_KEY_ID: str = ""
+     AWS_SECRET_ACCESS_KEY: str = ""
+     AWS_REGION_NAME: str = "us-east-1"
+     BEDROCK_MODEL_ID: str = "anthropic.claude-3-5-sonnet-20241022-v2:0"
+
+
+ settings = Settings()
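
A small sketch of how these settings resolve, assuming pydantic-settings' usual precedence (process environment over .env entries over the class defaults declared above); the override values are illustrative:

    import os

    # Environment variables win over .env and over the defaults in Settings.
    os.environ["DEFAULT_LLM_PROVIDER"] = "ollama"
    os.environ["OLLAMA_MODEL"] = "mistral"

    from app.config import Settings

    s = Settings()
    assert s.DEFAULT_LLM_PROVIDER == "ollama"
    assert s.OLLAMA_MODEL == "mistral"
    assert s.OLLAMA_TIMEOUT_RETRIES == 1  # untouched default from the class body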
app/database.py ADDED
@@ -0,0 +1,23 @@
+ from sqlalchemy import create_engine
+ from sqlalchemy.orm import sessionmaker, DeclarativeBase
+
+ from app.config import settings
+
+ # check_same_thread applies only to SQLite; pass it conditionally so that a
+ # non-SQLite DATABASE_URL (e.g. PostgreSQL) is not handed an unknown argument.
+ connect_args = {"check_same_thread": False} if settings.DATABASE_URL.startswith("sqlite") else {}
+ engine = create_engine(settings.DATABASE_URL, connect_args=connect_args)
+
+ SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
+
+
+ class Base(DeclarativeBase):
+     pass
+
+
+ def get_db():
+     db = SessionLocal()
+     try:
+         yield db
+     finally:
+         db.close()
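
get_db is the classic generator-dependency pattern; here is a sketch of how a FastAPI route would consume it (FastAPI itself does not appear in this diff, so treat the wiring as an assumption):

    from fastapi import Depends, FastAPI
    from sqlalchemy.orm import Session

    from app.database import get_db

    api = FastAPI()

    @api.get("/health/db")
    def db_health(db: Session = Depends(get_db)):
        # The session opens per request; get_db's finally block closes it afterwards.
        return {"db_session_active": db.is_active}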
app/llm/__init__.py ADDED
File without changes