omga-cli 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- core/__init__.py +7 -0
- core/ai.py +399 -0
- core/cache_db.py +159 -0
- core/checker.py +101 -0
- core/cli.py +111 -0
- core/commands.py +369 -0
- core/completer.py +149 -0
- core/config.py +152 -0
- core/logger.py +21 -0
- core/resources/fastapi_template.py +39 -0
- core/shell.py +62 -0
- core/ui.py +513 -0
- core/utils.py +140 -0
- omga_cli-1.1.0.dist-info/METADATA +179 -0
- omga_cli-1.1.0.dist-info/RECORD +19 -0
- omga_cli-1.1.0.dist-info/WHEEL +5 -0
- omga_cli-1.1.0.dist-info/entry_points.txt +2 -0
- omga_cli-1.1.0.dist-info/licenses/LICENSE +21 -0
- omga_cli-1.1.0.dist-info/top_level.txt +3 -0
core/__init__.py
ADDED
core/ai.py
ADDED
|
@@ -0,0 +1,399 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import time
|
|
3
|
+
import threading
|
|
4
|
+
from typing import Iterator, List, Optional
|
|
5
|
+
|
|
6
|
+
import requests
|
|
7
|
+
|
|
8
|
+
from core.cache_db import get as cache_get, set_ as cache_set
|
|
9
|
+
from core.config import get_api_key, get_config_value
|
|
10
|
+
from core.logger import logger
|
|
11
|
+
|
|
12
|
+
# ── API constants ──────────────────────────────────────────────────────────────
_BASE_URL = "https://openrouter.ai/api/v1/chat/completions"  # OpenRouter chat endpoint
_REFERER = "https://github.com/omga-cli"  # sent as HTTP-Referer for OpenRouter attribution
_APP_NAME = "omga-cli"  # sent as X-Title header

# English system prompt: identity line plus anti-truncation/formatting rules.
_SYSTEM_PROMPT_EN = (
    "You are an expert AI coding assistant integrated into omga-cli, "
    "developed by Pouria Hosseini (@isPoori — PouriaHosseini.ir). "
    "You are precise and always produce complete, production-quality output.\n"
    "RULES (non-negotiable):\n"
    "- Never truncate your response. Write the complete answer every time.\n"
    "- Never say 'continued below', 'see next', or '# ... rest unchanged'.\n"
    "- When returning code use the appropriate markdown code fence.\n"
    "- Think step by step before answering complex questions.\n"
    "- State uncertainty clearly rather than guessing."
)

# Persian variant: same rules as _SYSTEM_PROMPT_EN plus a reply-in-Persian
# language rule; selected by _system_prompt() when the user writes Farsi.
_SYSTEM_PROMPT_FA = (
    "You are an expert AI coding assistant integrated into omga-cli, "
    "developed by Pouria Hosseini (@isPoori — PouriaHosseini.ir). "
    "You are precise and always produce complete, production-quality output.\n"
    "RULES (non-negotiable):\n"
    "- Never truncate your response. Write the complete answer every time.\n"
    "- Never say 'continued below', 'see next', or '# ... rest unchanged'.\n"
    "- When returning code use the appropriate markdown code fence.\n"
    "- Think step by step before answering complex questions.\n"
    "- State uncertainty clearly rather than guessing.\n\n"
    "LANGUAGE RULE: The user is writing in Persian (Farsi — فارسی). "
    "You MUST reply entirely in Persian. "
    "Use proper Persian grammar, punctuation, and technical terminology. "
    "Code identifiers remain in English; all prose must be in Persian."
)

# HTTP status codes treated as transient — _request_stream retries these.
_RETRYABLE = frozenset({429, 500, 502, 503, 504})
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
# ── Persian detection ──────────────────────────────────────────────────────────
|
|
49
|
+
|
|
50
|
+
def _has_persian(text: str) -> bool:
|
|
51
|
+
"""Return True if *text* contains Persian/Arabic Unicode characters."""
|
|
52
|
+
for ch in text:
|
|
53
|
+
cp = ord(ch)
|
|
54
|
+
if 0x0600 <= cp <= 0x06FF or 0xFB50 <= cp <= 0xFDFF or 0xFE70 <= cp <= 0xFEFF:
|
|
55
|
+
return True
|
|
56
|
+
return False
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def _system_prompt(prompt: str) -> str:
    """Pick the Persian or English system prompt based on *prompt*'s script."""
    if _has_persian(prompt):
        return _SYSTEM_PROMPT_FA
    return _SYSTEM_PROMPT_EN
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
# ── HTTP helpers ───────────────────────────────────────────────────────────────
|
|
64
|
+
|
|
65
|
+
def _headers() -> dict:
    """Build the HTTP headers for an OpenRouter chat-completions request.

    The bearer token is read fresh on every call via get_api_key(), so a
    key updated in config takes effect without a restart.
    """
    bearer = f"Bearer {get_api_key()}"
    return {
        "Content-Type": "application/json",
        "Authorization": bearer,
        "HTTP-Referer": _REFERER,
        "X-Title": _APP_NAME,
    }
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _payload(prompt: str, *, stream: bool = False) -> dict:
    """Assemble the JSON request body for a chat-completions call.

    Model name, token limit and temperature come from user config with
    the defaults shown below; *stream* toggles server-side SSE streaming.
    """
    body: dict = {
        "model": get_config_value("api.model", "arcee-ai/trinity-large-preview:free"),
        "max_tokens": get_config_value("api.max_tokens", 8192),
        "temperature": get_config_value("api.temperature", 0.7),
        "stream": stream,
    }
    # System prompt is chosen per-request so Persian input gets Persian output.
    body["messages"] = [
        {"role": "system", "content": _system_prompt(prompt)},
        {"role": "user", "content": prompt},
    ]
    return body
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def _parse_error(resp: requests.Response) -> str:
    """Extract a human-readable message from an API error response.

    Prefers the structured ``error.message`` JSON field; on any parsing
    problem falls back to the first 300 characters of the raw body.
    """
    try:
        return resp.json().get("error", {}).get("message", resp.text[:300])
    except Exception:
        # Body was not JSON (or not the expected shape) — show it raw.
        return resp.text[:300]
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def _backoff(attempt: int) -> None:
    """Sleep with exponential back-off (2**attempt seconds), capped at 16 s."""
    delay = 2 ** attempt
    if delay > 16:
        delay = 16
    time.sleep(delay)
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
# ── Low-level streaming request ────────────────────────────────────────────────
|
|
99
|
+
|
|
100
|
+
def _request_stream(prompt: str, retries: int = 3) -> Iterator[str]:
    """
    Open a streaming POST and yield SSE content chunks.

    Retries transient failures (HTTP 429/5xx and network errors) with
    exponential back-off; any other API error raises immediately.

    Args:
        prompt: The full prompt (including any context) to send.
        retries: Maximum number of attempts before giving up.

    Yields:
        Incremental text chunks from the model's streamed response.

    Raises:
        RuntimeError: on a non-retryable API error, or once *retries*
            attempts have been exhausted.
    """
    timeout = get_config_value("api.timeout", 60)

    for attempt in range(1, retries + 1):
        try:
            # Fix: `with` guarantees the response/connection is closed on every
            # exit path — the original leaked the connection when a retryable
            # status fell through to the back-off, or the caller abandoned us.
            with requests.post(
                _BASE_URL,
                json=_payload(prompt, stream=True),
                headers=_headers(),
                timeout=timeout,
                stream=True,
            ) as resp:
                if not resp.ok:
                    detail = _parse_error(resp)
                    logger.error(
                        "Stream API %s attempt %d: %s", resp.status_code, attempt, detail
                    )
                    # Fix: idiomatic `not in` (was `not x in _RETRYABLE`).
                    if resp.status_code not in _RETRYABLE:
                        raise RuntimeError(f"API error {resp.status_code}: {detail}")
                    # Retryable status — fall through to the back-off below.
                else:
                    for raw_line in resp.iter_lines(decode_unicode=True):
                        # SSE frames of interest look like "data: {...}".
                        if not raw_line or not raw_line.startswith("data: "):
                            continue
                        payload = raw_line[6:].strip()
                        if payload == "[DONE]":
                            return
                        try:
                            data = json.loads(payload)
                            delta = (data.get("choices") or [{}])[0].get("delta", {})
                            chunk = delta.get("content", "")
                            if chunk:
                                yield chunk
                        except (json.JSONDecodeError, IndexError):
                            # Malformed frame — skip it rather than abort the stream.
                            continue
                    return  # clean EOF

        except RuntimeError:
            raise  # non-retryable API error — propagate unchanged
        except (requests.ConnectionError, requests.Timeout) as exc:
            logger.warning("Network error stream attempt %d/%d: %s", attempt, retries, exc)
        # Fix: dropped the original's redundant `except Exception: raise` —
        # unhandled exceptions propagate by default.

        if attempt < retries:
            _backoff(attempt)

    raise RuntimeError("AI service unavailable after multiple retries.")
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
# ── Public: ask_stream (spinner → stream) ─────────────────────────────────────
|
|
154
|
+
|
|
155
|
+
def ask_stream(prompt: str, context: Optional[str] = None) -> Iterator[str]:
    """
    Send *prompt* and yield response chunks with the full UX flow:

        spinner (waiting for first token) → streaming panel (word-by-word)

    Steps
    ──────
    1. Cache hit → replay stored text in small chunks, no network call.
    2. Cache miss →
       a. Background thread opens the stream and waits for the first chunk.
       b. Main thread shows the thinking spinner.
       c. When the first chunk arrives the background thread sets an event.
       d. Spinner stops; main thread yields all chunks to the caller.
       e. Caller's Live panel renders them word-by-word.
       f. Complete response is written to cache after the stream ends.
    """
    from core.ui import ui  # lazy — avoids circular import at module load

    full_prompt = f"Context:\n{context}\n\n{prompt}" if context else prompt
    persian = _has_persian(full_prompt)

    # ── 1. Cache check ─────────────────────────────────────────────────────────
    if get_config_value("features.cache_responses", True):
        cached = cache_get(full_prompt)
        if cached:
            logger.debug("Cache hit, replaying %d chars", len(cached))
            # Replay in 4-char slices so the caller's streaming UI still
            # animates instead of dumping the whole answer at once.
            for i in range(0, len(cached), 4):
                yield cached[i:i + 4]
            return

    # ── 2. Prime the stream in a background thread ─────────────────────────────
    # We buffer only the *first* chunk here; the rest are yielded directly
    # after the spinner exits so no large buffer is ever held in memory.
    # NOTE: list.append is used for cross-thread hand-off; the Event below
    # is the synchronisation point, so no extra locking is needed.

    ready_event: threading.Event = threading.Event()
    first_buf: List[str] = []              # holds at most one chunk
    prime_error: List[Exception] = []      # first exception from the primer, if any
    gen_holder: List[Iterator[str]] = []   # the live generator

    def _prime() -> None:
        # Runs in the background thread: open the stream, capture the first
        # chunk (or the error), then wake the main thread.
        try:
            gen = _request_stream(full_prompt)
            gen_holder.append(gen)
            try:
                chunk = next(gen)
                if chunk:
                    first_buf.append(chunk)
            except StopIteration:
                pass  # empty stream — edge case
        except Exception as exc:
            prime_error.append(exc)
        finally:
            ready_event.set()  # always unblock the main thread

    threading.Thread(target=_prime, daemon=True).start()

    # ── 3. Show spinner until first token arrives ──────────────────────────────
    with ui.thinking_spinner(persian=persian):
        ready_event.wait()  # blocks here; spinner animates freely

    # ── 4. Spinner is gone — propagate any error ───────────────────────────────
    if prime_error:
        raise prime_error[0]

    # ── 5. Yield all chunks to the caller ─────────────────────────────────────
    accumulated: List[str] = []

    for chunk in first_buf:
        accumulated.append(chunk)
        yield chunk

    if gen_holder:
        try:
            # Continue consuming the generator opened by the primer thread;
            # from here on iteration happens on the main thread only.
            for chunk in gen_holder[0]:
                accumulated.append(chunk)
                yield chunk
        except Exception as exc:
            logger.error("Stream interrupted mid-response: %s", exc)
            if not accumulated:
                raise  # nothing delivered yet — surface the error
            # Otherwise: partial response already shown; log and stop cleanly

    # ── 6. Persist complete response to cache ─────────────────────────────────
    full_text = "".join(accumulated)
    if full_text and get_config_value("features.cache_responses", True):
        cache_set(full_prompt, full_text)
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
# ── Public: ask (blocking, collects full stream) ─────────────────────────────
|
|
245
|
+
|
|
246
|
+
def ask(prompt: str, context: Optional[str] = None, *, use_cache: bool = True) -> str:
    """
    Send *prompt* and return the complete response as a string.

    The spinner → stream UX applies even for callers that only want the
    final string: the cached path delegates to ask_stream(); the
    use_cache=False path mirrors its priming logic but talks to
    _request_stream() directly so neither a cache read nor a cache write
    can occur (used by generate_project_code).

    Args:
        prompt: The user question/instruction.
        context: Optional extra context prepended to the prompt.
        use_cache: When False, force a fresh network call and skip caching.

    Returns:
        The complete model response.
    """
    if not use_cache:
        # Fix: removed the original's dead `_sentinel = "\x00nocache"` local —
        # it was never referenced; the bypass works by skipping ask_stream().
        full = f"Context:\n{context}\n\n{prompt}" if context else prompt
        from core.ui import ui  # lazy — avoids circular import at module load
        persian = _has_persian(full)

        ready_event = threading.Event()
        first_buf: List[str] = []
        prime_error: List[Exception] = []
        gen_holder: List[Iterator[str]] = []

        def _prime_nocache() -> None:
            # Background primer: open the stream, grab the first chunk,
            # then signal the main thread (same handshake as ask_stream).
            try:
                gen = _request_stream(full)
                gen_holder.append(gen)
                try:
                    chunk = next(gen)
                    if chunk:
                        first_buf.append(chunk)
                except StopIteration:
                    pass  # empty stream — edge case
            except Exception as exc:
                prime_error.append(exc)
            finally:
                ready_event.set()  # always unblock the main thread

        threading.Thread(target=_prime_nocache, daemon=True).start()

        # Spinner runs only until the first token (or an error) arrives.
        with ui.thinking_spinner(persian=persian):
            ready_event.wait()
        if prime_error:
            raise prime_error[0]

        parts: List[str] = list(first_buf)
        if gen_holder:
            parts.extend(gen_holder[0])
        return "".join(parts)

    return "".join(ask_stream(prompt, context))
|
|
296
|
+
|
|
297
|
+
|
|
298
|
+
# ── Specialised helpers ────────────────────────────────────────────────────────
|
|
299
|
+
|
|
300
|
+
def explain_code(code: str, language: str = "python") -> str:
    """Ask the AI for a detailed, structured explanation of *code*."""
    sections = (
        f"1. **Overview** – what the code does in one paragraph\n"
        f"2. **Step-by-step breakdown** – every significant block explained\n"
        f"3. **Key concepts** – patterns, algorithms, or techniques used\n"
        f"4. **Potential issues** – bugs, edge-cases, security concerns\n"
        f"5. **Improvement suggestions** – actionable ideas with code snippets\n"
    )
    prompt = (
        f"Explain the following {language} code in full detail. "
        f"Do NOT truncate — write the complete explanation.\n\n"
        f"```{language}\n{code}\n```\n\n"
        f"Structure your response as:\n" + sections
    )
    return ask(prompt)
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
def fix_code(code: str, language: str = "python", issues: Optional[str] = None) -> str:
    """Ask the AI to repair *code*; returns the corrected source, fences stripped."""
    if issues:
        issue_block = f"\nKnown issues to address:\n{issues}\n"
    else:
        issue_block = ""
    requirements = (
        f"Requirements:\n"
        f"- Fix every syntax error, logic bug, and style violation.\n"
        f"- Improve error handling and input validation.\n"
        f"- Add brief inline comments for non-obvious changes.\n"
        f"- Return the COMPLETE corrected file in a single code fence.\n"
        f"- Never omit any part. Never write '# ... rest unchanged'.\n"
    )
    prompt = (
        f"Fix ALL problems in the following {language} code.\n"
        f"{issue_block}\n"
        f"```{language}\n{code}\n```\n\n" + requirements
    )
    return _strip_code_fence(ask(prompt))
|
|
329
|
+
|
|
330
|
+
|
|
331
|
+
def suggest_improvements(code: str, language: str = "python") -> str:
    """Ask the AI for a structured, multi-section code review of *code*."""
    checklist = (
        f"1. **Performance** – bottlenecks and faster alternatives\n"
        f"2. **Readability** – naming, structure, comments\n"
        f"3. **Maintainability** – modularity, testability\n"
        f"4. **Modern practices** – idiomatic {language} patterns\n"
        f"5. **Security** – input validation, injection risks, secret handling\n"
        f"6. **Error handling** – robustness improvements\n\n"
    )
    prompt = (
        f"Review this {language} code and provide thorough improvement suggestions.\n\n"
        f"```{language}\n{code}\n```\n\n"
        f"Cover ALL sections — do not skip any:\n"
        + checklist
        + "Include short code snippets demonstrating each suggestion.\n"
    )
    return ask(prompt)
|
|
345
|
+
|
|
346
|
+
|
|
347
|
+
def generate_documentation(code: str, language: str = "python") -> str:
    """Ask the AI to produce fully documented source for *code*."""
    requirements = (
        f"1. Module / class / function docstrings (Google style)\n"
        f"2. Parameter types and descriptions\n"
        f"3. Return types and descriptions\n"
        f"4. Raises sections where applicable\n"
        f"5. Usage examples for every public function\n\n"
    )
    prompt = (
        f"Generate complete, production-ready documentation for the following {language} code.\n\n"
        f"```{language}\n{code}\n```\n\n"
        f"Include ALL of the following — never truncate:\n"
        + requirements
        + "Return the COMPLETE documented source in a single code fence.\n"
    )
    return ask(prompt)
|
|
360
|
+
|
|
361
|
+
|
|
362
|
+
def generate_project_code(description: str, template: str = "fastapi") -> dict[str, str]:
    """
    Generate a complete project scaffold via the AI.

    Args:
        description: Natural-language description of the desired project.
        template: Project flavour to request (e.g. "fastapi").

    Returns:
        Mapping of relative file paths to file contents. If the model's
        reply is not valid JSON — or parses to something other than a
        JSON object — the raw text is returned under the single key
        "main.py" as a best-effort fallback.
    """
    prompt = (
        f"Generate a complete, production-ready {template} project based on:\n\n"
        f'"{description}"\n\n'
        f"Return a JSON object: keys = relative file paths, values = file contents.\n"
        f"Example: {{\"main.py\": \"...\", \"requirements.txt\": \"...\"}}\n\n"
        f"Include:\n"
        f"- All application files — complete, not abbreviated\n"
        f"- requirements.txt with pinned versions\n"
        f"- Detailed README.md\n"
        f"- Proper error handling and logging\n"
        f"- Inline comments explaining the structure\n\n"
        f"Return ONLY the raw JSON object — no markdown fences, no prose.\n"
        f"CRITICAL: Write every file in full. Never truncate.\n"
    )
    raw = _strip_code_fence(ask(prompt, use_cache=False)).strip()
    try:
        parsed = json.loads(raw)
    except json.JSONDecodeError as exc:
        # Fix: the original fell back silently; log so failures are diagnosable.
        logger.warning("Project scaffold reply was not valid JSON: %s", exc)
        return {"main.py": raw}
    if not isinstance(parsed, dict):
        # Fix: json.loads can yield a list/str/number, which would violate
        # the declared dict[str, str] return type and break callers that
        # iterate .items(). Treat it like a parse failure.
        logger.warning("Project scaffold JSON was %s, expected object", type(parsed).__name__)
        return {"main.py": raw}
    return parsed
|
|
386
|
+
|
|
387
|
+
|
|
388
|
+
# ── Utility ────────────────────────────────────────────────────────────────────
|
|
389
|
+
|
|
390
|
+
def _strip_code_fence(text: str) -> str:
|
|
391
|
+
"""Remove leading/trailing markdown code fences."""
|
|
392
|
+
if not text:
|
|
393
|
+
return ""
|
|
394
|
+
lines = text.strip().splitlines()
|
|
395
|
+
if lines and lines[0].lstrip().startswith("```"):
|
|
396
|
+
lines = lines[1:]
|
|
397
|
+
if lines and lines[-1].lstrip().startswith("```"):
|
|
398
|
+
lines = lines[:-1]
|
|
399
|
+
return "\n".join(lines).strip()
|
core/cache_db.py
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
"""
|
|
2
|
+
SQLite-backed response cache with thread-safe access and automatic TTL expiry.
|
|
3
|
+
|
|
4
|
+
Each entry stores a prompt hash → response text with a timestamp.
|
|
5
|
+
Entries older than TTL_HOURS (default: 24 h) are pruned automatically:
|
|
6
|
+
• on every write (set_)
|
|
7
|
+
• on every read (get) — stale entries return None
|
|
8
|
+
• on explicit (clear_expired)
|
|
9
|
+
|
|
10
|
+
This keeps the database lightweight and prevents stale AI answers
|
|
11
|
+
from being served indefinitely.
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import sqlite3
|
|
15
|
+
import hashlib
|
|
16
|
+
import datetime
|
|
17
|
+
import threading
|
|
18
|
+
from typing import Optional
|
|
19
|
+
|
|
20
|
+
from core.config import CACHE_DB
|
|
21
|
+
|
|
22
|
+
_lock = threading.Lock()  # serialises ALL database access across threads
TTL_HOURS: int = 24  # cache entries expire after this many hours

# ── Schema ─────────────────────────────────────────────────────────────────────
# Single key/value table. updated_at is an ISO-8601 text timestamp ("YYYY-MM-DD
# HH:MM:SS") compared lexicographically for TTL sweeps; note that no index is
# declared on it, so sweeps are full-table scans.
_DDL = """
CREATE TABLE IF NOT EXISTS cache (
    key TEXT PRIMARY KEY,
    value TEXT NOT NULL,
    updated_at TEXT NOT NULL
)
"""
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def _connect() -> sqlite3.Connection:
    """Open the cache database, creating the schema on first use."""
    connection = sqlite3.connect(CACHE_DB, check_same_thread=False)
    # Idempotent: CREATE TABLE IF NOT EXISTS makes repeat calls harmless.
    connection.execute(_DDL)
    connection.commit()
    return connection
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _hash(text: str) -> str:
|
|
43
|
+
"""Return a stable SHA-256 hex digest of *text* for use as cache key."""
|
|
44
|
+
return hashlib.sha256(text.encode("utf-8", errors="replace")).hexdigest()
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def _cutoff_iso() -> str:
    """Return the ISO timestamp that lies exactly TTL_HOURS in the past."""
    ttl = datetime.timedelta(hours=TTL_HOURS)
    moment = datetime.datetime.now() - ttl
    # Same format as the rows' updated_at, so string comparison is valid.
    return moment.isoformat(sep=" ", timespec="seconds")
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
# ── Public API ─────────────────────────────────────────────────────────────────
|
|
54
|
+
|
|
55
|
+
def get(key: str) -> Optional[str]:
    """
    Return the cached value for *key*, or None if absent or expired.

    Expired entries — and entries whose timestamp cannot be parsed —
    are deleted on access (lazy eviction) so they never accumulate
    silently.

    Args:
        key: The raw prompt text (hashed internally for lookup).

    Returns:
        The cached response text, or None on a miss.
    """
    digest = _hash(key)  # fix: hash once; the original recomputed it 3×
    with _lock:
        conn = _connect()
        try:
            row = conn.execute(
                "SELECT value, updated_at FROM cache WHERE key = ?",
                (digest,),
            ).fetchone()
            if row is None:
                return None

            value, updated_at_str = row

            # Fix: the two expiry branches (too old / unparseable timestamp)
            # duplicated the DELETE+commit; compute staleness once instead.
            try:
                updated_at = datetime.datetime.fromisoformat(updated_at_str)
                age = datetime.datetime.now() - updated_at
                stale = age.total_seconds() > TTL_HOURS * 3600
            except ValueError:
                stale = True  # corrupt timestamp — treat as expired

            if stale:
                conn.execute("DELETE FROM cache WHERE key = ?", (digest,))
                conn.commit()
                return None

            return value
        finally:
            conn.close()
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def set_(key: str, value: str) -> None:
    """
    Insert or replace a cache entry, then sweep out stale rows.

    Every write also deletes all entries older than TTL_HOURS, keeping
    the database compact. The sweep is a full-table scan (updated_at has
    no index) but the table stays small precisely because of this sweep.
    """
    with _lock:
        conn = _connect()
        try:
            timestamp = datetime.datetime.now().isoformat(sep=" ", timespec="seconds")
            conn.execute(
                "INSERT OR REPLACE INTO cache (key, value, updated_at) VALUES (?, ?, ?)",
                (_hash(key), value, timestamp),
            )
            # Opportunistic eviction of everything past the TTL cutoff.
            conn.execute(
                "DELETE FROM cache WHERE updated_at < ?",
                (_cutoff_iso(),),
            )
            conn.commit()
        finally:
            conn.close()
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def count() -> int:
    """Return the number of cache entries that have not yet expired."""
    with _lock:
        conn = _connect()
        try:
            row = conn.execute(
                "SELECT COUNT(*) FROM cache WHERE updated_at >= ?",
                (_cutoff_iso(),),
            ).fetchone()
            return row[0]
        finally:
            conn.close()
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def clear() -> None:
    """Delete every cache entry, expired and non-expired alike."""
    with _lock:
        conn = _connect()
        try:
            conn.execute("DELETE FROM cache")
            conn.commit()
        finally:
            conn.close()
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def clear_expired() -> int:
    """
    Delete only the entries that have exceeded TTL_HOURS.

    Returns:
        The number of rows removed.
    """
    with _lock:
        conn = _connect()
        try:
            removed = conn.execute(
                "DELETE FROM cache WHERE updated_at < ?",
                (_cutoff_iso(),),
            )
            conn.commit()
            return removed.rowcount
        finally:
            conn.close()
|
core/checker.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Static analysis for source files.
|
|
3
|
+
|
|
4
|
+
check_file() – language-aware syntax + lint check
|
|
5
|
+
quick_fix_suggestion() – ask AI for a summary of detected issues
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import ast
|
|
9
|
+
import importlib.util
|
|
10
|
+
import os
|
|
11
|
+
from typing import Optional
|
|
12
|
+
|
|
13
|
+
from core.utils import detect_language, find_file, read_file
|
|
14
|
+
from core.logger import logger
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
# ── Language dispatchers ───────────────────────────────────────────────────────
|
|
18
|
+
|
|
19
|
+
def _check_python(path: str) -> tuple[bool, list[str]]:
    """Syntax-check *path* with ast; add flake8 style findings when available.

    Returns:
        (ok, messages) — ok is False only for unreadable or unparseable
        files; flake8 findings are reported without flipping ok.
    """
    try:
        source = read_file(path)
    except FileNotFoundError as exc:
        return False, [str(exc)]

    # Hard failure: the file cannot be parsed at all.
    try:
        ast.parse(source, filename=path)
    except SyntaxError as exc:
        return False, [f"SyntaxError at line {exc.lineno}: {exc.msg}"]

    findings: list[str] = []

    # flake8 is an optional dependency — only used when importable.
    if importlib.util.find_spec("flake8"):
        try:
            from flake8.api import legacy as flake8
            style_guide = flake8.get_style_guide(max_line_length=120, ignore=["E501", "W503"])
            report = style_guide.check_files([path])
            findings = report.get_statistics("E") + report.get_statistics("W")
        except Exception as exc:
            # Best-effort: a broken flake8 install must not fail the check.
            logger.warning("flake8 check failed: %s", exc)

    return True, findings
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def _check_generic(path: str) -> tuple[bool, list[str]]:
    """Fallback checker: succeeds iff *path* is readable UTF-8 text."""
    try:
        read_file(path)
    except FileNotFoundError as exc:
        return False, [str(exc)]
    except UnicodeDecodeError:
        return False, [f"File is not valid UTF-8: {path}"]
    return True, []
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
# ── Public API ─────────────────────────────────────────────────────────────────
|
|
59
|
+
|
|
60
|
+
# Dispatch table: language name (as returned by detect_language) → checker.
# Languages without a dedicated entry fall back to _check_generic.
_CHECKERS = {
    "python": _check_python,
}
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def check_file(path: str) -> tuple[bool, list[str]]:
    """
    Check *path* for syntax/style issues.

    Returns:
        (ok, messages) — *ok* is False only when the file cannot be
        parsed at all; style warnings leave *ok* True.
    """
    # A relative path that doesn't exist is searched for by basename.
    if not os.path.isabs(path) and not os.path.exists(path):
        located = find_file(os.path.basename(path))
        if located:
            path = located

    language = detect_language(path)
    run_check = _CHECKERS.get(language, _check_generic)
    return run_check(path)
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def quick_fix_suggestion(path: str) -> str:
    """
    Check *path* and ask the AI to summarise any detected problems.

    Returns:
        A plain-text suggestion string, or a clean-bill-of-health message
        when no issues are found.
    """
    from core.ai import ask  # deferred — breaks the checker ↔ ai import cycle

    ok, messages = check_file(path)

    if ok and not messages:
        return "✅ No issues found – the file looks clean."

    # Cap at 20 messages so the prompt stays concise.
    summary = "; ".join(messages[:20])
    prompt = (
        f"The following issues were detected in a source file:\n\n{summary}\n\n"
        "Summarise these issues clearly and suggest precise fixes for each one."
    )
    return ask(prompt)
|