grindx 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49) hide show
  1. grindx-0.1.0/LICENSE +21 -0
  2. grindx-0.1.0/PKG-INFO +169 -0
  3. grindx-0.1.0/README.md +147 -0
  4. grindx-0.1.0/grindx/__init__.py +3 -0
  5. grindx-0.1.0/grindx/__main__.py +3 -0
  6. grindx-0.1.0/grindx/ai.py +360 -0
  7. grindx-0.1.0/grindx/app.py +111 -0
  8. grindx-0.1.0/grindx/clipboard.py +45 -0
  9. grindx-0.1.0/grindx/data.py +173 -0
  10. grindx-0.1.0/grindx/problems/arrays.json +424 -0
  11. grindx-0.1.0/grindx/problems/backtracking.json +201 -0
  12. grindx-0.1.0/grindx/problems/basics.json +73 -0
  13. grindx-0.1.0/grindx/problems/binary-search.json +404 -0
  14. grindx-0.1.0/grindx/problems/bit-manipulation.json +238 -0
  15. grindx-0.1.0/grindx/problems/dynamic-programming.json +798 -0
  16. grindx-0.1.0/grindx/problems/graphs.json +684 -0
  17. grindx-0.1.0/grindx/problems/greedy.json +198 -0
  18. grindx-0.1.0/grindx/problems/heaps.json +191 -0
  19. grindx-0.1.0/grindx/problems/intervals.json +85 -0
  20. grindx-0.1.0/grindx/problems/linked-list.json +372 -0
  21. grindx-0.1.0/grindx/problems/math-geometry.json +71 -0
  22. grindx-0.1.0/grindx/problems/sliding-window.json +191 -0
  23. grindx-0.1.0/grindx/problems/sorting.json +72 -0
  24. grindx-0.1.0/grindx/problems/stacks-queues.json +280 -0
  25. grindx-0.1.0/grindx/problems/strings.json +365 -0
  26. grindx-0.1.0/grindx/problems/trees.json +257 -0
  27. grindx-0.1.0/grindx/problems/tries.json +56 -0
  28. grindx-0.1.0/grindx/problems/two-pointers.json +149 -0
  29. grindx-0.1.0/grindx/screens/__init__.py +5 -0
  30. grindx-0.1.0/grindx/screens/browser.py +259 -0
  31. grindx-0.1.0/grindx/screens/evaluate.py +106 -0
  32. grindx-0.1.0/grindx/screens/solve.py +287 -0
  33. grindx-0.1.0/grindx/screens/stats.py +128 -0
  34. grindx-0.1.0/grindx/screens/welcome.py +117 -0
  35. grindx-0.1.0/grindx/sheets/blind-75.json +98 -0
  36. grindx-0.1.0/grindx/sheets/grind-75.json +110 -0
  37. grindx-0.1.0/grindx/sheets/neetcode-150.json +188 -0
  38. grindx-0.1.0/grindx/sheets/striver-a2z.json +346 -0
  39. grindx-0.1.0/grindx/widgets/__init__.py +2 -0
  40. grindx-0.1.0/grindx/widgets/editor.py +117 -0
  41. grindx-0.1.0/grindx/widgets/items.py +47 -0
  42. grindx-0.1.0/grindx.egg-info/PKG-INFO +169 -0
  43. grindx-0.1.0/grindx.egg-info/SOURCES.txt +47 -0
  44. grindx-0.1.0/grindx.egg-info/dependency_links.txt +1 -0
  45. grindx-0.1.0/grindx.egg-info/entry_points.txt +2 -0
  46. grindx-0.1.0/grindx.egg-info/requires.txt +4 -0
  47. grindx-0.1.0/grindx.egg-info/top_level.txt +1 -0
  48. grindx-0.1.0/pyproject.toml +38 -0
  49. grindx-0.1.0/setup.cfg +4 -0
grindx-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 xghostient
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
grindx-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,169 @@
1
+ Metadata-Version: 2.4
2
+ Name: grindx
3
+ Version: 0.1.0
4
+ Summary: Distraction-free DSA practice in your terminal
5
+ Author: xghostient
6
+ License-Expression: MIT
7
+ Project-URL: Homepage, https://github.com/xghostient/grindx
8
+ Project-URL: Repository, https://github.com/xghostient/grindx
9
+ Keywords: dsa,leetcode,terminal,tui,practice
10
+ Classifier: Development Status :: 3 - Alpha
11
+ Classifier: Environment :: Console
12
+ Classifier: Topic :: Education
13
+ Classifier: Programming Language :: Python :: 3
14
+ Requires-Python: >=3.10
15
+ Description-Content-Type: text/markdown
16
+ License-File: LICENSE
17
+ Requires-Dist: textual>=1.0.0
18
+ Requires-Dist: tree-sitter>=0.21.0
19
+ Requires-Dist: tree-sitter-python>=0.21.0
20
+ Requires-Dist: tree-sitter-go>=0.21.0
21
+ Dynamic: license-file
22
+
23
+ # grindx
24
+
25
+ Distraction-free DSA practice in your terminal. Zero network footprint.
26
+
27
+ ![Python 3.10+](https://img.shields.io/badge/python-3.10%2B-blue)
28
+ ![License: MIT](https://img.shields.io/badge/license-MIT-green)
29
+
30
+ ## Why grindx?
31
+
32
+ - **Zero network calls** — everything runs locally, no tracking (optional AI review)
33
+ - **Terminal-native** — practice DSA without leaving your terminal or opening a browser
34
+ - **Multiple sheets** — Striver A2Z (316), Blind 75, NeetCode 150, Grind 75 built-in
35
+ - **Python & Go** — switch languages on the fly with `Ctrl+L`
36
+ - **Progress tracking** — solved/in-progress/not-started states, streaks, best times
37
+ - **Bookmarks & filters** — filter by difficulty (Easy/Medium/Hard) or bookmarked problems
38
+
39
+ ## Install
40
+
41
+ ```bash
42
+ pip install grindx
43
+ ```
44
+
45
+ Or with [pipx](https://pipx.pypa.io/) (recommended for CLI tools):
46
+
47
+ ```bash
48
+ pipx install grindx
49
+ ```
50
+
51
+ Or run from source:
52
+
53
+ ```bash
54
+ git clone https://github.com/xghostient/grindx.git
55
+ cd grindx
56
+ python3 -m venv .venv && source .venv/bin/activate
57
+ pip install -e .
58
+ grindx
59
+ ```
60
+
61
+ ## Usage
62
+
63
+ ```bash
64
+ grindx # CLI entry point
65
+ python -m grindx # or as a module
66
+ ```
67
+
68
+ ### Navigation
69
+
70
+ | Key | Action |
71
+ |-----|--------|
72
+ | `↑` `↓` | Navigate topics / problems |
73
+ | `←` `→` | Switch between topic and problem panes |
74
+ | `Enter` | Select topic or open problem |
75
+ | `Esc` | Go back |
76
+ | `q` | Quit |
77
+
78
+ ### Filters
79
+
80
+ | Key | Filter |
81
+ |-----|--------|
82
+ | `a` | All problems |
83
+ | `e` | Easy |
84
+ | `m` | Medium |
85
+ | `h` | Hard |
86
+ | `b` | Bookmarked |
87
+ | `s` | Stats dashboard |
88
+
89
+ ### Solve Screen
90
+
91
+ | Key | Action |
92
+ |-----|--------|
93
+ | `Ctrl+S` | Save code |
94
+ | `Ctrl+D` | Toggle solved |
95
+ | `Ctrl+E` | AI review |
96
+ | `Ctrl+L` | Switch Python / Go |
97
+ | `Ctrl+B` | Toggle bookmark |
98
+ | `Ctrl+T` | Pause / resume timer |
99
+ | `Ctrl+R` | Reset timer |
100
+ | `Ctrl+Shift+C` | Copy selection to clipboard |
101
+ | `Ctrl+Shift+V` | Paste from clipboard |
102
+ | `Alt+↑` / `Alt+↓` | Move line up / down |
103
+ | `Alt+Shift+↓` | Duplicate line |
104
+ | `Esc` | Save & go back |
105
+
106
+ ## Features
107
+
108
+ **Split-pane editor** — problem description on the left, code editor with syntax highlighting on the right.
109
+
110
+ **Auto-timer** — starts when you open a problem, tracks your best solve time.
111
+
112
+ **Three-state tracking** — each problem shows as not started (○), in progress (◐), or solved (✓).
113
+
114
+ **Stats dashboard** — overall progress, per-difficulty breakdown, per-topic progress bars, current streak, and top 10 best times.
115
+
116
+ **Sheet-agnostic** — built-in sheets live inside the package (`grindx/sheets/`). Format:
117
+
118
+ ```json
119
+ {
120
+ "Topic Name": ["problem-name-1", "problem-name-2"],
121
+ "Another Topic": ["problem-name-3"]
122
+ }
123
+ ```
124
+
125
+ **Progress safety** — automatic backups with corruption recovery. Progress, solutions, and backups are stored in `~/.grindx/` so they persist across installs and upgrades.
126
+
127
+ ## AI Review (optional)
128
+
129
+ Press `Ctrl+E` on the solve screen to get AI-powered feedback on your solution — correctness, edge cases, complexity analysis, and a pass/fail verdict.
130
+
131
+ ### Setup
132
+
133
+ Set the following environment variables (only the first two are needed for most providers):
134
+
135
+ ```bash
136
+ export GRINDX_AI_PROVIDER=groq # or ollama, anthropic, openai
137
+ export GRINDX_AI_MODEL=llama-3.3-70b-versatile # optional, sensible defaults per provider
138
+ export GRINDX_AI_KEY=gsk_... # not needed for ollama
139
+ export GRINDX_AI_URL=https://custom.api/v1 # optional, auto-detected per provider
140
+ ```
141
+
142
+ Or create `~/.grindx.toml`:
143
+
144
+ ```toml
145
+ [ai]
146
+ provider = "groq"
147
+ model = "llama-3.3-70b-versatile"
148
+ api_key = "gsk_..."
149
+ ```
150
+
151
+ ### Supported Providers
152
+
153
+ | Provider | API Key | Default Model | Notes |
154
+ |----------|---------|---------------|-------|
155
+ | `ollama` | No | llama3 | Local, free, no network |
156
+ | `groq` | Yes | llama-3.3-70b-versatile | Fast, free tier available |
157
+ | `anthropic` | Yes | claude-sonnet-4-20250514 | Claude |
158
+ | `openai` | Yes | gpt-4o | GPT |
159
+
160
+ Any OpenAI-compatible API works — set provider to `openai` and add `base_url`.
161
+
162
+ ## Built with
163
+
164
+ - [Textual](https://github.com/Textualize/textual) — TUI framework
165
+ - [tree-sitter](https://tree-sitter.github.io/) — syntax highlighting
166
+
167
+ ## License
168
+
169
+ MIT
grindx-0.1.0/README.md ADDED
@@ -0,0 +1,147 @@
1
+ # grindx
2
+
3
+ Distraction-free DSA practice in your terminal. Zero network footprint.
4
+
5
+ ![Python 3.10+](https://img.shields.io/badge/python-3.10%2B-blue)
6
+ ![License: MIT](https://img.shields.io/badge/license-MIT-green)
7
+
8
+ ## Why grindx?
9
+
10
+ - **Zero network calls** — everything runs locally, no tracking (optional AI review)
11
+ - **Terminal-native** — practice DSA without leaving your terminal or opening a browser
12
+ - **Multiple sheets** — Striver A2Z (316), Blind 75, NeetCode 150, Grind 75 built-in
13
+ - **Python & Go** — switch languages on the fly with `Ctrl+L`
14
+ - **Progress tracking** — solved/in-progress/not-started states, streaks, best times
15
+ - **Bookmarks & filters** — filter by difficulty (Easy/Medium/Hard) or bookmarked problems
16
+
17
+ ## Install
18
+
19
+ ```bash
20
+ pip install grindx
21
+ ```
22
+
23
+ Or with [pipx](https://pipx.pypa.io/) (recommended for CLI tools):
24
+
25
+ ```bash
26
+ pipx install grindx
27
+ ```
28
+
29
+ Or run from source:
30
+
31
+ ```bash
32
+ git clone https://github.com/xghostient/grindx.git
33
+ cd grindx
34
+ python3 -m venv .venv && source .venv/bin/activate
35
+ pip install -e .
36
+ grindx
37
+ ```
38
+
39
+ ## Usage
40
+
41
+ ```bash
42
+ grindx # CLI entry point
43
+ python -m grindx # or as a module
44
+ ```
45
+
46
+ ### Navigation
47
+
48
+ | Key | Action |
49
+ |-----|--------|
50
+ | `↑` `↓` | Navigate topics / problems |
51
+ | `←` `→` | Switch between topic and problem panes |
52
+ | `Enter` | Select topic or open problem |
53
+ | `Esc` | Go back |
54
+ | `q` | Quit |
55
+
56
+ ### Filters
57
+
58
+ | Key | Filter |
59
+ |-----|--------|
60
+ | `a` | All problems |
61
+ | `e` | Easy |
62
+ | `m` | Medium |
63
+ | `h` | Hard |
64
+ | `b` | Bookmarked |
65
+ | `s` | Stats dashboard |
66
+
67
+ ### Solve Screen
68
+
69
+ | Key | Action |
70
+ |-----|--------|
71
+ | `Ctrl+S` | Save code |
72
+ | `Ctrl+D` | Toggle solved |
73
+ | `Ctrl+E` | AI review |
74
+ | `Ctrl+L` | Switch Python / Go |
75
+ | `Ctrl+B` | Toggle bookmark |
76
+ | `Ctrl+T` | Pause / resume timer |
77
+ | `Ctrl+R` | Reset timer |
78
+ | `Ctrl+Shift+C` | Copy selection to clipboard |
79
+ | `Ctrl+Shift+V` | Paste from clipboard |
80
+ | `Alt+↑` / `Alt+↓` | Move line up / down |
81
+ | `Alt+Shift+↓` | Duplicate line |
82
+ | `Esc` | Save & go back |
83
+
84
+ ## Features
85
+
86
+ **Split-pane editor** — problem description on the left, code editor with syntax highlighting on the right.
87
+
88
+ **Auto-timer** — starts when you open a problem, tracks your best solve time.
89
+
90
+ **Three-state tracking** — each problem shows as not started (○), in progress (◐), or solved (✓).
91
+
92
+ **Stats dashboard** — overall progress, per-difficulty breakdown, per-topic progress bars, current streak, and top 10 best times.
93
+
94
+ **Sheet-agnostic** — built-in sheets live inside the package (`grindx/sheets/`). Format:
95
+
96
+ ```json
97
+ {
98
+ "Topic Name": ["problem-name-1", "problem-name-2"],
99
+ "Another Topic": ["problem-name-3"]
100
+ }
101
+ ```
102
+
103
+ **Progress safety** — automatic backups with corruption recovery. Progress, solutions, and backups are stored in `~/.grindx/` so they persist across installs and upgrades.
104
+
105
+ ## AI Review (optional)
106
+
107
+ Press `Ctrl+E` on the solve screen to get AI-powered feedback on your solution — correctness, edge cases, complexity analysis, and a pass/fail verdict.
108
+
109
+ ### Setup
110
+
111
+ Set the following environment variables (only the first two are needed for most providers):
112
+
113
+ ```bash
114
+ export GRINDX_AI_PROVIDER=groq # or ollama, anthropic, openai
115
+ export GRINDX_AI_MODEL=llama-3.3-70b-versatile # optional, sensible defaults per provider
116
+ export GRINDX_AI_KEY=gsk_... # not needed for ollama
117
+ export GRINDX_AI_URL=https://custom.api/v1 # optional, auto-detected per provider
118
+ ```
119
+
120
+ Or create `~/.grindx.toml`:
121
+
122
+ ```toml
123
+ [ai]
124
+ provider = "groq"
125
+ model = "llama-3.3-70b-versatile"
126
+ api_key = "gsk_..."
127
+ ```
128
+
129
+ ### Supported Providers
130
+
131
+ | Provider | API Key | Default Model | Notes |
132
+ |----------|---------|---------------|-------|
133
+ | `ollama` | No | llama3 | Local, free, no network |
134
+ | `groq` | Yes | llama-3.3-70b-versatile | Fast, free tier available |
135
+ | `anthropic` | Yes | claude-sonnet-4-20250514 | Claude |
136
+ | `openai` | Yes | gpt-4o | GPT |
137
+
138
+ Any OpenAI-compatible API works — set provider to `openai` and add `base_url`.
139
+
140
+ ## Built with
141
+
142
+ - [Textual](https://github.com/Textualize/textual) — TUI framework
143
+ - [tree-sitter](https://tree-sitter.github.io/) — syntax highlighting
144
+
145
+ ## License
146
+
147
+ MIT
@@ -0,0 +1,3 @@
1
"""grindx — Distraction-free DSA practice in your terminal."""

# Single source of truth for the package version; grindx.ai also embeds
# this in the HTTP User-Agent it sends to AI providers.
__version__ = "0.1.0"
@@ -0,0 +1,3 @@
1
"""Entry point for ``python -m grindx``."""

from grindx.app import main

# Guard so that importing grindx.__main__ (e.g. by tooling or tests) does
# not launch the TUI; only direct execution via `python -m grindx` does.
if __name__ == "__main__":
    main()
@@ -0,0 +1,360 @@
1
+ """AI evaluation for solutions — supports Anthropic, OpenAI, Groq, Ollama."""
2
+
3
+ import json
4
+ import os
5
+ import urllib.request
6
+ import urllib.error
7
+ from pathlib import Path
8
+
9
+ from . import __version__
10
+
11
# Optional user config file; read by load_ai_config() when it exists.
CONFIG_PATH = Path.home() / ".grindx.toml"

# provider name -> (default base URL, default model), applied by
# load_ai_config() when the user sets a provider but omits base_url/model.
_PROVIDER_DEFAULTS = {
    "ollama": ("http://localhost:11434", "llama3"),
    "anthropic": ("https://api.anthropic.com", "claude-sonnet-4-20250514"),
    "openai": ("https://api.openai.com", "gpt-4o"),
    "groq": ("https://api.groq.com/openai", "llama-3.3-70b-versatile"),
}

# Sent with every HTTP request so providers can identify the client.
_USER_AGENT = f"grindx/{__version__}"
21
+
22
+
23
def load_ai_config() -> dict:
    """Load AI config from ~/.grindx.toml, then override with env vars."""
    cfg = dict.fromkeys(("provider", "model", "api_key", "base_url"), "")

    if CONFIG_PATH.exists():
        _load_toml(cfg)

    # Environment variables win over the config file.
    for env_name, key in (
        ("GRINDX_AI_PROVIDER", "provider"),
        ("GRINDX_AI_MODEL", "model"),
        ("GRINDX_AI_KEY", "api_key"),
        ("GRINDX_AI_URL", "base_url"),
    ):
        value = os.environ.get(env_name)
        if value:
            cfg[key] = value

    # Fill in per-provider defaults for anything still unset.
    defaults = _PROVIDER_DEFAULTS.get(cfg["provider"].lower())
    if defaults:
        url, model = defaults
        cfg["base_url"] = cfg["base_url"] or url
        cfg["model"] = cfg["model"] or model

    return cfg
50
+
51
+
52
+ def _load_toml(config: dict):
53
+ """Load config from TOML file (tomllib or simple fallback)."""
54
+ try:
55
+ import tomllib
56
+ with open(CONFIG_PATH, "rb") as f:
57
+ data = tomllib.load(f)
58
+ ai = data.get("ai", {})
59
+ for k in config:
60
+ if k in ai:
61
+ config[k] = ai[k]
62
+ except ImportError:
63
+ _parse_toml_simple(config)
64
+
65
+
66
+ def _parse_toml_simple(config: dict):
67
+ """Minimal parser for flat [ai] section."""
68
+ in_ai = False
69
+ with open(CONFIG_PATH) as f:
70
+ for line in f:
71
+ line = line.strip()
72
+ if line == "[ai]":
73
+ in_ai = True
74
+ continue
75
+ if line.startswith("["):
76
+ in_ai = False
77
+ continue
78
+ if in_ai and "=" in line:
79
+ key, _, val = line.partition("=")
80
+ key = key.strip()
81
+ val = val.strip().strip('"').strip("'")
82
+ if key in config:
83
+ config[key] = val
84
+
85
+
86
+ def _build_prompt(problem: dict, code: str, lang: str) -> str:
87
+ examples = ""
88
+ for i, ex in enumerate(problem.get("examples", []), 1):
89
+ examples += f"\nExample {i}:\n Input: {ex['input']}\n Output: {ex['output']}"
90
+
91
+ return f"""You are a rigorous DSA solution evaluator. This is a well-known competitive programming problem. Evaluate the solution strictly.
92
+
93
+ **Problem:** {problem['name']}
94
+ **Difficulty:** {problem.get('difficulty', '?')}
95
+ **Description:** {problem.get('description', 'N/A')}
96
+ **Constraints:** {problem.get('constraints', 'N/A')}
97
+ {examples}
98
+
99
+ **Language:** {lang}
100
+ **Solution:**
101
+ ```
102
+ {code}
103
+ ```
104
+
105
+ You MUST respond in exactly this format with these exact headers. Do not skip any section.
106
+
107
+ ## Verdict
108
+ PASS, FAIL, or PARTIAL. One word, then one-line reason.
109
+
110
+ ## Failing Test Cases
111
+ If FAIL or PARTIAL, list 2-3 concrete test cases where this solution produces wrong output. Format each as:
112
+ - Input: `...`
113
+ - Expected: `...`
114
+ - Actual (from this code): `...`
115
+ - Why: one-line explanation
116
+
117
+ If PASS, write "None — solution handles all cases correctly."
118
+
119
+ ## Complexity Analysis
120
+ | | This Solution | Optimal |
121
+ |---|---|---|
122
+ | Time | O(?) | O(?) |
123
+ | Space | O(?) | O(?) |
124
+
125
+ This is a classic well-known problem. You know the optimal complexity — state it confidently.
126
+ If this solution is suboptimal, say so clearly.
127
+
128
+ ## Correctness Notes
129
+
130
+ ### What your code does
131
+ Walk through the submitted solution step by step. Explain the approach/algorithm the user is attempting. Be specific — reference line numbers.
132
+
133
+ ### What the expected approach is
134
+ Explain the correct/optimal algorithm for this well-known problem. How should it work?
135
+
136
+ ### Where it diverges
137
+ If the code is wrong or suboptimal, list each issue as:
138
+ - **Line X**: `<the code on that line>` — what it does wrong and what it should do instead.
139
+
140
+ If the code is correct, say "Your implementation is correct." and note any minor style improvements (but don't nitpick).
141
+
142
+ ---HINTS---
143
+
144
+ ## Hints
145
+ Adapt hints based on what the user has written:
146
+
147
+ **If the solution is empty/template (no real code):**
148
+ Give generic progressive hints like LeetCode, guiding toward the right approach:
149
+ 1. What pattern/technique applies to this problem?
150
+ 2. What data structure would help?
151
+ 3. Key insight to crack it.
152
+
153
+ **If the solution has real code but is wrong/partial:**
154
+ Acknowledge what the user got RIGHT first, then build on their existing approach:
155
+ 1. "Your [specific part] is correct. Now think about..."
156
+ 2. "You're using [their approach]. The issue is at [specific point]..."
157
+ 3. "To fix this, consider what happens when [edge case]..."
158
+
159
+ **If PASS but suboptimal:**
160
+ "Your solution works. To optimize: think about [technique] to reduce from O(x) to O(y)."
161
+
162
+ **If PASS and optimal:**
163
+ "Your solution is optimal. No hints needed."
164
+
165
+ NEVER give the full solution. Guide, don't solve.
166
+
167
+ Keep everything concise. No fluff."""
168
+
169
+
170
def evaluate(problem: dict, code: str, lang: str) -> str:
    """Send solution to AI for evaluation. Blocking call."""
    config = load_ai_config()

    if not config["provider"]:
        return _no_config_msg()

    prompt = _build_prompt(problem, code, lang)
    provider = config["provider"].lower()

    # Dispatch on provider family: everything except Anthropic speaks the
    # OpenAI chat-completions dialect.
    if provider == "anthropic":
        return _call_anthropic(config, prompt)
    if provider in {"openai", "ollama", "groq"}:
        return _call_openai_compat(config, prompt)

    return (
        f"# Unknown provider: `{config['provider']}`\n\n"
        f"Supported: `ollama`, `anthropic`, `openai`, `groq`\n\n"
        f"Any OpenAI-compatible API works — set provider to `openai` "
        f"and configure `base_url`."
    )
191
+
192
+
193
def _call_anthropic(config: dict, prompt: str) -> str:
    """POST *prompt* to Anthropic's /v1/messages endpoint and return the reply text."""
    if not config["api_key"]:
        return _missing_key_msg("anthropic", "GRINDX_AI_KEY")

    endpoint = config["base_url"].rstrip("/") + "/v1/messages"
    payload = json.dumps({
        "model": config["model"],
        "max_tokens": 1500,
        "messages": [{"role": "user", "content": prompt}],
    }).encode()

    request = urllib.request.Request(
        endpoint,
        data=payload,
        headers={
            "Content-Type": "application/json",
            "User-Agent": _USER_AGENT,
            "x-api-key": config["api_key"],
            "anthropic-version": "2023-06-01",
        },
    )

    # Anthropic returns the text under content[0].text.
    return _do_request(request, extractor=lambda d: d["content"][0]["text"])
212
+
213
+
214
def _call_openai_compat(config: dict, prompt: str) -> str:
    """Works for OpenAI, Groq, Ollama, and any OpenAI-compatible API."""
    provider = config["provider"].lower()
    # Ollama is local and keyless; every cloud provider needs a key.
    if not config["api_key"] and provider != "ollama":
        return _missing_key_msg(provider, "GRINDX_AI_KEY")

    endpoint = config["base_url"].rstrip("/") + "/v1/chat/completions"
    payload = json.dumps({
        "model": config["model"],
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 1500,
    }).encode()

    headers = {
        "Content-Type": "application/json",
        "User-Agent": _USER_AGENT,
    }
    if config["api_key"]:
        headers["Authorization"] = f"Bearer {config['api_key']}"

    # Local models can be slow to load, so give Ollama extra time.
    wait = 120 if provider == "ollama" else 60
    return _do_request(
        urllib.request.Request(endpoint, data=payload, headers=headers),
        extractor=lambda d: d["choices"][0]["message"]["content"],
        timeout=wait,
    )
243
+
244
+
245
+ def _do_request(req, extractor, timeout=60) -> str:
246
+ """Execute HTTP request with user-friendly error handling."""
247
+ try:
248
+ with urllib.request.urlopen(req, timeout=timeout) as resp:
249
+ data = json.loads(resp.read())
250
+ return extractor(data)
251
+ except urllib.error.HTTPError as e:
252
+ return _format_http_error(e)
253
+ except urllib.error.URLError as e:
254
+ reason = str(e.reason)
255
+ if "refused" in reason.lower():
256
+ return (
257
+ "# Connection Refused\n\n"
258
+ "Could not connect to the AI provider.\n\n"
259
+ "- **Ollama**: Make sure it's running (`ollama serve`)\n"
260
+ f"- **Cloud API**: Check your `base_url` setting\n"
261
+ f"- URL tried: `{req.full_url}`"
262
+ )
263
+ return f"# Connection Error\n\n{reason}"
264
+ except TimeoutError:
265
+ return (
266
+ "# Request Timed Out\n\n"
267
+ "The AI provider took too long to respond. "
268
+ "Try again or use a faster model."
269
+ )
270
+ except Exception as e:
271
+ return f"# Unexpected Error\n\n`{type(e).__name__}`: {e}"
272
+
273
+
274
+ def _format_http_error(e: urllib.error.HTTPError) -> str:
275
+ """Parse HTTP error into a helpful message."""
276
+ raw = ""
277
+ try:
278
+ raw = e.read().decode()
279
+ except Exception:
280
+ pass
281
+
282
+ # Try to extract message from JSON error body
283
+ detail = ""
284
+ try:
285
+ err_data = json.loads(raw)
286
+ # OpenAI/Groq format
287
+ if "error" in err_data:
288
+ err_obj = err_data["error"]
289
+ if isinstance(err_obj, dict):
290
+ detail = err_obj.get("message", "")
291
+ else:
292
+ detail = str(err_obj)
293
+ # Anthropic format
294
+ elif "message" in err_data:
295
+ detail = err_data["message"]
296
+ except (json.JSONDecodeError, KeyError):
297
+ detail = raw[:500] if raw else ""
298
+
299
+ hints = {
300
+ 401: "Invalid or missing API key. Check your `api_key` setting.",
301
+ 403: "Access denied. Your API key may lack permissions, or the provider is blocking the request.",
302
+ 404: "Endpoint not found. Check your `base_url` and `model` settings.",
303
+ 429: "Rate limited. Wait a moment and try again.",
304
+ 500: "Server error on the provider side. Try again later.",
305
+ 503: "Service unavailable. The provider may be overloaded.",
306
+ }
307
+
308
+ hint = hints.get(e.code, "")
309
+ lines = [f"# API Error ({e.code})"]
310
+ if detail:
311
+ lines.append(f"\n{detail}")
312
+ if hint:
313
+ lines.append(f"\n**Hint:** {hint}")
314
+
315
+ return "\n".join(lines)
316
+
317
+
318
+ def _missing_key_msg(provider: str, env_var: str) -> str:
319
+ return (
320
+ f"# API Key Required\n\n"
321
+ f"Provider `{provider}` needs an API key.\n\n"
322
+ f"Set it via:\n"
323
+ f"- **Config file** `~/.grindx.toml`:\n"
324
+ f" ```\n [ai]\n api_key = \"your-key-here\"\n ```\n\n"
325
+ f"- **Environment variable**:\n"
326
+ f" ```\n export {env_var}=your-key-here\n ```"
327
+ )
328
+
329
+
330
def _no_config_msg() -> str:
    """Markdown setup guide returned when no AI provider is configured."""
    # Returned verbatim to the UI, so keep it valid Markdown.
    return """# No AI Provider Configured

Set up AI evaluation with **one environment variable** or a config file.

## Quick Start (env vars)

```
export GRINDX_AI_PROVIDER=groq
export GRINDX_AI_KEY=gsk_...
```

## Or config file (`~/.grindx.toml`)

```toml
[ai]
provider = "groq"
model = "llama-3.3-70b-versatile"
api_key = "gsk_..."
```

## Supported Providers

| Provider | Needs API Key | Default Model |
|----------|--------------|---------------|
| `ollama` | No (local) | llama3 |
| `groq` | Yes | llama-3.3-70b-versatile |
| `anthropic` | Yes | claude-sonnet-4-20250514 |
| `openai` | Yes | gpt-4o |

Any OpenAI-compatible API works — set provider to `openai` and add `base_url`."""