codegpt-ai 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,11 @@
1
+ # Codex Instructions
2
+
3
+ You are working on **CodeGPT** — a local AI assistant hub built in Python. See CLAUDE.md for full project context.
4
+
5
+ Main file is `chat.py` (~3500 lines). Be careful with edits — it has 60+ commands wired together.
6
+
7
+ When making changes:
8
+ - Read the existing code first
9
+ - Don't break the command routing (elif chain in main loop)
10
+ - Keep Rich formatting consistent
11
+ - Test with: `python chat.py`
@@ -0,0 +1,158 @@
1
#!/data/data/com.termux/files/usr/bin/bash
# CodeGPT Installer for Termux
# Run: curl -sL https://raw.githubusercontent.com/CCguvycu/codegpt/main/install-termux.sh | bash
#
# Installs system packages, Ollama (three fallback methods), the CodeGPT
# repo, the `ai` command, a Termux:Widget shortcut, and boot auto-start.

set -e

echo ""
echo " ╔══════════════════════════════════════╗"
echo " ║ CodeGPT — Termux Installer ║"
echo " ╚══════════════════════════════════════╝"
echo ""

# Step 1: Install system deps
echo " [1/6] Installing system packages..."
pkg update -y -q 2>/dev/null
pkg install -y python git cmake golang curl 2>/dev/null

pip install --quiet requests rich prompt-toolkit 2>/dev/null

# Step 2: Install Ollama
echo " [2/6] Installing Ollama..."
if command -v ollama &>/dev/null; then
    echo " Ollama already installed."
else
    echo " Building Ollama from source (this takes a few minutes)..."

    # Method 1: Try the official install script
    curl -fsSL https://ollama.com/install.sh | sh 2>/dev/null && {
        echo " Ollama installed via official script."
    } || {
        # Method 2: Build from Go source
        echo " Official script failed. Building from Go..."
        pkg install -y golang 2>/dev/null

        OLLAMA_BUILD="$HOME/.ollama-build"
        rm -rf "$OLLAMA_BUILD"
        git clone --depth 1 https://github.com/ollama/ollama.git "$OLLAMA_BUILD" 2>/dev/null

        cd "$OLLAMA_BUILD"
        go build -o "$PREFIX/bin/ollama" . 2>/dev/null && {
            echo " Ollama built from source."
        } || {
            # Method 3: Download pre-built ARM binary
            echo " Go build failed. Trying pre-built binary..."
            ARCH=$(uname -m)
            if [ "$ARCH" = "aarch64" ]; then
                curl -sL "https://github.com/ollama/ollama/releases/latest/download/ollama-linux-arm64" -o "$PREFIX/bin/ollama" 2>/dev/null && {
                    chmod +x "$PREFIX/bin/ollama"
                    echo " Ollama binary downloaded."
                } || {
                    echo " WARNING: Could not install Ollama."
                    echo " You can connect to your PC's Ollama instead."
                }
            else
                echo " WARNING: Unsupported arch ($ARCH). Connect to PC Ollama."
            fi
        }

        rm -rf "$OLLAMA_BUILD"
        cd "$HOME"
    }
fi

# Step 3: Start Ollama and pull model
echo " [3/6] Setting up Ollama..."
if command -v ollama &>/dev/null; then
    # Start Ollama in background (probe the local API first to avoid a second server)
    if ! curl -s http://localhost:11434/api/tags &>/dev/null 2>&1; then
        echo " Starting Ollama server..."
        ollama serve &>/dev/null &
        sleep 5
    fi

    # Pull smallest model if none exist ("name:tag" lines imply at least one model)
    if ! ollama list 2>/dev/null | grep -q ":"; then
        echo " Pulling llama3.2:1b (smallest model, ~1.3GB)..."
        echo " This may take a few minutes on mobile data."
        ollama pull llama3.2:1b 2>/dev/null && {
            echo " Model ready."
        } || {
            echo " WARNING: Model pull failed. Try manually: ollama pull llama3.2:1b"
        }
    else
        echo " Models already available."
    fi

    # Auto-start Ollama on Termux boot (requires the Termux:Boot add-on)
    mkdir -p "$HOME/.termux/boot"
    echo '#!/data/data/com.termux/files/usr/bin/bash' > "$HOME/.termux/boot/ollama.sh"
    echo 'ollama serve &>/dev/null &' >> "$HOME/.termux/boot/ollama.sh"
    chmod +x "$HOME/.termux/boot/ollama.sh"
    echo " Ollama auto-start enabled."
else
    echo " Ollama not available. You can connect to your PC:"
    echo " When ai asks, enter: http://YOUR_PC_IP:11434/api/chat"
fi

# Step 4: Clone or update CodeGPT
INSTALL_DIR="$HOME/codegpt"
echo " [4/6] Setting up CodeGPT..."

if [ -d "$INSTALL_DIR/.git" ]; then
    cd "$INSTALL_DIR"
    git pull --quiet 2>/dev/null || true
else
    rm -rf "$INSTALL_DIR"
    git clone https://github.com/CCguvycu/codegpt.git "$INSTALL_DIR" 2>/dev/null
fi

# Step 5: Install ai command
echo " [5/6] Installing ai command..."
cd "$INSTALL_DIR"
pip install -e . --quiet 2>/dev/null || {
    # Fallback: create wrapper script
    cat > "$PREFIX/bin/ai" << 'WRAPPER'
#!/data/data/com.termux/files/usr/bin/bash
cd ~/codegpt && python -m ai_cli "$@"
WRAPPER
    chmod +x "$PREFIX/bin/ai"
}

# Step 6: Create shortcuts
echo " [6/6] Creating shortcuts..."
mkdir -p "$HOME/.shortcuts"
cat > "$HOME/.shortcuts/CodeGPT" << 'SHORTCUT'
#!/data/data/com.termux/files/usr/bin/bash
# Start Ollama if not running
command -v ollama &>/dev/null && ! curl -s http://localhost:11434/api/tags &>/dev/null 2>&1 && ollama serve &>/dev/null &
sleep 1
ai
SHORTCUT
chmod +x "$HOME/.shortcuts/CodeGPT"

# Also add an Ollama auto-start snippet to bashrc.
# FIX: guard on the '# CodeGPT' marker this block actually writes. The old
# guard grepped for "alias ai=", which was never written, so every re-run
# of the installer appended duplicate lines to .bashrc.
if ! grep -q "# CodeGPT" "$HOME/.bashrc" 2>/dev/null; then
    echo '# CodeGPT' >> "$HOME/.bashrc"
    echo 'command -v ollama &>/dev/null && ! curl -s http://localhost:11434/api/tags &>/dev/null 2>&1 && ollama serve &>/dev/null &' >> "$HOME/.bashrc"
    echo "" >> "$HOME/.bashrc"
fi

echo ""
echo " ╔══════════════════════════════════════╗"
echo " ║ Installation complete! ║"
echo " ║ ║"
echo " ║ Type: ai ║"
echo " ║ ║"
if command -v ollama &>/dev/null; then
    echo " ║ Ollama: installed ║"
    echo " ║ Model: llama3.2:1b ║"
else
    echo " ║ Ollama: not available ║"
    echo " ║ Connect to PC when prompted ║"
fi
echo " ║ ║"
echo " ║ Termux Widget shortcut created ║"
echo " ║ Ollama auto-starts on boot ║"
echo " ╚══════════════════════════════════════╝"
echo ""
package/install.ps1 ADDED
@@ -0,0 +1,89 @@
1
# CodeGPT Installer — run with: irm https://raw.githubusercontent.com/ArukuX/codegpt/main/install.ps1 | iex
# Installs ai.exe to %LOCALAPPDATA%\codegpt\ and adds to PATH

$ErrorActionPreference = "Stop"

$repo = "ArukuX/codegpt"
$installDir = "$env:LOCALAPPDATA\codegpt"
$exeName = "ai.exe"
$exePath = "$installDir\$exeName"

Write-Host ""
Write-Host " ╔══════════════════════════════════════╗" -ForegroundColor Cyan
Write-Host " ║ CodeGPT Installer ║" -ForegroundColor Cyan
Write-Host " ╚══════════════════════════════════════╝" -ForegroundColor Cyan
Write-Host ""

# Step 1: Get latest release
Write-Host " [1/4] Fetching latest release..." -ForegroundColor Yellow
try {
    $release = Invoke-RestMethod -Uri "https://api.github.com/repos/$repo/releases/latest" -Headers @{ "User-Agent" = "CodeGPT-Installer" }
    $version = $release.tag_name
    $asset = $release.assets | Where-Object { $_.name -eq $exeName } | Select-Object -First 1

    if (-not $asset) {
        Write-Host " ERROR: No ai.exe found in release $version" -ForegroundColor Red
        exit 1
    }

    Write-Host " Found version $version" -ForegroundColor Green
} catch {
    Write-Host " ERROR: Cannot reach GitHub. Check internet." -ForegroundColor Red
    exit 1
}

# Step 2: Download
Write-Host " [2/4] Downloading ai.exe..." -ForegroundColor Yellow
New-Item -ItemType Directory -Force -Path $installDir | Out-Null

try {
    $downloadUrl = $asset.browser_download_url
    Invoke-WebRequest -Uri $downloadUrl -OutFile $exePath -UseBasicParsing
    Write-Host " Downloaded to $exePath" -ForegroundColor Green
} catch {
    Write-Host " ERROR: Download failed: $_" -ForegroundColor Red
    exit 1
}

# Step 3: Add to PATH
Write-Host " [3/4] Adding to PATH..." -ForegroundColor Yellow
$userPath = [Environment]::GetEnvironmentVariable("Path", "User")

# FIX: when the user Path value is null/empty, the old code wrote
# ";$installDir", leaving an empty PATH entry (which Windows treats as the
# current directory). Handle the empty case explicitly.
if ([string]::IsNullOrEmpty($userPath)) {
    [Environment]::SetEnvironmentVariable("Path", $installDir, "User")
    $env:Path = "$env:Path;$installDir"
    Write-Host " Added $installDir to user PATH" -ForegroundColor Green
} elseif ($userPath -notlike "*$installDir*") {
    [Environment]::SetEnvironmentVariable("Path", "$userPath;$installDir", "User")
    $env:Path = "$env:Path;$installDir"
    Write-Host " Added $installDir to user PATH" -ForegroundColor Green
} else {
    Write-Host " Already in PATH" -ForegroundColor Green
}

# Step 4: Verify the binary runs (non-fatal — the file is installed either way)
Write-Host " [4/4] Verifying..." -ForegroundColor Yellow
try {
    $ver = & $exePath --version 2>&1
    Write-Host " Installed: $ver" -ForegroundColor Green
} catch {
    Write-Host " WARNING: Verify failed, but binary is installed" -ForegroundColor Yellow
}

# Check Ollama
Write-Host ""
if (Get-Command "ollama" -ErrorAction SilentlyContinue) {
    Write-Host " Ollama: found" -ForegroundColor Green
} else {
    Write-Host " Ollama: not found — install from https://ollama.com" -ForegroundColor Yellow
    Write-Host " Then run: ollama pull llama3.2" -ForegroundColor Yellow
}

Write-Host ""
Write-Host " ╔══════════════════════════════════════╗" -ForegroundColor Green
Write-Host " ║ Installation complete! ║" -ForegroundColor Green
Write-Host " ║ ║" -ForegroundColor Green
Write-Host " ║ Open a new terminal and type: ai ║" -ForegroundColor Green
Write-Host " ║ ║" -ForegroundColor Green
Write-Host " ║ Commands: ║" -ForegroundColor Green
Write-Host " ║ ai — start chat ║" -ForegroundColor Green
Write-Host " ║ ai update — update to latest ║" -ForegroundColor Green
Write-Host " ║ ai doctor — check dependencies ║" -ForegroundColor Green
Write-Host " ╚══════════════════════════════════════╝" -ForegroundColor Green
Write-Host ""
package/mobile.py ADDED
@@ -0,0 +1,422 @@
1
+ """CodeGPT Mobile — Flet app for Android + Desktop. Connects to Ollama."""
2
+
3
+ import json
4
+ import time
5
+ import threading
6
+ from pathlib import Path
7
+
8
+ import flet as ft
9
+ import requests
10
+
11
# --- Config ---

# Default backend endpoint — a hard-coded LAN address (presumably the author's
# PC running the CodeGPT server); overridable via the in-app Settings dialog.
DEFAULT_SERVER = "http://192.168.1.237:5050" # CodeGPT backend
DEFAULT_MODEL = "" # Empty = use server default
# Settings persist under ~/.codegpt/mobile_config.json (see load/save_config).
CONFIG_DIR = Path.home() / ".codegpt"
MOBILE_CONFIG = CONFIG_DIR / "mobile_config.json"

# System prompt sent as the first message of every conversation
# (used when the selected persona is "Default").
SYSTEM_PROMPT = """You are an AI modeled after a highly technical, system-focused developer mindset.
Be direct, concise, and dense with information. No fluff, no filler, no emojis.
Give conclusions first, then minimal necessary explanation.
Focus on: AI, coding, automation, cybersecurity, system design.
Blunt but intelligent. Slightly dark tone is acceptable.
Keep responses concise for mobile reading."""

# Persona name -> system prompt. Keys populate the Settings dropdown;
# the selected key is stored in the config under "persona".
PERSONAS = {
    "Default": SYSTEM_PROMPT,
    "Hacker": "You are a cybersecurity expert. Technical jargon, CVEs, attack vectors. Defensive security only. Be concise for mobile.",
    "Teacher": "You are a patient programming teacher. Step by step, analogies, examples. Adapt to the student. Concise for mobile.",
    "Roast": "You are a brutally sarcastic code reviewer. Roast bad code then give the fix. Dark humor. Concise for mobile.",
    "Minimal": "Shortest possible answer. One line if possible. Code only, no commentary.",
}
32
+
33
+
34
def load_config() -> dict:
    """Load the mobile config from disk, falling back to defaults.

    Returns a dict that always contains the "server", "model" and
    "persona" keys, even when the on-disk file is missing, corrupt,
    or only partially populated.

    FIX: previously a valid-JSON-but-partial file was returned as-is;
    main() reads config['model'] directly, so a file missing that key
    raised KeyError at startup. Loaded values are now merged over the
    defaults so required keys can never be absent.
    """
    defaults = {"server": DEFAULT_SERVER, "model": DEFAULT_MODEL, "persona": "Default"}
    CONFIG_DIR.mkdir(parents=True, exist_ok=True)
    if MOBILE_CONFIG.exists():
        try:
            loaded = json.loads(MOBILE_CONFIG.read_text())
            if isinstance(loaded, dict):
                # User values win; defaults fill any missing keys.
                return {**defaults, **loaded}
        except Exception:
            # Corrupt/unreadable file: silently fall back to defaults,
            # matching the original best-effort behavior.
            pass
    return defaults
42
+
43
+
44
def save_config(config):
    """Persist *config* to ~/.codegpt/mobile_config.json as pretty JSON."""
    # Ensure the config directory exists before writing.
    CONFIG_DIR.mkdir(parents=True, exist_ok=True)
    payload = json.dumps(config, indent=2)
    MOBILE_CONFIG.write_text(payload)
47
+
48
+
49
# --- Main App ---

def main(page: ft.Page):
    """Build the single-page Flet chat UI and wire up all event handlers.

    Everything (UI state, config, message history) lives in this closure;
    network requests run on a background thread so the UI stays responsive.
    """
    # Theme
    page.title = "CodeGPT"
    page.theme_mode = ft.ThemeMode.DARK
    page.theme = ft.Theme(
        color_scheme_seed=ft.Colors.CYAN,
        font_family="Roboto",
    )
    page.padding = 0

    # State
    config = load_config()
    messages = []               # chat history as {"role": ..., "content": ...} dicts
    is_streaming = [False]      # single-element list so closures can mutate the flag

    # --- UI Components ---

    chat_list = ft.ListView(
        expand=True,
        spacing=8,
        padding=ft.padding.symmetric(horizontal=12, vertical=8),
        auto_scroll=True,
    )

    input_field = ft.TextField(
        hint_text="Message CodeGPT...",
        border_radius=24,
        filled=True,
        expand=True,
        # lambda defers binding: send_message is defined later in this closure
        on_submit=lambda e: send_message(e),
        text_size=14,
        content_padding=ft.padding.symmetric(horizontal=16, vertical=10),
    )

    status_text = ft.Text(
        f"{config['model']} | 0 msgs",
        size=11,
        color=ft.Colors.with_opacity(0.5, ft.Colors.WHITE),
    )

    def update_status():
        """Refresh the bottom status line (model | message count | persona)."""
        status_text.value = f"{config['model']} | {len(messages)} msgs | {config.get('persona', 'Default')}"
        try:
            page.update()
        except Exception:
            # page may be gone (app closing) — ignore, best effort
            pass

    # --- Message Bubbles ---

    def add_user_bubble(text):
        """Append a right-aligned cyan bubble with the user's text."""
        chat_list.controls.append(
            ft.Container(
                content=ft.Column([
                    ft.Text("You", size=11, color=ft.Colors.CYAN, weight=ft.FontWeight.BOLD),
                    ft.Text(text, size=14, color=ft.Colors.WHITE, selectable=True),
                ], spacing=4),
                padding=ft.padding.all(12),
                border_radius=ft.border_radius.only(
                    top_left=16, top_right=16, bottom_left=16, bottom_right=4,
                ),
                bgcolor=ft.Colors.with_opacity(0.15, ft.Colors.CYAN),
                margin=ft.margin.only(left=48, bottom=4),
            )
        )

    def add_ai_bubble(text, stats=""):
        """Append a left-aligned green bubble rendering *text* as Markdown.

        Keyed "ai_latest" so streaming updates can find and replace it.
        """
        bubble = ft.Container(
            content=ft.Column([
                ft.Text("AI", size=11, color=ft.Colors.GREEN, weight=ft.FontWeight.BOLD),
                ft.Markdown(
                    text,
                    selectable=True,
                    extension_set=ft.MarkdownExtensionSet.GITHUB_WEB,
                    code_theme=ft.MarkdownCodeTheme.MONOKAI,
                    on_tap_link=lambda e: page.launch_url(e.data),
                ),
                # Footer line with token/provider stats, omitted when empty
                ft.Text(stats, size=10, color=ft.Colors.with_opacity(0.4, ft.Colors.WHITE)) if stats else ft.Container(),
            ], spacing=4),
            padding=ft.padding.all(12),
            border_radius=ft.border_radius.only(
                top_left=16, top_right=16, bottom_left=4, bottom_right=16,
            ),
            bgcolor=ft.Colors.with_opacity(0.1, ft.Colors.GREEN),
            margin=ft.margin.only(right=48, bottom=4),
            key="ai_latest",
        )
        chat_list.controls.append(bubble)

    def add_thinking_bubble():
        """Append a keyed placeholder bubble with a spinner while waiting."""
        chat_list.controls.append(
            ft.Container(
                content=ft.Row([
                    ft.ProgressRing(width=16, height=16, stroke_width=2, color=ft.Colors.GREEN),
                    ft.Text("Thinking...", size=13, color=ft.Colors.with_opacity(0.5, ft.Colors.WHITE), italic=True),
                ], spacing=8),
                padding=ft.padding.all(12),
                border_radius=16,
                bgcolor=ft.Colors.with_opacity(0.05, ft.Colors.GREEN),
                margin=ft.margin.only(right=48, bottom=4),
                key="thinking",
            )
        )

    def remove_thinking():
        """Drop the 'thinking' placeholder bubble, if present."""
        chat_list.controls[:] = [c for c in chat_list.controls if getattr(c, 'key', None) != "thinking"]

    # --- Send Message ---

    def send_message(e):
        """Submit the input field's text and stream the reply off-thread."""
        text = input_field.value.strip()
        # Ignore empty input and re-entrant sends while a reply is streaming
        if not text or is_streaming[0]:
            return

        input_field.value = ""
        add_user_bubble(text)
        add_thinking_bubble()
        page.update()

        messages.append({"role": "user", "content": text})
        is_streaming[0] = True

        def do_request():
            """Worker-thread body: POST to the backend and stream the reply."""
            server = config.get("server", DEFAULT_SERVER)
            model = config.get("model", DEFAULT_MODEL)
            persona = config.get("persona", "Default")
            system = PERSONAS.get(persona, SYSTEM_PROMPT)

            # NOTE(review): built but never sent — the request below posts
            # `messages` without the system prompt; presumably the backend
            # injects it from the "persona" field. Confirm against the server.
            ollama_messages = [{"role": "system", "content": system}]
            ollama_messages.extend(messages)

            try:
                response = requests.post(
                    f"{server}/chat",
                    json={
                        "messages": messages,
                        "model": model,
                        "persona": persona,
                    },
                    stream=True,
                    timeout=120,
                )
                response.raise_for_status()

                full = []        # accumulated reply fragments
                stats = ""       # footer text from the final chunk
                last_update = 0  # timestamp of last UI re-render (throttle)

                # Each response line is assumed to be a JSON object with
                # optional "content", "done", "stats"/"tokens"/"provider"
                # keys — TODO confirm against the backend's stream format.
                for line in response.iter_lines():
                    if not line:
                        continue
                    try:
                        chunk = json.loads(line)
                    except json.JSONDecodeError:
                        continue

                    content = chunk.get("content", "")
                    if content:
                        full.append(content)

                        # Re-render at most twice a second: replace the
                        # previous partial bubble with a fresh one.
                        now = time.time()
                        if now - last_update >= 0.5:
                            remove_thinking()
                            chat_list.controls[:] = [
                                c for c in chat_list.controls
                                if getattr(c, 'key', None) != "ai_latest"
                            ]
                            add_ai_bubble("".join(full), "streaming...")
                            try:
                                page.update()
                            except Exception:
                                pass
                            last_update = now

                    if chunk.get("done"):
                        stats = chunk.get("stats", "")
                        if not stats:
                            # Fall back to composing stats from token counts
                            tokens = chunk.get("tokens", {})
                            provider = chunk.get("provider", "")
                            out_tok = tokens.get("output", 0)
                            stats = f"{out_tok} tok | {provider}"

                final_text = "".join(full)
                messages.append({"role": "assistant", "content": final_text})

                # Final render
                remove_thinking()
                chat_list.controls[:] = [
                    c for c in chat_list.controls
                    if getattr(c, 'key', None) != "ai_latest"
                ]
                add_ai_bubble(final_text, stats)

            # On any failure: show an error bubble and roll back the user
            # message so a retry doesn't duplicate it in the history.
            except requests.ConnectionError:
                remove_thinking()
                add_ai_bubble("Cannot connect to Ollama.\nCheck server IP in settings.", "error")
                if messages and messages[-1]["role"] == "user":
                    messages.pop()
            except requests.Timeout:
                remove_thinking()
                add_ai_bubble("Request timed out.", "error")
                if messages and messages[-1]["role"] == "user":
                    messages.pop()
            except Exception as ex:
                remove_thinking()
                add_ai_bubble(f"Error: {ex}", "error")
                if messages and messages[-1]["role"] == "user":
                    messages.pop()
            finally:
                is_streaming[0] = False
                update_status()
                try:
                    page.update()
                except Exception:
                    pass

        # daemon=True: don't block app exit on an in-flight request
        threading.Thread(target=do_request, daemon=True).start()

    # --- Settings Dialog ---

    def open_settings(e):
        """Show the settings dialog (server URL, model, persona)."""
        server_field = ft.TextField(
            value=config.get("server", DEFAULT_SERVER),
            label="Ollama Server URL",
            border_radius=12,
            text_size=14,
        )
        model_field = ft.TextField(
            value=config.get("model", DEFAULT_MODEL),
            label="Model",
            border_radius=12,
            text_size=14,
        )
        persona_dropdown = ft.Dropdown(
            value=config.get("persona", "Default"),
            label="Persona",
            options=[ft.dropdown.Option(p) for p in PERSONAS],
            border_radius=12,
            text_size=14,
        )

        def save_settings(e):
            """Persist dialog values to disk and close the dialog."""
            config["server"] = server_field.value.strip().rstrip("/")
            config["model"] = model_field.value.strip()
            config["persona"] = persona_dropdown.value
            save_config(config)
            update_status()
            dlg.open = False
            page.update()
            page.open(ft.SnackBar(ft.Text("Settings saved"), duration=1500))

        def test_connection(e):
            """Ping the server's /health endpoint and report via snackbar."""
            server = server_field.value.strip().rstrip("/")
            try:
                resp = requests.get(f"{server}/health", timeout=5)
                data = resp.json()
                provider = data.get("provider", "?")
                model = data.get("model", "?")
                page.open(ft.SnackBar(
                    ft.Text(f"Connected. Provider: {provider}, Model: {model}"),
                    duration=3000,
                ))
            except Exception as ex:
                page.open(ft.SnackBar(ft.Text(f"Failed: {ex}"), duration=3000))

        dlg = ft.AlertDialog(
            title=ft.Text("Settings"),
            content=ft.Container(
                content=ft.Column([
                    server_field,
                    ft.ElevatedButton("Test Connection", on_click=test_connection, icon=ft.Icons.WIFI),
                    model_field,
                    persona_dropdown,
                ], spacing=16, tight=True),
                width=320,
                padding=ft.padding.only(top=8),
            ),
            actions=[
                # setattr(...) returns None, so `or page.update()` always runs
                ft.TextButton("Cancel", on_click=lambda e: setattr(dlg, 'open', False) or page.update()),
                ft.ElevatedButton("Save", on_click=save_settings),
            ],
        )
        page.open(dlg)

    # --- New Chat ---

    def new_chat(e):
        """Clear history and show the welcome screen with suggestion buttons."""
        messages.clear()
        chat_list.controls.clear()

        # Welcome
        chat_list.controls.append(
            ft.Container(
                content=ft.Column([
                    ft.Text("CodeGPT", size=28, weight=ft.FontWeight.BOLD, color=ft.Colors.CYAN,
                            text_align=ft.TextAlign.CENTER),
                    ft.Text("Local AI assistant on your phone.", size=14,
                            color=ft.Colors.with_opacity(0.5, ft.Colors.WHITE),
                            text_align=ft.TextAlign.CENTER),
                    ft.Divider(height=20, color=ft.Colors.TRANSPARENT),
                    *[
                        ft.OutlinedButton(
                            text=s,
                            # txt=s binds per-iteration (avoids late-binding closure bug)
                            on_click=lambda e, txt=s: (setattr(input_field, 'value', txt), send_message(e)),
                            style=ft.ButtonStyle(shape=ft.RoundedRectangleBorder(radius=12)),
                        )
                        for s in [
                            "Explain TCP/IP",
                            "Python CPU monitor script",
                            "OWASP top 10",
                            "Design a REST API",
                        ]
                    ],
                ], horizontal_alignment=ft.CrossAxisAlignment.CENTER, spacing=8),
                padding=ft.padding.all(32),
                alignment=ft.alignment.center,
            )
        )
        update_status()
        page.update()

    # --- App Bar ---

    page.appbar = ft.AppBar(
        leading=ft.Icon(ft.Icons.TERMINAL, color=ft.Colors.CYAN),
        title=ft.Text("CodeGPT", weight=ft.FontWeight.BOLD),
        center_title=False,
        bgcolor=ft.Colors.with_opacity(0.9, ft.Colors.BLACK),
        actions=[
            ft.IconButton(ft.Icons.ADD_COMMENT, on_click=new_chat, tooltip="New Chat"),
            ft.IconButton(ft.Icons.SETTINGS, on_click=open_settings, tooltip="Settings"),
        ],
    )

    # --- Bottom Input Bar ---

    send_btn = ft.IconButton(
        ft.Icons.SEND_ROUNDED,
        icon_color=ft.Colors.CYAN,
        on_click=send_message,
        tooltip="Send",
    )

    input_bar = ft.Container(
        content=ft.Row([input_field, send_btn], spacing=8),
        padding=ft.padding.symmetric(horizontal=12, vertical=8),
        bgcolor=ft.Colors.with_opacity(0.95, ft.Colors.BLACK),
        border=ft.border.only(top=ft.BorderSide(1, ft.Colors.with_opacity(0.1, ft.Colors.WHITE))),
    )

    # --- Status Bar ---

    status_bar = ft.Container(
        content=status_text,
        padding=ft.padding.symmetric(horizontal=16, vertical=4),
        bgcolor=ft.Colors.with_opacity(0.95, ft.Colors.BLACK),
    )

    # --- Layout ---

    page.add(
        ft.Column([
            ft.Container(content=chat_list, expand=True),
            status_bar,
            input_bar,
        ], expand=True, spacing=0),
    )

    # Show welcome
    new_chat(None)


# Module entry point: hand control to Flet's event loop.
ft.app(target=main)