codegpt-ai 2.17.0 → 2.26.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4)
  1. package/chat.py +50 -5
  2. package/desktop.py +839 -0
  3. package/package.json +3 -1
  4. package/tui.py +436 -0
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "codegpt-ai",
3
- "version": "2.17.0",
3
+ "version": "2.26.0",
4
4
  "description": "Local AI Assistant Hub — 123 commands, 26 tools, 8 agents, multi-AI, security. No cloud needed.",
5
5
  "author": "ArukuX",
6
6
  "license": "MIT",
@@ -37,6 +37,8 @@
37
37
  "files": [
38
38
  "bin/",
39
39
  "chat.py",
40
+ "desktop.py",
41
+ "tui.py",
40
42
  "ai_cli/",
41
43
  "CLAUDE.md",
42
44
  "README.md"
package/tui.py ADDED
@@ -0,0 +1,436 @@
1
+ """CodeGPT TUI — Terminal UI that looks like the desktop app. Works on Termux."""
2
+ import json
3
+ import os
4
+ import sys
5
+ import time
6
+ import requests
7
+ from pathlib import Path
8
+ from datetime import datetime
9
+
10
+ from rich.console import Console
11
+ from rich.panel import Panel
12
+ from rich.text import Text
13
+ from rich.table import Table
14
+ from rich.columns import Columns
15
+ from rich.markdown import Markdown
16
+ from rich.rule import Rule
17
+ from rich.align import Align
18
+ from prompt_toolkit import prompt
19
+ from prompt_toolkit.history import InMemoryHistory
20
+ from prompt_toolkit.styles import Style as PtStyle
21
+
22
+ # Config
23
+ OLLAMA_URL = os.environ.get("OLLAMA_URL", "http://localhost:11434/api/chat")
24
+ MODEL = "llama3.2"
25
+ SYSTEM = "You are a helpful AI assistant. Be concise and technical."
26
+
27
+ PERSONAS = {
28
+ "default": "You are a helpful AI assistant. Be concise and technical.",
29
+ "hacker": "You are a cybersecurity expert. Technical jargon, dark humor. Defensive only.",
30
+ "teacher": "You are a patient teacher. Step by step, analogies, examples.",
31
+ "roast": "You are a sarcastic code reviewer. Roast bad code but give the fix.",
32
+ "architect": "You are a system architect. Scalability, ASCII diagrams, trade-offs.",
33
+ "minimal": "Shortest answer possible. Code only. No commentary.",
34
+ }
35
+
36
def _try(url):
    """Return True if an Ollama server answers at *url* (probes /api/tags)."""
    try:
        r = requests.get(url.replace("/api/chat", "/api/tags"), timeout=2)
        return r.status_code == 200
    except requests.RequestException:
        # Any network failure (refused, timeout, DNS) means "not reachable".
        # A bare except here would also swallow KeyboardInterrupt mid-probe.
        return False
42
+
43
# Auto-connect — try saved URL, then localhost, then common LAN IPs.
_connected = False


def _save_ollama_url(url):
    """Persist the working Ollama URL so the next launch connects instantly."""
    cfg = Path.home() / ".codegpt"
    cfg.mkdir(parents=True, exist_ok=True)
    (cfg / "ollama_url").write_text(url)


# 1. Previously saved URL.
saved_url = Path.home() / ".codegpt" / "ollama_url"
if saved_url.exists():
    url = saved_url.read_text().strip()
    if url:
        if "/api/chat" not in url:
            url = url.rstrip("/") + "/api/chat"
        if _try(url):
            OLLAMA_URL = url
            _connected = True

# 2. Localhost (the default OLLAMA_URL).
if not _connected and _try(OLLAMA_URL):
    _connected = True

# 3. Common LAN addresses where Ollama is often hosted.
if not _connected:
    for ip in ["192.168.1.237", "192.168.1.1", "192.168.0.1", "10.0.2.2",
               "192.168.1.100", "192.168.1.50", "192.168.0.100", "192.168.0.50"]:
        test = f"http://{ip}:11434/api/chat"
        if _try(test):
            OLLAMA_URL = test
            _save_ollama_url(OLLAMA_URL)  # remember for next launch
            _connected = True
            break

# 4. Quick scan of our own /24 subnet (hosts .1 through .19).
if not _connected:
    import socket
    try:
        # UDP "connect" to a public IP just to learn our own address;
        # no packet is actually sent. The with-block closes the socket
        # even if connect() raises (the original leaked it on failure).
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))
            my_ip = s.getsockname()[0]
        subnet = ".".join(my_ip.split(".")[:3])
        for last in range(1, 20):
            test = f"http://{subnet}.{last}:11434/api/chat"
            if _try(test):
                OLLAMA_URL = test
                _save_ollama_url(OLLAMA_URL)
                _connected = True
                break
    except OSError:
        # No network / sandboxed environment — stay offline; the user
        # can still link a server manually with /connect.
        pass
93
+
94
# Load the saved CLI profile (display name + preferred model), if present.
profile_file = Path.home() / ".codegpt" / "profiles" / "cli_profile.json"
USERNAME = "User"
if profile_file.exists():
    try:
        p = json.loads(profile_file.read_text())
        USERNAME = p.get("name", "User")
        MODEL = p.get("model", MODEL)
    except (OSError, ValueError, AttributeError):
        # Unreadable file, invalid JSON, or a non-dict payload — keep defaults.
        # (JSONDecodeError is a ValueError subclass.)
        pass

# Base server URL without the chat endpoint; used for /api/tags probes.
OLLAMA_BASE = OLLAMA_URL.replace("/api/chat", "")

console = Console()
history = InMemoryHistory()
style = PtStyle.from_dict({
    "prompt": "ansicyan bold",
    "bottom-toolbar": "bg:#1a1a2e #888888",
})

# Mutable session state (rebound by handle_command / chat).
messages = []        # conversation as Ollama chat-message dicts
total_tokens = 0     # cumulative eval_count across replies
persona = "default"  # key into PERSONAS
system = SYSTEM      # active system prompt
show_sidebar = True
119
+
120
+
121
def try_connect():
    """Probe the Ollama server; return the list of model names ([] if offline).

    Identical contract to get_models() — it previously duplicated that body
    verbatim (with a 3 s timeout and a bare except); delegate instead.
    """
    return get_models()
127
+
128
+
129
def get_models():
    """Return names of models installed on the Ollama server, [] when unreachable."""
    try:
        r = requests.get(OLLAMA_BASE + "/api/tags", timeout=3)
        return [m["name"] for m in r.json().get("models", [])]
    except (requests.RequestException, ValueError, KeyError, TypeError):
        # Network failure, invalid JSON, or an unexpected payload shape —
        # all mean "no models". A bare except also caught KeyboardInterrupt.
        return []
135
+
136
+
137
def render_header():
    """Render the one-line status bar (brand, server status, model, persona,
    token count) followed by a horizontal rule."""
    # The original computed w = min(console.width, 100) but never used it.
    models = try_connect()
    status = "[green]online[/]" if models else "[red]offline[/]"

    console.print(Text.from_markup(
        f" [bold red]Code[/][bold bright_blue]GPT[/] [dim]TUI v2.0[/]"
        f" {status}"
        f" [dim]·[/] [bright_blue]{MODEL}[/]"
        f" [dim]·[/] [dim]{persona}[/]"
        f" [dim]·[/] [dim]{total_tokens} tok[/]"
    ))
    console.print(Rule(style="dim", characters="─"))
151
+
152
+
153
def render_sidebar():
    """Build the sidebar text: profile header, session stats, command cheat-sheet."""
    separator = "─" * 20
    rows = (
        f"[bold]{USERNAME}[/]",
        f"[dim]{separator}[/]",
        "",
        f"[dim]model[/] [bright_blue]{MODEL}[/]",
        f"[dim]persona[/] [green]{persona}[/]",
        f"[dim]msgs[/] {len(messages)}",
        f"[dim]tokens[/] {total_tokens}",
        "",
        "[bold]Commands[/]",
        "[dim]/model[/] switch",
        "[dim]/persona[/] change",
        "[dim]/new[/] clear",
        "[dim]/think[/] reason",
        "[dim]/help[/] all cmds",
        "[dim]/sidebar[/] toggle",
        "[dim]/quit[/] exit",
    )
    return "\n".join(rows)
173
+
174
+
175
def print_msg(role, content, stats=""):
    """Render one chat message: bold plain text for the user, markdown for the AI.

    *stats* (e.g. token count / elapsed time) is shown dimmed under AI replies.
    """
    if role == "user":
        console.print(Text(f" {content}", style="bold white"))
        console.print()
        return

    console.print(Rule(style="green", characters="─"))
    console.print(Markdown(content), width=min(console.width - 4, 90))
    if stats:
        console.print(Text(f" {stats}", style="dim"))
    console.print()
185
+
186
+
187
def chat(text):
    """Append *text* to the conversation, query Ollama, print the reply.

    Updates the module-level `messages` history and `total_tokens` counter.
    """
    global total_tokens
    messages.append({"role": "user", "content": text})

    console.print(Text(" Thinking...", style="dim"))

    payload = {
        "model": MODEL,
        "messages": [{"role": "system", "content": system}] + messages,
        "stream": False,
    }
    try:
        start = time.time()
        data = requests.post(OLLAMA_URL, json=payload, timeout=120).json()
        content = data.get("message", {}).get("content", "No response.")
        elapsed = round(time.time() - start, 1)
        tokens = data.get("eval_count", 0)
        total_tokens += tokens
        messages.append({"role": "assistant", "content": content})

        console.print("\033[A\033[K", end="")  # erase the "Thinking..." line
        print_msg("ai", content, f"{tokens} tok · {elapsed}s")
    except Exception as e:
        console.print("\033[A\033[K", end="")
        print_msg("ai", f"Error: {e}")
215
+
216
+
217
def handle_command(text):
    """Dispatch a slash command from the prompt.

    Returns:
        "quit" — caller should exit the REPL.
        True   — command recognised and handled.
        None   — not a known command; caller treats *text* as chat input.
    """
    global MODEL, persona, system, show_sidebar, messages, total_tokens
    # Declared up front rather than buried in the /connect branch: a global
    # statement after any use of the name is a SyntaxError, so keeping all
    # declarations at the top makes every branch safe to edit.
    global OLLAMA_URL, OLLAMA_BASE

    cmd = text.split()[0].lower()
    args = text[len(cmd):].strip()  # everything after the command word

    if cmd in ("/quit", "/q"):
        return "quit"

    elif cmd in ("/new", "/n"):
        messages = []
        os.system("clear")
        render_header()
        console.print(Text(" New conversation.", style="dim"))
        console.print()

    elif cmd in ("/model", "/m"):
        if args:
            MODEL = args
            console.print(Text(f" Model: {MODEL}", style="green"))
        else:
            # No argument: list installed models, marking the active one.
            models = get_models()
            if models:
                for m in models:
                    mark = " *" if m == MODEL else ""
                    console.print(Text(f" {m}{mark}", style="bright_blue" if mark else "dim"))
            else:
                console.print(Text(" Ollama offline.", style="red"))
        console.print()

    elif cmd in ("/persona", "/p"):
        if args and args in PERSONAS:
            persona = args
            system = PERSONAS[args]
            console.print(Text(f" Persona: {persona}", style="green"))
        else:
            # Unknown or missing persona: list the available ones.
            for name in PERSONAS:
                mark = " *" if name == persona else ""
                console.print(Text(f" {name}{mark}", style="green" if mark else "dim"))
        console.print()

    elif cmd in ("/think", "/t"):
        # Toggle: the appended marker substring doubles as the on/off flag.
        if "step-by-step" in system:
            system = PERSONAS.get(persona, SYSTEM)
            console.print(Text(" Think mode: OFF", style="dim"))
        else:
            system += "\n\nThink step-by-step. Show your reasoning."
            console.print(Text(" Think mode: ON", style="green"))
        console.print()

    elif cmd == "/sidebar":
        show_sidebar = not show_sidebar
        console.print(Text(f" Sidebar: {'on' if show_sidebar else 'off'}", style="dim"))
        console.print()

    elif cmd == "/tokens":
        console.print(Text(f" Session: {total_tokens} tokens, {len(messages)} messages", style="dim"))
        console.print()

    elif cmd == "/clear":
        os.system("clear")
        render_header()

    elif cmd == "/history":
        if messages:
            # Show the last 10 messages, truncated to 80 chars each.
            for m in messages[-10:]:
                role = "You" if m["role"] == "user" else "AI"
                console.print(Text(f" [{role}] {m['content'][:80]}", style="dim"))
        else:
            console.print(Text(" No messages.", style="dim"))
        console.print()

    elif cmd == "/connect":
        if args:
            url = args if args.startswith("http") else "http://" + args
            if ":" not in url.split("//")[1]:
                url += ":11434"  # default Ollama port
            OLLAMA_URL = url.rstrip("/") + "/api/chat"
            OLLAMA_BASE = OLLAMA_URL.replace("/api/chat", "")
            models = try_connect()
            if models:
                console.print(Text(f" Connected: {OLLAMA_BASE} ({len(models)} models)", style="green"))
                # Persist so the auto-connect at startup finds it next time.
                Path.home().joinpath(".codegpt", "ollama_url").write_text(OLLAMA_URL)
            else:
                console.print(Text(f" Cannot reach {url}", style="red"))
        else:
            console.print(Text(" Usage: /connect 192.168.1.100", style="dim"))
        console.print()

    elif cmd == "/server":
        models = try_connect()
        status = "online" if models else "offline"
        console.print(Text(f" Server: {OLLAMA_BASE} ({status})", style="green" if models else "red"))
        console.print()

    elif cmd == "/weather":
        city = args or "London"
        try:
            r = requests.get(f"https://wttr.in/{city}?format=j1", timeout=10)
            d = r.json()["current_condition"][0]
            console.print(Text(f" {city}: {d['weatherDesc'][0]['value']}, {d['temp_C']}C, {d['humidity']}% humidity", style="white"))
        except Exception:
            # Network failure or an unexpected JSON shape; the bare except
            # here previously also swallowed KeyboardInterrupt.
            console.print(Text(f" Cannot get weather for {city}", style="red"))
        console.print()

    elif cmd == "/agent":
        parts = args.split(maxsplit=1)
        if len(parts) >= 2:
            # One-shot specialist prompts, independent of the chat history.
            agents = {
                "coder": "You are an expert programmer. Write clean, working code.",
                "debugger": "You are a debugging expert. Find and fix bugs.",
                "reviewer": "You are a code reviewer. Check for bugs, security, performance.",
                "architect": "You are a system architect. Design with ASCII diagrams.",
                "pentester": "You are an ethical pentester. Find vulnerabilities.",
                "explainer": "You are a teacher. Explain simply with analogies.",
                "optimizer": "You are a performance engineer. Optimize code.",
                "researcher": "You are a research analyst. Deep-dive into topics.",
            }
            name, task = parts
            if name in agents:
                console.print(Text(f" Running {name} agent...", style="dim"))
                try:
                    resp = requests.post(OLLAMA_URL, json={
                        "model": MODEL,
                        "messages": [
                            {"role": "system", "content": agents[name]},
                            {"role": "user", "content": task},
                        ], "stream": False,
                    }, timeout=90)
                    content = resp.json().get("message", {}).get("content", "")
                    print_msg("ai", f"**{name}:** {content}")
                except Exception as e:
                    console.print(Text(f" Error: {e}", style="red"))
            else:
                console.print(Text(f" Agents: {', '.join(agents.keys())}", style="dim"))
        else:
            console.print(Text(" Usage: /agent coder build a flask API", style="dim"))
        console.print()

    elif cmd in ("/help", "/h"):
        cmds = {
            "/new": "New chat", "/model": "Switch model", "/persona": "Switch persona",
            "/think": "Toggle reasoning", "/tokens": "Token count", "/clear": "Clear screen",
            "/sidebar": "Toggle sidebar", "/history": "Show history", "/connect": "Remote Ollama",
            "/server": "Server info", "/weather": "Get weather", "/agent": "Run agent",
            "/help": "This list", "/quit": "Exit",
        }
        for c, d in cmds.items():
            console.print(Text.from_markup(f" [bright_blue]{c:<12}[/] [dim]{d}[/]"))
        console.print()

    else:
        return None  # not a recognised command

    return True
374
+
375
+
376
def toolbar():
    """Bottom-toolbar callback for prompt_toolkit: live session stats."""
    summary = (f" {len(messages)} msgs · {total_tokens} tok · "
               f"{MODEL} · {persona} · type / for commands ")
    return [("class:bottom-toolbar", summary)]
379
+
380
+
381
def main():
    """Entry point: draw the welcome screen, then run the prompt/response loop."""
    os.system("clear")

    # Banner.
    console.print()
    console.print(Text.from_markup(
        "[bold red] ╔══════════════════════════════════╗[/]\n"
        "[bold red] ║[/] [bold red]Code[/][bold bright_blue]GPT[/] [dim]TUI v2.0[/] [bold red]║[/]\n"
        "[bold red] ║[/] [dim]terminal ui · works everywhere[/] [bold red]║[/]\n"
        "[bold red] ╚══════════════════════════════════╝[/]"
    ))
    console.print()

    # Connection status line.
    models = try_connect()
    if models:
        console.print(Text.from_markup(f" [green]connected[/] · {len(models)} models · [bright_blue]{MODEL}[/]"))
    else:
        console.print(Text.from_markup(" [yellow]offline[/] · use [bright_blue]/connect IP[/] to link"))

    # Time-of-day greeting.
    hour = datetime.now().hour
    greeting = "Good morning" if hour < 12 else "Good afternoon" if hour < 18 else "Good evening"
    console.print(Text(f"\n {greeting}, {USERNAME}.", style="bold"))
    console.print(Text(" Type a message to chat. Type / for commands.\n", style="dim"))

    # REPL: Ctrl-C / Ctrl-D leaves the loop.
    while True:
        try:
            line = prompt(
                [("class:prompt", " ❯ ")],
                style=style,
                history=history,
                bottom_toolbar=toolbar,
            ).strip()
        except (KeyboardInterrupt, EOFError):
            break

        if not line:
            continue

        handled = handle_command(line) if line.startswith("/") else None
        if handled == "quit":
            break
        if handled is True:
            continue
        # Plain text, or an unrecognised /command — send it to the model.
        print_msg("user", line)
        chat(line)

    console.print(Text(f"\n {total_tokens} tokens · {len(messages)} messages\n", style="dim"))


if __name__ == "__main__":
    main()