codegpt-ai 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/app.py ADDED
@@ -0,0 +1,958 @@
1
+ """CodeGPT — Terminal UI with sidebar, powered by Ollama."""
2
+
3
+ import json
4
+ import os
5
+ import re
6
+ import subprocess
7
+ import shutil
8
+ import threading
9
+ import time
10
+ from pathlib import Path
11
+ from datetime import datetime
12
+
13
+ import requests
14
+ from textual import on, work
15
+ from textual.app import App, ComposeResult
16
+ from textual.binding import Binding
17
+ from textual.containers import Horizontal, Vertical, VerticalScroll, Center, Grid
18
+ from textual.css.query import NoMatches
19
+ from textual.reactive import reactive
20
+ from textual.screen import ModalScreen
21
+ from textual.widgets import (
22
+ Button, Footer, Header, Input, Label, ListItem, ListView,
23
+ Markdown, Static, RichLog,
24
+ )
25
+ from rich.text import Text
26
+
27
# --- Config ---

# Ollama's local chat endpoint; responses stream back as JSON lines.
OLLAMA_URL = "http://localhost:11434/api/chat"
DEFAULT_MODEL = "llama3.2"
# On-disk store for saved conversations (one JSON file per chat).
CHATS_DIR = Path.home() / ".codegpt" / "conversations"
# NOTE: import-time side effect — the directory is created as soon as this
# module is imported, not lazily on first save.
CHATS_DIR.mkdir(parents=True, exist_ok=True)

# System prompt prepended as the first message of every Ollama request.
SYSTEM_PROMPT = """You are an AI modeled after a highly technical, system-focused developer mindset.

Communication:
- Be direct, concise, and dense with information
- No fluff, no filler, no emojis
- No motivational or overly friendly tone
- Give conclusions first, then minimal necessary explanation

Thinking:
- Break problems into systems and components
- Optimize for efficiency, scalability, and control
- Focus on practical, real-world solutions
- Avoid over-explaining basic concepts

Behavior:
- Do not sugar-coat
- Do not moralize
- Do not give generic advice
- If uncertain, say so briefly
- If incorrect, correct immediately

Focus areas:
- AI, coding, automation, cybersecurity (defensive), system design

Style:
- Structured when useful (lists, steps, architecture)
- Blunt but intelligent
- Slightly dark, high-intensity tone is acceptable

Goal:
Deliver high-value, efficient, technically sharp responses with zero wasted words."""

# Starter prompts shown on the welcome screen; selectable by button click or
# by typing the suggestion's number in the input box.
SUGGESTIONS = [
    "Explain how TCP/IP works under the hood",
    "Write a Python script to monitor CPU usage",
    "What are the OWASP top 10 vulnerabilities?",
    "Design a REST API for a todo app",
]

# Matches a leading duration like "10s", "5 min", "2 hours" (value + unit).
# NOTE(review): neither TIME_PATTERN nor TIME_MULTIPLIERS is referenced
# anywhere in this file — presumably intended for the "Reminders" feature
# mentioned in the welcome popup; verify before removing.
TIME_PATTERN = re.compile(r"^(\d+)\s*(s|sec|m|min|h|hr|hour)s?\b", re.IGNORECASE)
TIME_MULTIPLIERS = {"s": 1, "sec": 1, "m": 60, "min": 60, "h": 3600, "hr": 3600, "hour": 3600}
75
+
76
+
77
+ # --- Helpers ---
78
+
79
def get_saved_chats():
    """Return saved chat files sorted newest-first (filenames start with a timestamp)."""
    candidates = CHATS_DIR.glob("*.json") if CHATS_DIR.exists() else []
    return sorted(candidates, reverse=True)
84
+
85
+
86
def chat_display_name(path):
    """Derive a human-readable title from a chat filename.

    Filenames look like ``YYYYMMDD_HHMM_some_slug.json``; the 14-character
    timestamp prefix is stripped when present.
    """
    stem = path.stem
    if len(stem) <= 14:
        # Too short to carry a timestamp prefix — show the stem as-is.
        return stem.replace("_", " ")
    return stem[14:].replace("_", " ").title()
92
+
93
+
94
def chat_date(path):
    """Return the ``YYYYMMDD`` date prefix of a chat filename, or '' if absent."""
    stem = path.stem
    # A full timestamp prefix is 13 chars (YYYYMMDD_HHMM); anything shorter
    # cannot contain a date.
    return stem[:8] if len(stem) >= 13 else ""
100
+
101
+
102
def save_chat(messages, model):
    """Persist a conversation to CHATS_DIR and return the new filename.

    The filename combines a minute-resolution timestamp with a slug built
    from the first user message.
    """
    CHATS_DIR.mkdir(parents=True, exist_ok=True)
    first_user = next((m["content"] for m in messages if m["role"] == "user"), "untitled")
    # Slugify: keep word chars/spaces/hyphens, then snake_case and lowercase.
    slug = re.sub(r'[^\w\s-]', '', first_user[:40]).strip().replace(' ', '_').lower()
    stamp = datetime.now().strftime("%Y%m%d_%H%M")
    filename = f"{stamp}_{slug}.json"
    payload = {"model": model, "messages": messages, "saved_at": datetime.now().isoformat()}
    (CHATS_DIR / filename).write_text(json.dumps(payload, indent=2))
    return filename
112
+
113
+
114
def load_chat(path):
    """Read a saved conversation file; return (messages, model)."""
    payload = json.loads(path.read_text())
    messages = payload.get("messages", [])
    model = payload.get("model", DEFAULT_MODEL)
    return messages, model
118
+
119
+
120
def ensure_ollama():
    """Start Ollama if not running. Returns list of available models.

    Probes the local tags endpoint; on connection failure, spawns a detached
    ``ollama serve`` process and polls for up to ~15 seconds.
    """
    def _model_names(timeout):
        # Raises requests.ConnectionError / requests.Timeout when the
        # daemon is unreachable.
        resp = requests.get("http://localhost:11434/api/tags", timeout=timeout)
        return [m["name"] for m in resp.json().get("models", [])]

    try:
        return _model_names(3)
    except (requests.ConnectionError, requests.Timeout):
        pass

    # Daemon not reachable — launch it detached so it outlives this process.
    subprocess.Popen(
        ["ollama", "serve"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        creationflags=subprocess.DETACHED_PROCESS if os.name == "nt" else 0,
    )
    for _ in range(15):
        time.sleep(1)
        try:
            return _model_names(2)
        except (requests.ConnectionError, requests.Timeout):
            continue
    # Gave up: caller gets an empty model list rather than an exception.
    return []
140
+
141
+
142
# --- Welcome Popup ---

# Sentinel file: its existence means the first-launch popup was already shown.
WELCOME_FLAG = Path.home() / ".codegpt" / ".welcomed"
145
+
146
class WelcomeModal(ModalScreen):
    """First-launch welcome popup.

    Shown once; dismissing it (button press or enter/escape/space) touches
    WELCOME_FLAG so subsequent launches skip it.
    """

    CSS = """
    WelcomeModal {
        align: center middle;
    }

    #welcome-dialog {
        width: 60;
        height: auto;
        max-height: 30;
        background: #161b22;
        border: thick #58a6ff;
        padding: 2 3;
    }

    #welcome-title {
        text-align: center;
        text-style: bold;
        color: #58a6ff;
        width: 100%;
        margin: 0 0 1 0;
    }

    #welcome-ascii {
        text-align: center;
        color: #238636;
        width: 100%;
        margin: 0 0 1 0;
    }

    #welcome-body {
        color: #c9d1d9;
        margin: 1 0;
    }

    #welcome-version {
        text-align: center;
        color: #8b949e;
        width: 100%;
        margin: 1 0;
    }

    #welcome-features {
        color: #8b949e;
        margin: 1 2;
    }

    #welcome-keys {
        color: #58a6ff;
        margin: 1 2;
    }

    #welcome-footer {
        text-align: center;
        color: #484f58;
        width: 100%;
        margin: 1 0 0 0;
    }

    #welcome-go-btn {
        width: 100%;
        margin: 1 0 0 0;
        background: #238636;
        color: white;
        text-style: bold;
        border: none;
        height: 3;
    }

    #welcome-go-btn:hover {
        background: #2ea043;
    }
    """

    def compose(self) -> ComposeResult:
        with Vertical(id="welcome-dialog"):
            yield Static(
                "C O D E G P T",
                id="welcome-title",
            )
            yield Static(
                " ██████╗ ██████╗ ████████╗\n"
                " ██╔════╝ ██╔══██╗╚══██╔══╝\n"
                " ██║ ███╗██████╔╝ ██║ \n"
                " ██║ ██║██╔═══╝ ██║ \n"
                " ╚██████╔╝██║ ██║ \n"
                " ╚═════╝ ╚═╝ ╚═╝ ",
                id="welcome-ascii",
            )
            yield Static(
                "Your local AI coding assistant.\n"
                "Powered by Ollama. Runs 100% on your machine.",
                id="welcome-body",
            )
            yield Static("v1.0.0", id="welcome-version")
            yield Static(
                "Features:\n"
                " * Multi-turn conversations\n"
                " * Streaming responses\n"
                " * Save & load chat history\n"
                " * Switch models on the fly\n"
                " * Copy, regenerate, edit messages\n"
                " * Reminders & suggestions\n"
                " * Sidebar with saved chats",
                id="welcome-features",
            )
            yield Static(
                "Shortcuts:\n"
                " Ctrl+N New chat Ctrl+S Save\n"
                " Ctrl+B Sidebar Ctrl+R Regen\n"
                " Ctrl+Q Quit Ctrl+C Copy",
                id="welcome-keys",
            )
            yield Button("Let's go", id="welcome-go-btn")
            yield Static("By Ark | Built with Textual", id="welcome-footer")

    def _mark_welcomed(self) -> None:
        """Record that the popup was shown, then close it.

        Shared by the button handler and the key handler — previously this
        mkdir/touch/dismiss sequence was duplicated in both.
        """
        WELCOME_FLAG.parent.mkdir(parents=True, exist_ok=True)
        WELCOME_FLAG.touch()
        self.dismiss()

    @on(Button.Pressed, "#welcome-go-btn")
    def on_go(self) -> None:
        self._mark_welcomed()

    def on_key(self, event) -> None:
        # Only confirm/dismiss keys close the popup; other keys are ignored.
        if event.key in ("enter", "escape", "space"):
            self._mark_welcomed()
277
+
278
+
279
+ # --- Widgets ---
280
+
281
class ChatMessage(Static):
    """A single chat message bubble (user text or AI markdown + optional stats)."""

    def __init__(self, role: str, content: str, stats: str = "") -> None:
        super().__init__()
        self.role = role        # "user" or "assistant"
        self.content = content  # raw message text (markdown for AI replies)
        self.stats = stats      # e.g. "120 tok | 1.3s | 90 tok/s"; may be empty

    def compose(self) -> ComposeResult:
        # User messages render as plain text; guard clause keeps the AI path flat.
        if self.role == "user":
            yield Static(self.content, classes="user-bubble")
            return
        yield Markdown(self.content, classes="ai-bubble")
        if self.stats:
            yield Static(self.stats, classes="msg-stats")
297
+
298
+
299
class WelcomeView(Static):
    """Landing view shown before any messages: greeting plus suggestion buttons."""

    def compose(self) -> ComposeResult:
        # Pick the greeting by time of day.
        hour = datetime.now().hour
        greeting = (
            "Good morning." if hour < 12
            else "Good afternoon." if hour < 18
            else "Good evening."
        )

        yield Static(f"\n\n{greeting}", classes="welcome-greeting")
        yield Static("How can I help you today?\n", classes="welcome-sub")
        # Button ids suggest-1..suggest-N map back to SUGGESTIONS indices.
        for number, suggestion in enumerate(SUGGESTIONS, start=1):
            yield Button(f" {suggestion}", id=f"suggest-{number}", classes="suggestion-btn")
315
+
316
+
317
class SidebarItem(ListItem):
    """Sidebar row for one saved conversation file."""

    def __init__(self, path: Path) -> None:
        super().__init__()
        # Kept so selection handlers can load/delete the underlying file.
        self.chat_path = path

    def compose(self) -> ComposeResult:
        title = chat_display_name(self.chat_path)
        when = chat_date(self.chat_path)
        yield Static(f"[bright_cyan]{title}[/]\n[dim]{when}[/]")
328
+
329
+
330
+ # --- Main App ---
331
+
332
class CodeGPT(App):
    """CodeGPT Terminal UI: sidebar of saved chats plus a streaming Ollama chat view."""

    TITLE = "CodeGPT"
    CSS = """
    Screen {
        layout: horizontal;
        background: #0d1117;
    }

    #sidebar {
        width: 28;
        background: #161b22;
        border-right: solid #30363d;
        padding: 0;
    }

    #sidebar-header {
        height: 3;
        background: #1a1f29;
        content-align: center middle;
        text-style: bold;
        color: #58a6ff;
        border-bottom: solid #30363d;
    }

    #new-chat-btn {
        width: 100%;
        margin: 1 1;
        background: #238636;
        color: white;
        text-style: bold;
        border: none;
        height: 3;
    }

    #new-chat-btn:hover {
        background: #2ea043;
    }

    #chat-list {
        background: #161b22;
        scrollbar-size: 1 1;
    }

    #chat-list > ListItem {
        padding: 1 1;
        background: #161b22;
        border-bottom: solid #21262d;
    }

    #chat-list > ListItem.-highlight {
        background: #1a2332;
    }

    #main {
        width: 1fr;
        background: #0d1117;
    }

    #messages-scroll {
        height: 1fr;
        scrollbar-size: 1 1;
        padding: 1 2;
    }

    #input-area {
        height: auto;
        max-height: 5;
        dock: bottom;
        padding: 1 2;
        background: #161b22;
        border-top: solid #30363d;
    }

    #chat-input {
        background: #0d1117;
        color: #c9d1d9;
        border: tall #30363d;
        padding: 0 1;
    }

    #chat-input:focus {
        border: tall #58a6ff;
    }

    #status-bar {
        height: 1;
        dock: bottom;
        background: #161b22;
        color: #8b949e;
        padding: 0 2;
        border-top: solid #21262d;
    }

    .welcome-greeting {
        text-align: center;
        text-style: bold;
        color: #c9d1d9;
        width: 100%;
    }

    .welcome-sub {
        text-align: center;
        color: #8b949e;
        width: 100%;
    }

    .suggestion-btn {
        width: 100%;
        margin: 0 4 1 4;
        background: #161b22;
        color: #c9d1d9;
        border: tall #30363d;
        height: 3;
        text-align: left;
    }

    .suggestion-btn:hover {
        background: #1a2332;
        border: tall #58a6ff;
    }

    .user-bubble {
        background: #1a2332;
        color: #c9d1d9;
        margin: 1 0 0 8;
        padding: 1 2;
        border: round #58a6ff;
    }

    .ai-bubble {
        background: #161b22;
        color: #c9d1d9;
        margin: 1 8 0 0;
        padding: 1 2;
        border: round #238636;
    }

    .msg-stats {
        color: #484f58;
        text-align: right;
        margin: 0 8 1 0;
    }

    .msg-label {
        color: #58a6ff;
        text-style: bold;
        margin: 1 0 0 0;
    }

    .msg-label-ai {
        color: #238636;
        text-style: bold;
        margin: 1 0 0 0;
    }

    .streaming-indicator {
        color: #8b949e;
        text-style: italic;
        margin: 0 0 0 1;
    }

    #delete-btn {
        width: 100%;
        margin: 0 1;
        background: #da3633;
        color: white;
        border: none;
        height: 3;
        display: none;
    }

    #delete-btn:hover {
        background: #f85149;
    }

    #action-bar {
        height: 1;
        dock: bottom;
        background: #0d1117;
        padding: 0 2;
        display: none;
    }

    .action-link {
        color: #58a6ff;
        margin: 0 2;
    }
    """

    BINDINGS = [
        Binding("ctrl+n", "new_chat", "New Chat"),
        Binding("ctrl+s", "save_chat", "Save"),
        Binding("ctrl+b", "toggle_sidebar", "Sidebar"),
        Binding("ctrl+q", "quit_app", "Quit"),
        Binding("ctrl+r", "regen", "Regenerate"),
        Binding("ctrl+c", "copy_last", "Copy"),
    ]

    show_sidebar = reactive(True)

    def __init__(self):
        super().__init__()
        self.messages = []              # history: list of {"role", "content"} dicts
        self.model = DEFAULT_MODEL
        self.system = SYSTEM_PROMPT     # current system prompt (changeable via /system)
        self.available_models = []      # filled from Ollama's tags endpoint on mount
        self.last_ai_response = ""      # most recent assistant reply, for copy/regen
        self.session_start = time.time()
        self.total_tokens = 0           # cumulative eval_count across responses
        self.is_streaming = False       # guards against concurrent sends
        self.active_chat_path = None    # path of the chat loaded from the sidebar, if any

    def compose(self) -> ComposeResult:
        yield Header(show_clock=True)
        with Horizontal():
            with Vertical(id="sidebar"):
                yield Static(" CodeGPT", id="sidebar-header")
                yield Button("+ New Chat", id="new-chat-btn")
                yield ListView(id="chat-list")
                yield Button("Delete Selected", id="delete-btn", variant="error")
            with Vertical(id="main"):
                with VerticalScroll(id="messages-scroll"):
                    yield WelcomeView(id="welcome")
                yield Static("", id="status-bar")
                with Horizontal(id="action-bar"):
                    yield Button("Copy", classes="action-link", id="copy-btn")
                    yield Button("Regen", classes="action-link", id="regen-btn")
                yield Input(placeholder="Message CodeGPT...", id="chat-input")
        yield Footer()

    def on_mount(self) -> None:
        """Start Ollama, populate the sidebar, and kick off the status ticker."""
        self.available_models = ensure_ollama()
        self.refresh_sidebar()
        self.update_status()
        self.query_one("#chat-input", Input).focus()

        # Refresh status bar every second
        self.set_interval(1, self.update_status)

        # Show welcome popup on first launch
        if not WELCOME_FLAG.exists():
            self.push_screen(WelcomeModal())

    def refresh_sidebar(self) -> None:
        """Reload sidebar conversation list (newest 20 chats)."""
        chat_list = self.query_one("#chat-list", ListView)
        chat_list.clear()
        for path in get_saved_chats()[:20]:
            chat_list.append(SidebarItem(path))

        # Show/hide delete button depending on whether any chats exist
        try:
            del_btn = self.query_one("#delete-btn", Button)
            del_btn.display = len(get_saved_chats()) > 0
        except NoMatches:
            pass

    def update_status(self) -> None:
        """Update bottom status bar: model, message/token counts, session time."""
        elapsed = int(time.time() - self.session_start)
        mins = elapsed // 60
        secs = elapsed % 60
        msg_count = len(self.messages)
        now = datetime.now().strftime("%H:%M:%S")
        status = self.query_one("#status-bar", Static)
        status.update(
            f" {self.model} | {msg_count} msgs | "
            f"{self.total_tokens} tokens | {mins}m {secs}s | {now}"
        )

    def show_welcome(self) -> None:
        """Clear the message area and show the welcome screen."""
        scroll = self.query_one("#messages-scroll", VerticalScroll)
        scroll.remove_children()
        scroll.mount(WelcomeView(id="welcome"))

    def add_user_message(self, text: str) -> None:
        """Mount a user bubble (label + message) in the chat view."""
        scroll = self.query_one("#messages-scroll", VerticalScroll)
        # Remove welcome if present
        try:
            scroll.query_one("#welcome").remove()
        except NoMatches:
            pass

        scroll.mount(Static("[bright_cyan bold]You[/]", classes="msg-label"))
        scroll.mount(ChatMessage("user", text))
        scroll.scroll_end(animate=False)

    def add_ai_message(self, text: str, stats: str = "") -> None:
        """Mount an AI bubble (label + message) and reveal the action bar."""
        scroll = self.query_one("#messages-scroll", VerticalScroll)
        scroll.mount(Static("[bright_green bold]AI[/]", classes="msg-label-ai"))
        scroll.mount(ChatMessage("assistant", text, stats))
        scroll.scroll_end(animate=False)

        # Show action bar
        try:
            self.query_one("#action-bar").display = True
        except NoMatches:
            pass

    def add_streaming_placeholder(self) -> None:
        """Mount the AI label plus a 'Thinking...' indicator that receives tokens."""
        scroll = self.query_one("#messages-scroll", VerticalScroll)
        # BUGFIX: drop any stale indicator left behind by a failed request —
        # mounting a second widget with the same id would raise a duplicate-ID
        # error in Textual.
        try:
            scroll.query_one("#stream-indicator").remove()
        except NoMatches:
            pass
        scroll.mount(Static("[bright_green bold]AI[/]", classes="msg-label-ai"))
        scroll.mount(Static("Thinking...", classes="streaming-indicator", id="stream-indicator"))
        scroll.scroll_end(animate=False)

    def update_streaming(self, text: str) -> None:
        """Replace the indicator's text with the partial response so far."""
        try:
            indicator = self.query_one("#stream-indicator", Static)
            indicator.update(text)
            scroll = self.query_one("#messages-scroll", VerticalScroll)
            scroll.scroll_end(animate=False)
        except NoMatches:
            pass

    def finish_streaming(self, text: str, stats: str = "") -> None:
        """Replace streaming placeholder (and its AI label) with the final message."""
        try:
            indicator = self.query_one("#stream-indicator")
            parent = indicator.parent
            if parent:
                children = list(parent.children)
                idx = children.index(indicator)
                indicator.remove()
                # Remove the AI label that was mounted before the indicator
                if idx > 0:
                    children[idx - 1].remove()
        except (NoMatches, ValueError):
            pass
        self.add_ai_message(text, stats)

    @work(thread=True)
    def send_message(self, user_text: str) -> None:
        """Send the conversation to Ollama and stream the reply into the view.

        Runs in a worker thread; all UI mutations go through call_from_thread.
        On failure the just-appended user message is rolled back so history
        stays consistent with what was actually answered.
        """
        self.is_streaming = True
        self.call_from_thread(self.add_user_message, user_text)

        self.messages.append({"role": "user", "content": user_text})
        self.call_from_thread(self.add_streaming_placeholder)

        # Ollama expects the system prompt as a leading message.
        ollama_messages = [{"role": "system", "content": self.system}]
        for msg in self.messages:
            ollama_messages.append({"role": msg["role"], "content": msg["content"]})

        try:
            response = requests.post(
                OLLAMA_URL,
                json={"model": self.model, "messages": ollama_messages, "stream": True},
                stream=True,
                timeout=120,
            )
            response.raise_for_status()

            full_response = []
            stats = ""

            # Each streamed line is a standalone JSON chunk; the final chunk
            # has done=True and carries timing/token counters.
            for line in response.iter_lines():
                if not line:
                    continue
                try:
                    chunk = json.loads(line)
                except json.JSONDecodeError:
                    continue
                if "message" in chunk and "content" in chunk["message"]:
                    token = chunk["message"]["content"]
                    full_response.append(token)
                    current = "".join(full_response)
                    # Update every few tokens to avoid UI lag
                    if len(full_response) % 3 == 0:
                        self.call_from_thread(self.update_streaming, current)

                if chunk.get("done"):
                    td = chunk.get("total_duration", 0)   # nanoseconds
                    ec = chunk.get("eval_count", 0)       # response tokens
                    pec = chunk.get("prompt_eval_count", 0)
                    ds = td / 1e9 if td else 0
                    tps = ec / ds if ds > 0 else 0
                    stats = f"{ec} tok | {ds:.1f}s | {tps:.0f} tok/s"
                    self.total_tokens += ec

            final = "".join(full_response)
            self.last_ai_response = final
            self.messages.append({"role": "assistant", "content": final})

            self.call_from_thread(self.finish_streaming, final, stats)
            self.call_from_thread(self.update_status)

        except requests.ConnectionError:
            # Roll back the user message so history stays consistent
            if self.messages and self.messages[-1]["role"] == "user":
                self.messages.pop()
            self.call_from_thread(self.update_streaming, "Error: Cannot connect to Ollama.")
        except requests.Timeout:
            if self.messages and self.messages[-1]["role"] == "user":
                self.messages.pop()
            self.call_from_thread(self.update_streaming, "Error: Request timed out.")
        except Exception as e:
            if self.messages and self.messages[-1]["role"] == "user":
                self.messages.pop()
            self.call_from_thread(self.update_streaming, f"Error: {e}")
        finally:
            self.is_streaming = False

    # --- Events ---

    @on(Input.Submitted, "#chat-input")
    def on_input_submitted(self, event: Input.Submitted) -> None:
        """Handle the input box: slash commands, suggestion numbers, or a message."""
        text = event.value.strip()
        if not text or self.is_streaming:
            return

        event.input.clear()

        # Commands
        if text.startswith("/"):
            cmd = text.split()[0].lower()
            self.handle_command(cmd, text)
            return

        # Suggestion number
        if text.isdigit():
            idx = int(text) - 1
            if 0 <= idx < len(SUGGESTIONS):
                text = SUGGESTIONS[idx]

        self.send_message(text)

    @on(Button.Pressed, "#new-chat-btn")
    def on_new_chat(self) -> None:
        self.action_new_chat()

    @on(Button.Pressed, "#delete-btn")
    def on_delete_chat(self) -> None:
        """Delete the highlighted sidebar chat file and refresh the list."""
        chat_list = self.query_one("#chat-list", ListView)
        if chat_list.highlighted_child is not None:
            item = chat_list.highlighted_child
            if isinstance(item, SidebarItem):
                item.chat_path.unlink(missing_ok=True)
                self.refresh_sidebar()
                self.notify("Chat deleted.", severity="warning")

    @on(Button.Pressed, "#copy-btn")
    def on_copy(self) -> None:
        self.action_copy_last()

    @on(Button.Pressed, "#regen-btn")
    def on_regen(self) -> None:
        self.action_regen()

    @on(Button.Pressed, ".suggestion-btn")
    def on_suggestion(self, event: Button.Pressed) -> None:
        """Send the suggestion whose button (suggest-N) was clicked."""
        btn_id = event.button.id or ""
        if btn_id.startswith("suggest-"):
            idx = int(btn_id.split("-")[1]) - 1
            if 0 <= idx < len(SUGGESTIONS):
                self.send_message(SUGGESTIONS[idx])

    @on(ListView.Selected, "#chat-list")
    def on_chat_selected(self, event: ListView.Selected) -> None:
        """Load the selected saved chat and rebuild the message view from it."""
        item = event.item
        if isinstance(item, SidebarItem):
            msgs, model = load_chat(item.chat_path)
            self.messages = msgs
            self.model = model
            self.active_chat_path = item.chat_path

            # Rebuild chat view
            scroll = self.query_one("#messages-scroll", VerticalScroll)
            scroll.remove_children()

            for msg in self.messages:
                if msg["role"] == "user":
                    scroll.mount(Static("[bright_cyan bold]You[/]", classes="msg-label"))
                    scroll.mount(ChatMessage("user", msg["content"]))
                else:
                    scroll.mount(Static("[bright_green bold]AI[/]", classes="msg-label-ai"))
                    scroll.mount(ChatMessage("assistant", msg["content"]))

            scroll.scroll_end(animate=False)
            self.update_status()

            # Restore last_ai_response so copy/regen work on the loaded chat.
            last_ai = [m for m in self.messages if m["role"] == "assistant"]
            if last_ai:
                self.last_ai_response = last_ai[-1]["content"]

            self.notify(f"Loaded: {chat_display_name(item.chat_path)}")

    # --- Commands ---

    def handle_command(self, cmd: str, full_text: str) -> None:
        """Dispatch a slash command; ``full_text`` is the raw input line."""
        if cmd == "/help":
            help_text = "\n".join(f" {k:<12} {v}" for k, v in {
                "/new": "New conversation (Ctrl+N)",
                "/save": "Save conversation (Ctrl+S)",
                "/model": "Switch model (/model <id>)",
                "/system": "Set system prompt",
                "/copy": "Copy last response (Ctrl+C)",
                "/regen": "Regenerate (Ctrl+R)",
                "/clear": "Clear screen",
                "/quit": "Exit (Ctrl+Q)",
            }.items())
            self.notify(help_text, title="Commands", timeout=8)

        elif cmd == "/model":
            new_model = full_text[len("/model "):].strip()
            if new_model:
                self.model = new_model
                self.update_status()
                self.notify(f"Model: {self.model}")
            else:
                # No argument: list what Ollama reported at startup.
                models = ", ".join(self.available_models[:5]) if self.available_models else "none found"
                self.notify(f"Available: {models}\nCurrent: {self.model}", title="Models", timeout=6)

        elif cmd == "/system":
            new_sys = full_text[len("/system "):].strip()
            if new_sys:
                self.system = new_sys
                self.notify("System prompt updated.")
            else:
                self.notify(f"Current: {self.system[:80]}...", timeout=6)

        elif cmd == "/clear":
            scroll = self.query_one("#messages-scroll", VerticalScroll)
            scroll.remove_children()
            self.show_welcome()

        elif cmd == "/save":
            self.action_save_chat()

        elif cmd == "/new":
            self.action_new_chat()

        elif cmd == "/copy":
            self.action_copy_last()

        elif cmd == "/regen":
            self.action_regen()

        elif cmd == "/quit":
            self.action_quit_app()

        else:
            self.notify(f"Unknown: {cmd}. Type /help", severity="warning")

    # --- Actions ---

    def action_new_chat(self) -> None:
        """Auto-save the current conversation (if any) and reset to a blank chat."""
        if self.messages:
            save_chat(self.messages, self.model)
            self.refresh_sidebar()

        self.messages = []
        self.last_ai_response = ""
        self.active_chat_path = None
        self.show_welcome()
        self.update_status()
        self.query_one("#chat-input", Input).focus()
        try:
            self.query_one("#action-bar").display = False
        except NoMatches:
            pass
        self.notify("New chat started.")

    def action_save_chat(self) -> None:
        """Save the current conversation and report the filename it was saved under."""
        if self.messages:
            filename = save_chat(self.messages, self.model)
            self.refresh_sidebar()
            # BUGFIX: previously notified the literal "(unknown)" and left
            # `filename` unused.
            self.notify(f"Saved: {filename}")
        else:
            self.notify("Nothing to save.", severity="warning")

    def action_toggle_sidebar(self) -> None:
        sidebar = self.query_one("#sidebar")
        sidebar.display = not sidebar.display

    def action_copy_last(self) -> None:
        """Copy the last AI response to the system clipboard (clip/xclip/pbcopy)."""
        if not self.last_ai_response:
            self.notify("No response to copy.", severity="warning")
            return
        try:
            if os.name == "nt":
                subprocess.run("clip", input=self.last_ai_response.encode("utf-8"), check=True)
            elif shutil.which("xclip"):
                subprocess.run(["xclip", "-selection", "clipboard"],
                               input=self.last_ai_response.encode(), check=True)
            elif shutil.which("pbcopy"):
                subprocess.run("pbcopy", input=self.last_ai_response.encode(), check=True)
            else:
                self.notify("No clipboard tool.", severity="error")
                return
            self.notify("Copied to clipboard.")
        except Exception as e:
            self.notify(f"Copy failed: {e}", severity="error")

    def action_regen(self) -> None:
        """Regenerate the last AI response by re-sending the last user message.

        BUGFIX: the last user message is now popped from history (and its
        widgets removed) before re-sending — send_message re-appends and
        re-mounts it, so the old code produced a duplicate user entry in
        both the history and the view.
        """
        if self.is_streaming:
            return
        scroll = self.query_one("#messages-scroll", VerticalScroll)
        widgets_to_drop = 0
        if self.messages and self.messages[-1]["role"] == "assistant":
            self.messages.pop()
            widgets_to_drop += 2  # AI label + bubble
        if self.messages and self.messages[-1]["role"] == "user":
            last_user = self.messages.pop()["content"]
            widgets_to_drop += 2  # user label + bubble
            for child in list(scroll.children)[-widgets_to_drop:]:
                child.remove()
            self.send_message(last_user)
        else:
            self.notify("Nothing to regenerate.", severity="warning")

    def action_quit_app(self) -> None:
        """Auto-save the conversation, then exit the app."""
        if self.messages:
            save_chat(self.messages, self.model)
        self.exit()
954
+
955
+
956
+ if __name__ == "__main__":
957
+ app = CodeGPT()
958
+ app.run()