codegpt-ai 2.18.0 → 2.27.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/chat.py +50 -5
- package/desktop.py +642 -177
- package/package.json +2 -1
- package/tui.py +469 -0
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "codegpt-ai",
|
|
3
|
-
"version": "2.
|
|
3
|
+
"version": "2.27.0",
|
|
4
4
|
"description": "Local AI Assistant Hub — 123 commands, 26 tools, 8 agents, multi-AI, security. No cloud needed.",
|
|
5
5
|
"author": "ArukuX",
|
|
6
6
|
"license": "MIT",
|
|
@@ -38,6 +38,7 @@
|
|
|
38
38
|
"bin/",
|
|
39
39
|
"chat.py",
|
|
40
40
|
"desktop.py",
|
|
41
|
+
"tui.py",
|
|
41
42
|
"ai_cli/",
|
|
42
43
|
"CLAUDE.md",
|
|
43
44
|
"README.md"
|
package/tui.py
ADDED
|
@@ -0,0 +1,469 @@
|
|
|
1
|
+
"""CodeGPT TUI — Terminal UI that looks like the desktop app. Works on Termux."""
|
|
2
|
+
import json
|
|
3
|
+
import os
|
|
4
|
+
import sys
|
|
5
|
+
import time
|
|
6
|
+
import requests
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
|
|
10
|
+
from rich.console import Console
|
|
11
|
+
from rich.panel import Panel
|
|
12
|
+
from rich.text import Text
|
|
13
|
+
from rich.table import Table
|
|
14
|
+
from rich.columns import Columns
|
|
15
|
+
from rich.markdown import Markdown
|
|
16
|
+
from rich.rule import Rule
|
|
17
|
+
from rich.align import Align
|
|
18
|
+
from prompt_toolkit import prompt
|
|
19
|
+
from prompt_toolkit.history import InMemoryHistory
|
|
20
|
+
from prompt_toolkit.completion import Completer, Completion
|
|
21
|
+
from prompt_toolkit.styles import Style as PtStyle
|
|
22
|
+
|
|
23
|
+
# Config — Ollama endpoint, default model, and base system prompt.
# OLLAMA_URL can come from the environment here, but may be rebound below
# by the saved-URL / LAN autodiscovery logic and by /connect at runtime.
OLLAMA_URL = os.environ.get("OLLAMA_URL", "http://localhost:11434/api/chat")
MODEL = "llama3.2"  # default; overridden by the saved profile further down
SYSTEM = "You are a helpful AI assistant. Be concise and technical."

# Named system prompts selectable at runtime via the /persona command.
# Keys are the names the user types; values replace the active `system`.
PERSONAS = {
    "default": "You are a helpful AI assistant. Be concise and technical.",
    "hacker": "You are a cybersecurity expert. Technical jargon, dark humor. Defensive only.",
    "teacher": "You are a patient teacher. Step by step, analogies, examples.",
    "roast": "You are a sarcastic code reviewer. Roast bad code but give the fix.",
    "architect": "You are a system architect. Scalability, ASCII diagrams, trade-offs.",
    "minimal": "Shortest answer possible. Code only. No commentary.",
}
|
|
36
|
+
|
|
37
|
+
def _try(url):
    """Return True if an Ollama server answers at *url*.

    *url* may point at the chat endpoint; it is rewritten to the cheap
    /api/tags listing endpoint for the 2-second probe.
    """
    try:
        r = requests.get(url.replace("/api/chat", "/api/tags"), timeout=2)
        return r.status_code == 200
    except Exception:
        # Narrowed from a bare `except:` — a bare clause would also swallow
        # KeyboardInterrupt/SystemExit during the import-time network scans.
        return False
|
|
43
|
+
|
|
44
|
+
# Auto-connect — try saved URL, localhost, common LAN IPs.
# Runs at import time; rebinds the module-level OLLAMA_URL on success.
_connected = False

# 1. Saved URL — a previous successful /connect or scan is persisted here.
saved_url = Path.home() / ".codegpt" / "ollama_url"
if saved_url.exists():
    url = saved_url.read_text().strip()
    if url:
        # Normalize bare host URLs to the chat endpoint before probing.
        if "/api/chat" not in url:
            url = url.rstrip("/") + "/api/chat"
        if _try(url):
            OLLAMA_URL = url
            _connected = True

# 2. Localhost (or whatever OLLAMA_URL env var pointed at).
if not _connected and _try(OLLAMA_URL):
    _connected = True

# 3. Common LAN IPs — scan for Ollama on the network
if not _connected:
    for ip in ["192.168.1.237", "192.168.1.1", "192.168.0.1", "10.0.2.2",
               "192.168.1.100", "192.168.1.50", "192.168.0.100", "192.168.0.50"]:
        test = f"http://{ip}:11434/api/chat"
        if _try(test):
            OLLAMA_URL = test
            # Save for next time
            Path.home().joinpath(".codegpt").mkdir(parents=True, exist_ok=True)
            Path.home().joinpath(".codegpt", "ollama_url").write_text(OLLAMA_URL)
            _connected = True
            break

# 4. Quick subnet scan (192.168.1.x)
if not _connected:
    import socket
    try:
        # UDP connect sends no packets; it only selects the outbound
        # interface so getsockname() reveals this machine's LAN address.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("8.8.8.8", 80))
        my_ip = s.getsockname()[0]
        s.close()
        subnet = ".".join(my_ip.split(".")[:3])
        for last in range(1, 20):  # Scan .1 to .19
            test = f"http://{subnet}.{last}:11434/api/chat"
            if _try(test):
                OLLAMA_URL = test
                Path.home().joinpath(".codegpt").mkdir(parents=True, exist_ok=True)
                Path.home().joinpath(".codegpt", "ollama_url").write_text(OLLAMA_URL)
                _connected = True
                break
    except:
        # Best-effort discovery: any socket/filesystem failure just leaves
        # the app offline (the user can still /connect manually).
        pass
|
|
94
|
+
|
|
95
|
+
# Load profile — optional JSON written by the CLI carrying the user's
# display name and preferred model; missing/corrupt files fall back to
# the defaults.
profile_file = Path.home() / ".codegpt" / "profiles" / "cli_profile.json"
USERNAME = "User"
if profile_file.exists():
    try:
        p = json.loads(profile_file.read_text())
        USERNAME = p.get("name", "User")
        MODEL = p.get("model", MODEL)
    except (OSError, ValueError):
        # Narrowed from a bare `except:`; json.JSONDecodeError is a
        # ValueError, and read_text() failures surface as OSError. A bare
        # clause would also swallow KeyboardInterrupt at import time.
        pass

# Base server URL (without the /api/chat path) used for GET endpoints.
OLLAMA_BASE = OLLAMA_URL.replace("/api/chat", "")

console = Console()
|
|
109
|
+
|
|
110
|
+
# Slash commands surfaced by the prompt auto-completer (TuiCompleter),
# mapped to the short description shown next to each completion.
# Dispatch itself lives in handle_command().
TUI_COMMANDS = {
    "/help": "Show all commands",
    "/new": "New conversation",
    "/model": "Switch model",
    "/models": "List all models",
    "/persona": "Switch persona",
    "/think": "Toggle deep thinking",
    "/tokens": "Token count",
    "/clear": "Clear screen",
    "/sidebar": "Toggle sidebar",
    "/history": "Show history",
    "/connect": "Connect to remote Ollama",
    "/server": "Server info",
    "/weather": "Get weather",
    "/agent": "Run an AI agent",
    "/quit": "Exit",
}
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
class TuiCompleter(Completer):
    """Prompt-toolkit completer that suggests slash commands.

    Suggestions appear only while the text before the cursor starts with
    "/" and are matched case-insensitively by prefix against TUI_COMMANDS.
    """

    def get_completions(self, document, complete_event):
        entered = document.text_before_cursor.lstrip()
        if not entered.startswith("/"):
            return
        prefix = entered.lower()
        replace_from = -len(entered)
        for name, description in TUI_COMMANDS.items():
            if name.startswith(prefix):
                yield Completion(
                    name,
                    start_position=replace_from,
                    display=name,
                    display_meta=description,
                )
|
|
137
|
+
|
|
138
|
+
# Shared prompt-toolkit plumbing for the input line.
cmd_completer = TuiCompleter()
history = InMemoryHistory()
style = PtStyle.from_dict({
    "prompt": "ansicyan bold",
    "bottom-toolbar": "bg:#1a1a2e #888888",
})

# Mutable session state (rebound by chat() and handle_command()).
messages = []        # conversation so far: {"role", "content"} dicts
total_tokens = 0     # cumulative eval_count across the session
persona = "default"  # active key into PERSONAS
system = SYSTEM      # active system prompt (persona text + optional think suffix)
show_sidebar = True  # toggled by /sidebar
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def try_connect():
    """Return the model names served by the current OLLAMA_BASE, or [] if unreachable.

    Used both as a connectivity probe (truthy result == online) and as a
    model listing.
    """
    try:
        r = requests.get(OLLAMA_BASE + "/api/tags", timeout=3)
        return [m["name"] for m in r.json().get("models", [])]
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C is not swallowed mid-probe.
        return []
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
def get_models():
    """Return the model names served by the current OLLAMA_BASE, or [] on failure.

    NOTE(review): this duplicates try_connect() line-for-line; kept as a
    separate symbol so existing callers of either name keep working.
    """
    try:
        r = requests.get(OLLAMA_BASE + "/api/tags", timeout=3)
        return [m["name"] for m in r.json().get("models", [])]
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C is not swallowed mid-request.
        return []
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def render_header():
    """Render the top bar: title, server status, model, persona, token count.

    Probes the server via try_connect() each call, so status is live.
    """
    # (The original computed an unused `w = min(console.width, 100)`; removed.)
    models = try_connect()
    status = "[green]online[/]" if models else "[red]offline[/]"

    console.print(Text.from_markup(
        f" [bold red]Code[/][bold bright_blue]GPT[/] [dim]TUI v2.0[/]"
        f" {status}"
        f" [dim]·[/] [bright_blue]{MODEL}[/]"
        f" [dim]·[/] [dim]{persona}[/]"
        f" [dim]·[/] [dim]{total_tokens} tok[/]"
    ))
    console.print(Rule(style="dim", characters="─"))
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
def render_sidebar():
    """Build the sidebar text: user name, session stats, and a command cheat-sheet."""
    rows = [
        f"[bold]{USERNAME}[/]",
        f"[dim]{'─' * 20}[/]",
        "",
        f"[dim]model[/] [bright_blue]{MODEL}[/]",
        f"[dim]persona[/] [green]{persona}[/]",
        f"[dim]msgs[/] {len(messages)}",
        f"[dim]tokens[/] {total_tokens}",
        "",
        "[bold]Commands[/]",
        "[dim]/model[/] switch",
        "[dim]/persona[/] change",
        "[dim]/new[/] clear",
        "[dim]/think[/] reason",
        "[dim]/help[/] all cmds",
        "[dim]/sidebar[/] toggle",
        "[dim]/quit[/] exit",
    ]
    return "\n".join(rows)
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
def print_msg(role, content, stats=""):
    """Print one chat message.

    User turns are a single bold line; AI turns get a green rule, the
    content rendered as Markdown, and an optional dim stats footer.
    """
    if role != "user":
        console.print(Rule(style="green", characters="─"))
        console.print(Markdown(content), width=min(console.width - 4, 90))
        if stats:
            console.print(Text(f" {stats}", style="dim"))
    else:
        console.print(Text(f" {content}", style="bold white"))
    console.print()
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
def chat(text):
    """Append *text* as a user turn, query Ollama, and print the reply.

    Mutates the module-level `messages` list and `total_tokens` counter.
    Network/decoding failures are reported inline rather than raised.
    """
    global total_tokens
    messages.append({"role": "user", "content": text})

    console.print(Text(" Thinking...", style="dim"))

    started = time.time()
    payload = {
        "model": MODEL,
        "messages": [{"role": "system", "content": system}] + messages,
        "stream": False,
    }
    try:
        data = requests.post(OLLAMA_URL, json=payload, timeout=120).json()
        reply = data.get("message", {}).get("content", "No response.")
        elapsed = round(time.time() - started, 1)
        used = data.get("eval_count", 0)
        total_tokens += used
        messages.append({"role": "assistant", "content": reply})

        # Erase the "Thinking..." line (cursor-up + kill-line) before printing.
        console.print("\033[A\033[K", end="")
        print_msg("ai", reply, f"{used} tok · {elapsed}s")

    except Exception as e:
        console.print("\033[A\033[K", end="")
        print_msg("ai", f"Error: {e}")
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
def handle_command(text):
    """Handle slash commands.

    Returns:
        "quit" — caller should exit the REPL.
        True   — command recognized and handled.
        None   — not a known command (caller treats *text* as chat input).
    """
    # All session state lives at module level. /connect also rebinds the
    # server URL, so declare every rebound global up front — the original
    # declared OLLAMA_URL/OLLAMA_BASE mid-function inside the /connect
    # branch, which is legal only while no earlier line touches them.
    global MODEL, persona, system, show_sidebar, messages, total_tokens
    global OLLAMA_URL, OLLAMA_BASE

    cmd = text.split()[0].lower()
    args = text[len(cmd):].strip()

    if cmd == "/quit" or cmd == "/q":
        return "quit"

    elif cmd == "/new" or cmd == "/n":
        messages = []
        os.system("clear")
        render_header()
        console.print(Text(" New conversation.", style="dim"))
        console.print()

    elif cmd == "/model" or cmd == "/m":
        if args:
            MODEL = args
            console.print(Text(f" Model: {MODEL}", style="green"))
        else:
            models = get_models()
            if models:
                for m in models:
                    mark = " *" if m == MODEL else ""
                    console.print(Text(f" {m}{mark}", style="bright_blue" if mark else "dim"))
            else:
                console.print(Text(" Ollama offline.", style="red"))
        console.print()

    elif cmd == "/persona" or cmd == "/p":
        if args and args in PERSONAS:
            persona = args
            system = PERSONAS[args]
            console.print(Text(f" Persona: {persona}", style="green"))
        else:
            for name in PERSONAS:
                mark = " *" if name == persona else ""
                console.print(Text(f" {name}{mark}", style="green" if mark else "dim"))
        console.print()

    elif cmd == "/think" or cmd == "/t":
        # The appended suffix doubles as the on/off flag.
        if "step-by-step" in system:
            system = PERSONAS.get(persona, SYSTEM)
            console.print(Text(" Think mode: OFF", style="dim"))
        else:
            system += "\n\nThink step-by-step. Show your reasoning."
            console.print(Text(" Think mode: ON", style="green"))
        console.print()

    elif cmd == "/sidebar":
        show_sidebar = not show_sidebar
        console.print(Text(f" Sidebar: {'on' if show_sidebar else 'off'}", style="dim"))
        console.print()

    elif cmd == "/tokens":
        console.print(Text(f" Session: {total_tokens} tokens, {len(messages)} messages", style="dim"))
        console.print()

    elif cmd == "/clear":
        os.system("clear")
        render_header()

    elif cmd == "/history":
        if messages:
            for m in messages[-10:]:
                role = "You" if m["role"] == "user" else "AI"
                console.print(Text(f" [{role}] {m['content'][:80]}", style="dim"))
        else:
            console.print(Text(" No messages.", style="dim"))
        console.print()

    elif cmd == "/connect":
        if args:
            url = args if args.startswith("http") else "http://" + args
            # split("//", 1)[-1] is safe even when the scheme separator is
            # missing — the original indexed [1] and raised IndexError on
            # inputs like "httpfoo" that start with "http" but have no "//".
            if ":" not in url.split("//", 1)[-1]:
                url += ":11434"
            OLLAMA_URL = url.rstrip("/") + "/api/chat"
            OLLAMA_BASE = OLLAMA_URL.replace("/api/chat", "")
            models = try_connect()
            if models:
                console.print(Text(f" Connected: {OLLAMA_BASE} ({len(models)} models)", style="green"))
                # Persist so the import-time auto-connect finds it next run.
                Path.home().joinpath(".codegpt", "ollama_url").write_text(OLLAMA_URL)
            else:
                console.print(Text(f" Cannot reach {url}", style="red"))
        else:
            console.print(Text(" Usage: /connect 192.168.1.100", style="dim"))
        console.print()

    elif cmd == "/server":
        models = try_connect()
        status = "online" if models else "offline"
        console.print(Text(f" Server: {OLLAMA_BASE} ({status})", style="green" if models else "red"))
        console.print()

    elif cmd == "/weather":
        city = args or "London"
        try:
            r = requests.get(f"https://wttr.in/{city}?format=j1", timeout=10)
            d = r.json()["current_condition"][0]
            console.print(Text(f" {city}: {d['weatherDesc'][0]['value']}, {d['temp_C']}C, {d['humidity']}% humidity", style="white"))
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still aborts the fetch.
            console.print(Text(f" Cannot get weather for {city}", style="red"))
        console.print()

    elif cmd == "/agent":
        parts = args.split(maxsplit=1)
        if len(parts) >= 2:
            # One-shot specialists: a system prompt per agent, no history.
            agents = {
                "coder": "You are an expert programmer. Write clean, working code.",
                "debugger": "You are a debugging expert. Find and fix bugs.",
                "reviewer": "You are a code reviewer. Check for bugs, security, performance.",
                "architect": "You are a system architect. Design with ASCII diagrams.",
                "pentester": "You are an ethical pentester. Find vulnerabilities.",
                "explainer": "You are a teacher. Explain simply with analogies.",
                "optimizer": "You are a performance engineer. Optimize code.",
                "researcher": "You are a research analyst. Deep-dive into topics.",
            }
            name, task = parts
            if name in agents:
                console.print(Text(f" Running {name} agent...", style="dim"))
                try:
                    resp = requests.post(OLLAMA_URL, json={
                        "model": MODEL,
                        "messages": [
                            {"role": "system", "content": agents[name]},
                            {"role": "user", "content": task},
                        ], "stream": False,
                    }, timeout=90)
                    content = resp.json().get("message", {}).get("content", "")
                    print_msg("ai", f"**{name}:** {content}")
                except Exception as e:
                    console.print(Text(f" Error: {e}", style="red"))
            else:
                console.print(Text(f" Agents: {', '.join(agents.keys())}", style="dim"))
        else:
            console.print(Text(" Usage: /agent coder build a flask API", style="dim"))
        console.print()

    elif cmd == "/help" or cmd == "/h":
        cmds = {
            "/new": "New chat", "/model": "Switch model", "/persona": "Switch persona",
            "/think": "Toggle reasoning", "/tokens": "Token count", "/clear": "Clear screen",
            "/sidebar": "Toggle sidebar", "/history": "Show history", "/connect": "Remote Ollama",
            "/server": "Server info", "/weather": "Get weather", "/agent": "Run agent",
            "/help": "This list", "/quit": "Exit",
        }
        for c, d in cmds.items():
            console.print(Text.from_markup(f" [bright_blue]{c:<12}[/] [dim]{d}[/]"))
        console.print()

    else:
        return None  # Not a command

    return True
|
|
405
|
+
|
|
406
|
+
|
|
407
|
+
def toolbar():
    """Return the prompt's bottom-toolbar fragments: session stats plus a hint."""
    summary = (
        f" {len(messages)} msgs · {total_tokens} tok · {MODEL}"
        f" · {persona} · type / for commands "
    )
    return [("class:bottom-toolbar", summary)]
|
|
410
|
+
|
|
411
|
+
|
|
412
|
+
def main():
    """Run the TUI: banner, connectivity check, greeting, then the REPL loop."""
    os.system("clear")

    # Welcome
    console.print()
    console.print(Text.from_markup(
        "[bold red] ╔══════════════════════════════════╗[/]\n"
        "[bold red] ║[/] [bold red]Code[/][bold bright_blue]GPT[/] [dim]TUI v2.0[/] [bold red]║[/]\n"
        "[bold red] ║[/] [dim]terminal ui · works everywhere[/] [bold red]║[/]\n"
        "[bold red] ╚══════════════════════════════════╝[/]"
    ))
    console.print()

    # Live connectivity banner; the import-time auto-connect already ran.
    models = try_connect()
    if models:
        console.print(Text.from_markup(f" [green]connected[/] · {len(models)} models · [bright_blue]{MODEL}[/]"))
    else:
        console.print(Text.from_markup(" [yellow]offline[/] · use [bright_blue]/connect IP[/] to link"))

    hour = datetime.now().hour
    greeting = "Good morning" if hour < 12 else "Good afternoon" if hour < 18 else "Good evening"
    console.print(Text(f"\n {greeting}, {USERNAME}.", style="bold"))
    console.print(Text(" Type a message to chat. Type / for commands.\n", style="dim"))

    # REPL: Ctrl-C / Ctrl-D exits; empty input is skipped.
    while True:
        try:
            user_input = prompt(
                [("class:prompt", " ❯ ")],
                style=style,
                history=history,
                completer=cmd_completer,
                complete_while_typing=True,
                bottom_toolbar=toolbar,
            ).strip()
        except (KeyboardInterrupt, EOFError):
            break

        if not user_input:
            continue

        if user_input.startswith("/"):
            result = handle_command(user_input)
            if result == "quit":
                break
            elif result is None:
                # Unknown command — treat as chat
                print_msg("user", user_input)
                chat(user_input)
            continue

        print_msg("user", user_input)
        chat(user_input)

    # Session summary on exit.
    console.print(Text(f"\n {total_tokens} tokens · {len(messages)} messages\n", style="dim"))


if __name__ == "__main__":
    main()
|