codegpt-ai 1.22.0 → 1.24.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/chat.py +94 -125
  2. package/package.json +1 -1
package/chat.py CHANGED
@@ -1170,108 +1170,45 @@ def print_header(model):
1170
1170
  compact = is_compact()
1171
1171
  console.print()
1172
1172
 
1173
- # Responsive logo
1174
- logo = LOGO_COMPACT if compact else LOGO_FULL
1175
- console.print(Panel(
1176
- Text.from_markup(logo),
1177
- border_style="bright_cyan",
1178
- padding=(0 if compact else 1, 1 if compact else 2),
1179
- width=w,
1180
- ))
1181
-
1182
- # Status bar — compact version for small screens
1183
- now = datetime.now().strftime("%H:%M")
1184
- elapsed = int(time.time() - session_stats["start"])
1185
- uptime = f"{elapsed // 60}m"
1186
- tok = session_stats["tokens_out"]
1187
-
1188
- bar = Text()
1189
1173
  if compact:
1190
- bar.append(f" {model}", style="bright_cyan")
1191
- bar.append(f" {session_stats['messages']}msg", style="dim")
1192
- bar.append(f" {now}", style="dim")
1174
+ console.print(Text.from_markup(f" [bold bright_cyan]CodeGPT[/] [dim]· {model}[/]"))
1175
+ console.print(Rule(style="dim", characters=""))
1193
1176
  else:
1194
- bar.append(f" {model}", style="bright_cyan")
1195
- bar.append(" | ", style="dim")
1196
- bar.append(f"{session_stats['messages']} msgs", style="dim")
1197
- bar.append(" | ", style="dim")
1198
- bar.append(f"{tok} tokens", style="dim")
1199
- bar.append(" | ", style="dim")
1200
- bar.append(f"{uptime}", style="dim")
1201
- bar.append(" | ", style="dim")
1202
- bar.append(now, style="dim")
1203
-
1204
- console.print(Panel(bar, border_style="dim", padding=0, width=w))
1177
+ console.print(Text.from_markup(LOGO_FULL))
1178
+ console.print()
1179
+ now = datetime.now().strftime("%H:%M")
1180
+ elapsed = int(time.time() - session_stats["start"])
1181
+ console.print(Text.from_markup(
1182
+ f" [bright_cyan]{model}[/]"
1183
+ f" [dim]·[/] [dim]{session_stats['messages']} msgs[/]"
1184
+ f" [dim]·[/] [dim]{session_stats['tokens_out']} tok[/]"
1185
+ f" [dim]·[/] [dim]{elapsed // 60}m[/]"
1186
+ f" [dim]·[/] [dim]{now}[/]"
1187
+ ))
1188
+ console.print(Rule(style="dim", characters="─"))
1205
1189
  console.print()
1206
1190
 
1207
1191
 
1208
1192
  def print_welcome(model, available_models):
1209
1193
  w = tw()
1210
1194
  import random
1211
-
1212
- # Greeting
1213
- hour = datetime.now().hour
1214
- if hour < 12:
1215
- greeting = "Good morning"
1216
- elif hour < 18:
1217
- greeting = "Good afternoon"
1218
- else:
1219
- greeting = "Good evening"
1220
-
1221
1195
  compact = is_compact()
1222
1196
 
1223
- console.print(Align.center(Text(f"\n{greeting}.\n", style="bold white")), width=w)
1224
-
1225
- # Connection status bar
1226
- is_local = "localhost" in OLLAMA_URL or "127.0.0.1" in OLLAMA_URL
1227
- server_type = "local" if is_local else OLLAMA_URL.split("//")[1].split("/")[0]
1228
- model_count = len(available_models)
1229
- mem_count = len(load_memories())
1230
- profile = load_profile()
1231
- streak = profile.get("total_sessions", 0)
1197
+ # Clean welcome — no heavy panels
1198
+ hour = datetime.now().hour
1199
+ greeting = "Good morning" if hour < 12 else "Good afternoon" if hour < 18 else "Good evening"
1200
+ console.print(Text(f" {greeting}. How can I help?", style="bold white"))
1201
+ console.print()
1232
1202
 
1233
- status = Text()
1234
- if compact:
1235
- status.append(f" {model}", style="bold bright_cyan")
1236
- status.append(f" {server_type}", style="green" if model_count > 0 else "red")
1237
- status.append(f" {model_count}m", style="dim")
1238
- else:
1239
- status.append(" ◈ ", style="bright_cyan")
1240
- status.append(f"{model}", style="bold bright_cyan")
1241
- status.append(" │ ", style="dim")
1242
- status.append(f"◇ {server_type}", style="green" if model_count > 0 else "red")
1243
- status.append(" │ ", style="dim")
1244
- status.append(f"△ {model_count} models", style="dim")
1245
- status.append(" │ ", style="dim")
1246
- status.append(f"◇ {mem_count} memories", style="dim")
1247
- if streak > 1:
1248
- status.append(" │ ", style="dim")
1249
- status.append(f"▸ {streak} sessions", style="dim")
1250
- console.print(Panel(status, border_style="bright_black", padding=0, width=w))
1251
-
1252
- # Suggestion chips — fewer on compact
1253
- if compact:
1254
- console.print(Panel(
1255
- _build_suggestions(max_items=3),
1256
- title="[dim]Try[/]",
1257
- title_align="left",
1258
- border_style="bright_black",
1259
- padding=(0, 1),
1260
- width=w,
1261
- ))
1262
- else:
1263
- console.print(Panel(
1264
- _build_suggestions(),
1265
- title="[dim]Suggestions (type a number)[/]",
1266
- title_align="left",
1267
- border_style="bright_black",
1268
- padding=(1, 2),
1269
- width=w,
1270
- ))
1203
+ # Suggestions — clean list
1204
+ items = SUGGESTIONS[:3] if compact else SUGGESTIONS[:5]
1205
+ for i, s in enumerate(items, 1):
1206
+ console.print(Text.from_markup(f" [bright_cyan]{i}.[/] [dim]{s}[/]"))
1207
+ console.print()
1271
1208
 
1272
- # Tip of the day
1209
+ # Tip
1273
1210
  tip = random.choice(TIPS)
1274
- console.print(Text(f" Tip: {tip}", style="dim italic"))
1211
+ console.print(Text(f" tip: {tip}", style="dim italic"))
1275
1212
  console.print()
1276
1213
 
1277
1214
 
@@ -1289,46 +1226,40 @@ def _build_suggestions(max_items=None):
1289
1226
 
1290
1227
 
1291
1228
  def print_user_msg(text):
1292
- pad = (0, 1) if is_compact() else (0, 2)
1293
- console.print(Panel(
1294
- Text(text, style="white"),
1295
- title="[bold bright_cyan]You[/]",
1296
- title_align="left",
1297
- border_style="bright_cyan",
1298
- padding=pad,
1299
- width=tw(),
1300
- ))
1229
+ # Clean inline style like Claude Code — no heavy panels
1230
+ console.print()
1231
+ console.print(Text(f" {text}", style="bold white"))
1232
+ console.print()
1301
1233
 
1302
1234
 
1303
1235
  def print_ai_msg(text, stats=""):
1304
- pad = (0, 1) if is_compact() else (0, 2)
1236
+ # Minimal border, clean markdown like Claude Code output
1237
+ w = tw()
1305
1238
  compact = is_compact()
1306
- panel = Panel(
1307
- Markdown(text),
1308
- title="[bold bright_green]AI[/]",
1309
- title_align="left",
1310
- border_style="bright_green",
1311
- subtitle=stats if not compact else "",
1312
- subtitle_align="right",
1313
- padding=pad,
1314
- width=tw(),
1315
- )
1316
- print_with_sidebar(panel)
1239
+
1240
+ console.print(Rule(style="bright_green", characters="─"))
1241
+ console.print()
1242
+ console.print(Markdown(text), width=w - 4)
1243
+ if stats and not compact:
1244
+ console.print(Text(f" {stats}", style="dim"))
1245
+ console.print()
1317
1246
 
1318
1247
 
1319
1248
  def print_sys(text):
1320
- if is_compact():
1321
- console.print(Text(f" {text}", style="dim italic"))
1322
- else:
1323
- console.print(Panel(
1324
- Text(text, style="dim italic"),
1325
- border_style="bright_black",
1326
- padding=(0, 1),
1327
- width=tw(),
1328
- ))
1249
+ # Simple dim text — no panels, no borders
1250
+ console.print(Text(f" {text}", style="dim"))
1329
1251
 
1330
1252
 
1331
1253
  def print_err(text):
1254
+ console.print(Text(f" ✗ {text}", style="bold red"))
1255
+
1256
+
1257
+ def print_success(text):
1258
+ console.print(Text(f" ✓ {text}", style="bold green"))
1259
+
1260
+
1261
+ def _print_err_panel(text):
1262
+ """Legacy panel error for important errors."""
1332
1263
  console.print(Panel(
1333
1264
  Text(text, style="bold red"),
1334
1265
  title="[bold red]Error[/]",
@@ -4241,21 +4172,21 @@ def stream_response(messages, system, model):
4241
4172
  # --- Input ---
4242
4173
 
4243
4174
  def _bottom_toolbar():
4244
- """Live stats in the bottom toolbar."""
4175
+ """Clean status bar like Claude Code."""
4245
4176
  elapsed = int(time.time() - session_stats["start"])
4246
4177
  mins = elapsed // 60
4247
4178
  msgs = session_stats["messages"]
4248
4179
  tok = session_stats["tokens_out"]
4249
4180
  if is_compact():
4250
- return [("class:bottom-toolbar", f" {msgs}msg {tok}tok {mins}m │ / cmds ")]
4181
+ return [("class:bottom-toolbar", f" {msgs} msgs · {tok} tok · {mins}m ")]
4251
4182
  return [("class:bottom-toolbar",
4252
- f" {msgs} msgs {tok} tok {mins}m / for commands │ Ctrl+C to exit ")]
4183
+ f" {msgs} msgs · {tok} tokens · {mins}m · type / for commands ")]
4253
4184
 
4254
4185
 
4255
4186
  def get_input():
4256
4187
  try:
4257
4188
  return prompt(
4258
- [("class:prompt", " > ")],
4189
+ [("class:prompt", " ")],
4259
4190
  style=input_style,
4260
4191
  history=input_history,
4261
4192
  completer=cmd_completer,
@@ -5743,10 +5674,48 @@ def main():
5743
5674
  tool = AI_TOOLS[tool_key]
5744
5675
  tool_bin = tool["bin"]
5745
5676
 
5746
- # Block unsupported tools on Termux
5677
+ # Block unsupported tools on Termux — explain why
5747
5678
  is_termux = os.path.exists("/data/data/com.termux")
5748
5679
  if is_termux and not tool.get("termux", True):
5749
- print_err(f"{tool['name']} doesn't work on Termux.")
5680
+ reasons = {
5681
+ "opencode": "OpenCode needs Bun runtime and native x86/x64 binaries that aren't available for ARM processors.",
5682
+ "codex": "Codex requires native binaries that don't compile on ARM/Android.",
5683
+ "gpt4all": "GPT4All needs a C++ backend (llama.cpp) that requires desktop-level hardware to run.",
5684
+ }
5685
+ reason = reasons.get(tool_key, "This tool requires native binaries that aren't available for ARM/Android.")
5686
+
5687
+ # Suggest alternatives
5688
+ alternatives = {
5689
+ "opencode": "/cline or /gemini",
5690
+ "codex": "/gemini or /cline",
5691
+ "gpt4all": "/ollama (if available) or /connect PC_IP",
5692
+ }
5693
+ alt = alternatives.get(tool_key, "Check /tools for available alternatives")
5694
+
5695
+ compact = is_compact()
5696
+ if compact:
5697
+ console.print(Panel(
5698
+ Text.from_markup(
5699
+ f"[bold red]{tool['name']}[/]\n\n"
5700
+ f" [dim]{reason[:60]}[/]\n\n"
5701
+ f" Try: [bright_cyan]{alt}[/]"
5702
+ ),
5703
+ title="[bold red]Not Available[/]",
5704
+ border_style="red", padding=(0, 1), width=tw(),
5705
+ ))
5706
+ else:
5707
+ console.print(Panel(
5708
+ Text.from_markup(
5709
+ f"[bold red]{tool['name']} is not available on Termux[/]\n\n"
5710
+ f" [bold]Why:[/]\n"
5711
+ f" {reason}\n\n"
5712
+ f" [bold]Alternatives:[/]\n"
5713
+ f" [bright_cyan]{alt}[/]\n\n"
5714
+ f" [dim]Or use this tool on your PC instead.[/]"
5715
+ ),
5716
+ title="[bold red]Not Supported[/]",
5717
+ border_style="red", padding=(1, 2), width=tw(),
5718
+ ))
5750
5719
  continue
5751
5720
  tool_args = user_input[len(cmd):].strip()
5752
5721
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "codegpt-ai",
3
- "version": "1.22.0",
3
+ "version": "1.24.0",
4
4
  "description": "Local AI Assistant Hub — 80+ commands, 29 tools, 8 agents, training, security",
5
5
  "author": "ArukuX",
6
6
  "license": "MIT",