codegpt-ai 1.28.1 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,2 +1,2 @@
1
1
  """CodeGPT — Local AI Assistant Hub."""
2
- __version__ = "1.0.0"
2
+ __version__ = "2.0.0"
package/chat.py CHANGED
@@ -609,32 +609,27 @@ def ask_permission(action, detail=""):
609
609
 
610
610
  risk_color = RISK_COLORS.get(risk, "yellow")
611
611
  risk_icon = RISK_ICONS.get(risk, "?")
612
- compact = is_compact()
613
612
 
614
- if compact:
615
- console.print(Panel(
616
- Text.from_markup(
617
- f"[{risk_color}]{risk_icon} {risk}[/]\n"
618
- f" {action_desc}\n"
619
- + (f" [dim]{detail[:35]}[/]\n" if detail else "")
620
- ),
621
- border_style=risk_color.replace("bold ", ""), padding=(0, 1), width=tw(),
622
- ))
623
- else:
624
- console.print(Panel(
625
- Text.from_markup(
626
- f"[{risk_color}]{risk_icon} Risk: {risk}[/]\n\n"
627
- f" Action: [bright_cyan]{action_desc}[/]\n"
628
- + (f" Detail: [dim]{detail[:60]}[/]\n" if detail else "")
629
- + f"\n [dim](y)es (n)o (a)lways allow this[/]"
630
- ),
631
- title=f"[{risk_color}]Permission[/]",
632
- border_style=risk_color.replace("bold ", ""), padding=(0, 2), width=tw(),
633
- ))
613
+ # Risk warnings — explain what could happen
614
+ risk_warnings = {
615
+ "CRITICAL": "This can execute code, modify your system, or expose data.",
616
+ "HIGH": "This accesses external services or modifies important data.",
617
+ "MEDIUM": "This uses resources or changes session settings.",
618
+ "LOW": "This is a safe operation with minimal impact.",
619
+ }
620
+ warning = risk_warnings.get(risk, "")
621
+
622
+ # Clean minimal prompt — like Claude Code
623
+ console.print()
624
+ console.print(Text.from_markup(f" [{risk_color}]{risk_icon} {action_desc}[/]"))
625
+ if detail:
626
+ console.print(Text(f" {detail[:70]}", style="dim"))
627
+ console.print(Text.from_markup(f" [{risk_color}]{risk} — {warning}[/]"))
628
+ console.print()
634
629
 
635
630
  try:
636
631
  answer = prompt(
637
- [("class:prompt", " Allow? (y/n/a) > ")],
632
+ [("class:prompt", " Allow? (y)es / (n)o / (a)lways > ")],
638
633
  style=input_style,
639
634
  ).strip().lower()
640
635
  except (KeyboardInterrupt, EOFError):
@@ -643,9 +638,9 @@ def ask_permission(action, detail=""):
643
638
  if answer in ("a", "always"):
644
639
  PERMISSION_ALWAYS_ALLOW.add(action)
645
640
  save_permissions()
646
- print_sys(f"Always allowed: {action_desc}")
641
+ console.print(Text(f"Always allowed", style="green"))
647
642
  return True
648
- elif answer in ("y", "yes"):
643
+ elif answer in ("y", "yes", ""):
649
644
  return True
650
645
  else:
651
646
  print_sys("Denied.")
@@ -1174,27 +1169,29 @@ HISTORY_FILE = Path.home() / ".codegpt" / "input_history"
1174
1169
 
1175
1170
  def print_header(model):
1176
1171
  clear_screen()
1177
- w = tw()
1178
1172
  compact = is_compact()
1179
- console.print()
1180
1173
 
1181
1174
  if compact:
1182
- console.print(Text.from_markup(f" [bold bright_cyan]CodeGPT[/] [dim]· {model}[/]"))
1183
- console.print(Rule(style="dim", characters="─"))
1175
+ console.print(Text.from_markup(f"\n [bold bright_cyan]CodeGPT[/] [dim]v2.0 · {model}[/]\n"))
1184
1176
  else:
1185
- console.print(Text.from_markup(LOGO_FULL))
1177
+ # Clean startup like Claude Code — no ASCII art on repeat, just info
1178
+ is_local = "localhost" in OLLAMA_URL or "127.0.0.1" in OLLAMA_URL
1179
+ server = "local" if is_local else OLLAMA_URL.split("//")[1].split("/")[0] if "//" in OLLAMA_URL else "?"
1180
+ profile = load_profile()
1181
+ name = profile.get("name", "")
1182
+ mem_count = len(load_memories())
1183
+
1184
+ console.print()
1185
+ console.print(Text.from_markup(f" [bold bright_cyan]CodeGPT[/] [dim]v2.0[/]"))
1186
1186
  console.print()
1187
- now = datetime.now().strftime("%H:%M")
1188
- elapsed = int(time.time() - session_stats["start"])
1189
1187
  console.print(Text.from_markup(
1190
- f" [bright_cyan]{model}[/]"
1191
- f" [dim]·[/] [dim]{session_stats['messages']} msgs[/]"
1192
- f" [dim]·[/] [dim]{session_stats['tokens_out']} tok[/]"
1193
- f" [dim]·[/] [dim]{elapsed // 60}m[/]"
1194
- f" [dim]·[/] [dim]{now}[/]"
1188
+ f" [dim]model[/] [bright_cyan]{model}[/]\n"
1189
+ f" [dim]server[/] [green]{server}[/]\n"
1190
+ f" [dim]user[/] {name}\n"
1191
+ f" [dim]memory[/] {mem_count} items\n"
1192
+ f" [dim]commands[/] {len(COMMANDS)}"
1195
1193
  ))
1196
- console.print(Rule(style="dim", characters="─"))
1197
- console.print()
1194
+ console.print()
1198
1195
 
1199
1196
 
1200
1197
  def print_welcome(model, available_models):
@@ -1279,13 +1276,38 @@ def _print_err_panel(text):
1279
1276
 
1280
1277
 
1281
1278
  def print_help():
1282
- table = Table(border_style="bright_black", show_header=True,
1283
- header_style="bold bright_cyan", padding=(0, 2))
1284
- table.add_column("Command", style="bright_cyan", min_width=12)
1285
- table.add_column("Description", style="dim")
1286
- for cmd, desc in COMMANDS.items():
1287
- table.add_row(cmd, desc)
1288
- console.print(table)
1279
+ # Group commands by category — clean minimal list
1280
+ categories = {
1281
+ "Chat": ["/new", "/save", "/load", "/delete", "/copy", "/regen", "/edit", "/history", "/clear", "/quit"],
1282
+ "Model": ["/model", "/modelinfo", "/params", "/temp", "/think", "/tokens", "/compact"],
1283
+ "AI": ["/agent", "/agents", "/all", "/vote", "/swarm", "/team", "/room", "/spectate", "/dm", "/chat-link"],
1284
+ "Lab": ["/lab", "/chain", "/race", "/prompts"],
1285
+ "Tools": ["/tools", "/bg", "/split", "/grid", "/running", "/killall"],
1286
+ "Connect": ["/connect", "/disconnect", "/server", "/qr", "/scan"],
1287
+ "Files": ["/file", "/run", "/code", "/shell", "/browse", "/open", "/export"],
1288
+ "Memory": ["/mem", "/train", "/pin", "/pins", "/search", "/fork", "/rate", "/tag"],
1289
+ "Profile": ["/profile", "/setname", "/setbio", "/persona", "/personas", "/usage"],
1290
+ "Skills": ["/skill", "/skills", "/auto", "/cron", "/crons"],
1291
+ "Comms": ["/broadcast", "/inbox", "/feed", "/monitor", "/hub"],
1292
+ "System": ["/github", "/weather", "/spotify", "/volume", "/bright", "/sysinfo"],
1293
+ "Security": ["/pin-set", "/pin-remove", "/lock", "/audit", "/security", "/permissions"],
1294
+ }
1295
+
1296
+ on_termux = os.path.exists("/data/data/com.termux")
1297
+
1298
+ for cat, cmds in categories.items():
1299
+ console.print(Text(f"\n {cat}", style="bold bright_cyan"))
1300
+ for cmd in cmds:
1301
+ desc = COMMANDS.get(cmd, "")
1302
+ if not desc:
1303
+ continue
1304
+ # Hide unsupported tool commands on Termux
1305
+ tool_name = cmd[1:]
1306
+ if on_termux and tool_name in AI_TOOLS and not AI_TOOLS[tool_name].get("termux", True):
1307
+ continue
1308
+ console.print(Text.from_markup(f" [bright_cyan]{cmd:<16}[/] [dim]{desc}[/]"))
1309
+
1310
+ console.print(Text("\n Type / to autocomplete. Aliases: /q /n /s /m /h /a /t /f", style="dim"))
1289
1311
  console.print()
1290
1312
 
1291
1313
 
@@ -2443,8 +2465,24 @@ def open_url(url):
2443
2465
  elif not url.startswith("http"):
2444
2466
  url = "https://" + url
2445
2467
 
2446
- webbrowser.open(url)
2447
- print_sys(f"Opened: {url}")
2468
+ # Platform-specific browser open
2469
+ try:
2470
+ if os.path.exists("/data/data/com.termux"):
2471
+ # Termux — use termux-open or am start
2472
+ try:
2473
+ subprocess.run(["termux-open-url", url], timeout=5)
2474
+ except FileNotFoundError:
2475
+ subprocess.run(["am", "start", "-a", "android.intent.action.VIEW", "-d", url], timeout=5)
2476
+ elif os.name == "nt":
2477
+ os.startfile(url)
2478
+ elif sys.platform == "darwin":
2479
+ subprocess.run(["open", url], timeout=5)
2480
+ else:
2481
+ webbrowser.open(url)
2482
+ print_sys(f"Opened: {url}")
2483
+ except Exception as e:
2484
+ print_err(f"Cannot open browser: {e}")
2485
+ print_sys(f"URL: {url}")
2448
2486
  audit_log("OPEN_URL", url)
2449
2487
 
2450
2488
 
@@ -3913,7 +3951,7 @@ def team_chat(name1, name2, default_model, system):
3913
3951
 
3914
3952
  # --- Chat Room ---
3915
3953
 
3916
- def chat_room(member_names, default_model, system, user_joins=True):
3954
+ def chat_room(member_names, default_model, system, user_joins=True, topic=""):
3917
3955
  """Multi-AI chat room. User can join or spectate."""
3918
3956
  members = [resolve_team_member(n) for n in member_names]
3919
3957
 
@@ -3999,7 +4037,8 @@ def chat_room(member_names, default_model, system, user_joins=True):
3999
4037
  # Spectate mode — AIs chat with each other, user watches
4000
4038
  try:
4001
4039
  # Get initial topic from last arg or default
4002
- topic = "Introduce yourselves and start a technical discussion."
4040
+ if not topic:
4041
+ topic = "Introduce yourselves and start a technical discussion."
4003
4042
  if history:
4004
4043
  topic = history[-1]["content"]
4005
4044
 
@@ -4253,7 +4292,7 @@ def delete_skill(name):
4253
4292
 
4254
4293
  # --- Browser ---
4255
4294
 
4256
- def browse_url(url):
4295
+ def browse_url(url, model=None):
4257
4296
  """Fetch a URL, extract text, and summarize it."""
4258
4297
  if not url.startswith("http"):
4259
4298
  url = "https://" + url
@@ -4282,7 +4321,7 @@ def browse_url(url):
4282
4321
  # Ask AI to summarize
4283
4322
  try:
4284
4323
  ai_resp = requests.post(OLLAMA_URL, json={
4285
- "model": MODEL,
4324
+ "model": model or MODEL,
4286
4325
  "messages": [
4287
4326
  {"role": "system", "content": "Summarize this web page content in 3-5 bullet points. Be concise."},
4288
4327
  {"role": "user", "content": f"URL: {url}\n\nContent:\n{text}"},
@@ -4306,6 +4345,7 @@ def browse_url(url):
4306
4345
  # --- Cron / Scheduled Tasks ---
4307
4346
 
4308
4347
  active_crons = []
4348
+ cron_command_queue = [] # Thread-safe command queue for cron execution
4309
4349
 
4310
4350
 
4311
4351
  def add_cron(interval_str, command):
@@ -4331,10 +4371,10 @@ def add_cron(interval_str, command):
4331
4371
  # Check if still active
4332
4372
  if cron_entry not in active_crons:
4333
4373
  break
4334
- print_sys(f"[cron] Running: {command}")
4335
- # Execute as if user typed it
4336
4374
  cron_entry["last_run"] = datetime.now().isoformat()
4337
4375
  cron_entry["runs"] += 1
4376
+ # Queue the command for main loop to execute
4377
+ cron_command_queue.append(command)
4338
4378
 
4339
4379
  cron_entry = {
4340
4380
  "command": command,
@@ -4614,7 +4654,7 @@ def get_input():
4614
4654
  # --- Main ---
4615
4655
 
4616
4656
  def main():
4617
- global last_ai_response, code_exec_count, OLLAMA_URL, sidebar_enabled
4657
+ global last_ai_response, code_exec_count, OLLAMA_URL, sidebar_enabled, think_mode, temperature
4618
4658
 
4619
4659
  # CLI args mode: python chat.py --ask "question" or python chat.py --cmd "/tools"
4620
4660
  if len(sys.argv) > 1:
@@ -4701,7 +4741,11 @@ def main():
4701
4741
  ollama_status = "offline"
4702
4742
  tool_count = sum(1 for t in AI_TOOLS.values() if shutil.which(t["bin"]))
4703
4743
 
4704
- print(f" CodeGPT v1.0.0")
4744
+ try:
4745
+ from ai_cli import __version__ as _v
4746
+ except ImportError:
4747
+ _v = "2.0.0"
4748
+ print(f" CodeGPT v{_v}")
4705
4749
  print(f" User: {profile.get('name', 'not set')}")
4706
4750
  print(f" Model: {profile.get('model', MODEL)}")
4707
4751
  print(f" Persona: {profile.get('persona', 'default')}")
@@ -4829,53 +4873,19 @@ def main():
4829
4873
 
4830
4874
  print_header(model)
4831
4875
 
4832
- # Welcome popup — always show
4876
+ # Clean welcome — like Claude Code
4833
4877
  if not first_time:
4834
- w = tw()
4835
- compact = is_compact()
4836
- name = profile.get("name", "User")
4837
- is_local = "localhost" in OLLAMA_URL or "127.0.0.1" in OLLAMA_URL
4838
- server = "local" if is_local else OLLAMA_URL.split("//")[1].split("/")[0] if "//" in OLLAMA_URL else "unknown"
4839
- model_count = len(available_models)
4840
- sessions = profile.get("total_sessions", 0)
4841
- total_msgs = profile.get("total_messages", 0)
4842
-
4843
- hour = datetime.now().hour
4844
- greeting = "Good morning" if hour < 12 else "Good afternoon" if hour < 18 else "Good evening"
4845
-
4846
- if compact:
4847
- if offline_mode:
4848
- status_line = "[yellow]offline[/] — /connect IP"
4849
- else:
4850
- status_line = f"[green]connected[/] {model_count} models"
4851
-
4852
- console.print(Panel(
4853
- Text.from_markup(
4854
- f"[bold]{greeting}, {name}![/]\n\n"
4855
- f" Model [bright_cyan]{model}[/]\n"
4856
- f" Status {status_line}\n"
4857
- f" Session [dim]#{sessions}[/]\n"
4858
- ),
4859
- title="[bold bright_cyan]CodeGPT[/]",
4860
- border_style="bright_cyan", padding=(0, 1), width=w,
4861
- ))
4862
- else:
4863
- if offline_mode:
4864
- status_line = f" Server: [yellow]offline[/] — use [bright_cyan]/connect IP[/] to link"
4865
- else:
4866
- status_line = f" Server: [green]{server}[/] ({model_count} models)"
4878
+ name = profile.get("name", "")
4879
+ if offline_mode:
4880
+ console.print(Text.from_markup(" [yellow]offline[/] — use [bright_cyan]/connect IP[/] to link to Ollama"))
4881
+ console.print()
4867
4882
 
4868
- console.print(Panel(
4869
- Text.from_markup(
4870
- f"[bold]{greeting}, {name}![/]\n\n"
4871
- f" Model: [bright_cyan]{model}[/]\n"
4872
- f"{status_line}\n"
4873
- f" Session: [dim]#{sessions}[/] ({total_msgs} lifetime msgs)\n\n"
4874
- f" [dim]Type / for commands · /help for full list[/]"
4875
- ),
4876
- title="[bold bright_cyan]Welcome[/]",
4877
- border_style="bright_cyan", padding=(1, 2), width=w,
4878
- ))
4883
+ if name:
4884
+ hour = datetime.now().hour
4885
+ greeting = "Good morning" if hour < 12 else "Good afternoon" if hour < 18 else "Good evening"
4886
+ console.print(Text(f" {greeting}, {name}.", style="bold white"))
4887
+ console.print(Text(" Type a message to chat. Type / for commands.", style="dim"))
4888
+ console.print()
4879
4889
 
4880
4890
  print_welcome(model, available_models)
4881
4891
 
@@ -4886,6 +4896,11 @@ def main():
4886
4896
  audit_log("LOCKED_OUT")
4887
4897
  break
4888
4898
 
4899
+ # Drain cron command queue
4900
+ while cron_command_queue:
4901
+ cron_cmd = cron_command_queue.pop(0)
4902
+ print_sys(f"[cron] {cron_cmd}")
4903
+
4889
4904
  user_input = get_input()
4890
4905
  if user_input is None:
4891
4906
  cancel_all_reminders()
@@ -4952,10 +4967,10 @@ def main():
4952
4967
  continue
4953
4968
 
4954
4969
  elif cmd == "/save":
4955
- if messages and ask_permission("save_chat", "Save conversation"):
4956
- save_conversation(messages, model)
4957
- else:
4970
+ if not messages:
4958
4971
  print_sys("Nothing to save.")
4972
+ elif ask_permission("save_chat", "Save conversation"):
4973
+ save_conversation(messages, model)
4959
4974
  continue
4960
4975
 
4961
4976
  elif cmd == "/load":
@@ -5441,10 +5456,10 @@ def main():
5441
5456
  elif sub == "clear":
5442
5457
  mem_clear()
5443
5458
  elif sub == "inject":
5444
- # Inject memories into current conversation as context
5459
+ # Inject memories as a user-role context message (not system — avoids corrupting conversation)
5445
5460
  mem_context = get_memory_context()
5446
5461
  if mem_context:
5447
- messages.append({"role": "system", "content": mem_context})
5462
+ messages.append({"role": "user", "content": f"[Memory context for reference]:\n{mem_context}"})
5448
5463
  print_sys("Memories injected into context.")
5449
5464
  else:
5450
5465
  print_sys("No memories to inject.")
@@ -5857,8 +5872,7 @@ def main():
5857
5872
 
5858
5873
  if len(names) >= 2:
5859
5874
  # Inject topic
5860
- history_init = [{"speaker": "moderator", "content": topic}]
5861
- h = chat_room(names, model, system, user_joins=False)
5875
+ h = chat_room(names, model, system, user_joins=False, topic=topic)
5862
5876
  else:
5863
5877
  print_sys("Need at least 2 AIs. Example: /spectate coder reviewer discuss Python")
5864
5878
  else:
@@ -6189,7 +6203,7 @@ def main():
6189
6203
  launch_cmd = [tool_bin] + tool.get("default_args", [])
6190
6204
  if tool_args:
6191
6205
  launch_cmd.append(tool_args)
6192
- subprocess.run(launch_cmd, shell=True, cwd=project_dir, env=tool_env)
6206
+ subprocess.run(" ".join(launch_cmd), shell=True, cwd=project_dir, env=tool_env)
6193
6207
  else:
6194
6208
  tool_sandbox = Path.home() / ".codegpt" / "sandbox" / tool_key
6195
6209
  tool_sandbox.mkdir(parents=True, exist_ok=True)
@@ -6226,7 +6240,7 @@ def main():
6226
6240
  launch_cmd = [tool_bin] + tool.get("default_args", [])
6227
6241
  if tool_args:
6228
6242
  launch_cmd.append(tool_args)
6229
- subprocess.run(launch_cmd, shell=True, cwd=str(tool_sandbox), env=tool_env)
6243
+ subprocess.run(" ".join(launch_cmd), shell=True, cwd=str(tool_sandbox), env=tool_env)
6230
6244
 
6231
6245
  print_sys("Back to CodeGPT.")
6232
6246
  audit_log(f"TOOL_EXIT", tool_key)
@@ -6343,7 +6357,7 @@ def main():
6343
6357
  print_sys(f"Installed in {elapsed:.1f}s. Launching...")
6344
6358
  audit_log(f"TOOL_INSTALL", tool_key)
6345
6359
  launch_cmd = [found_bin] + tool.get("default_args", [])
6346
- subprocess.run(launch_cmd, shell=True)
6360
+ subprocess.run(" ".join(launch_cmd), shell=True)
6347
6361
  print_sys("Back to CodeGPT.")
6348
6362
  elif tool_bin in pip_module_map:
6349
6363
  # Try python -m fallback
@@ -6906,7 +6920,7 @@ def main():
6906
6920
  elif cmd == "/browse":
6907
6921
  url = user_input[len("/browse "):].strip()
6908
6922
  if url and ask_permission("open_url", f"Fetch {url}"):
6909
- content = browse_url(url)
6923
+ content = browse_url(url, model=model)
6910
6924
  if content:
6911
6925
  messages.append({"role": "user", "content": f"[browsed: {url}]"})
6912
6926
  messages.append({"role": "assistant", "content": content[:500]})
@@ -6971,21 +6985,23 @@ def main():
6971
6985
  save_permissions()
6972
6986
  print_sys("All permissions reset. You'll be asked again.")
6973
6987
  else:
6974
- table = Table(title="Permissions", border_style="yellow",
6975
- title_style="bold yellow", show_header=True, header_style="bold")
6976
- table.add_column("Action", style="bright_cyan", width=16)
6977
- table.add_column("Description", style="dim")
6978
- table.add_column("Risk", width=10)
6979
- table.add_column("Status", width=10)
6980
- for action, info in RISKY_ACTIONS.items():
6981
- if isinstance(info, tuple):
6982
- desc, risk = info
6983
- else:
6984
- desc, risk = info, "MEDIUM"
6985
- rc = RISK_COLORS.get(risk, "yellow")
6986
- ri = RISK_ICONS.get(risk, "?")
6987
- status = "[green]allowed[/]" if action in PERMISSION_ALWAYS_ALLOW else "[yellow]ask[/]"
6988
- table.add_row(action, desc, f"[{rc}]{ri} {risk}[/]", status)
6988
+ console.print(Text("\n Permissions", style="bold"))
6989
+ console.print(Rule(style="dim", characters="─"))
6990
+
6991
+ # Group by risk level
6992
+ for risk_level in ["CRITICAL", "HIGH", "MEDIUM", "LOW"]:
6993
+ rc = RISK_COLORS.get(risk_level, "yellow")
6994
+ ri = RISK_ICONS.get(risk_level, "?")
6995
+ console.print(Text.from_markup(f"\n [{rc}]{ri} {risk_level}[/]"))
6996
+ for action, info in RISKY_ACTIONS.items():
6997
+ if isinstance(info, tuple):
6998
+ desc, risk = info
6999
+ else:
7000
+ desc, risk = info, "MEDIUM"
7001
+ if risk != risk_level:
7002
+ continue
7003
+ status = "[green]✓ allowed[/]" if action in PERMISSION_ALWAYS_ALLOW else "[dim]ask[/]"
7004
+ console.print(Text.from_markup(f" {action:<16} {status} [dim]{desc}[/]"))
6989
7005
  console.print(table)
6990
7006
  console.print(Text(" /permissions reset — revoke all", style="dim"))
6991
7007
  console.print()
@@ -7019,7 +7035,7 @@ def main():
7019
7035
  f" Log entries [bright_cyan]{audit_count}[/]\n"
7020
7036
  f" Log file [dim]{AUDIT_FILE}[/]\n\n"
7021
7037
  f"[bold]Storage[/]\n"
7022
- f" Encrypted [yellow]local XOR[/]\n"
7038
+ f" PIN hash [green]SHA-256[/]\n"
7023
7039
  f" Location [dim]{SECURITY_DIR}[/]\n"
7024
7040
  ),
7025
7041
  border_style="bright_cyan",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "codegpt-ai",
3
- "version": "1.28.1",
3
+ "version": "2.0.0",
4
4
  "description": "Local AI Assistant Hub — 80+ commands, 29 tools, 8 agents, training, security",
5
5
  "author": "ArukuX",
6
6
  "license": "MIT",