codegpt-ai 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json ADDED
@@ -0,0 +1,35 @@
1
+ {
2
+ "name": "codegpt-ai",
3
+ "version": "1.0.0",
4
+ "description": "Local AI Assistant Hub — 80+ commands, 29 tools, 8 agents, training, security",
5
+ "author": "ArukuX",
6
+ "license": "MIT",
7
+ "bin": {
8
+ "ai": "./bin/ai.js",
9
+ "codegpt": "./bin/ai.js"
10
+ },
11
+ "scripts": {
12
+ "postinstall": "node bin/setup.js"
13
+ },
14
+ "keywords": [
15
+ "ai",
16
+ "cli",
17
+ "ollama",
18
+ "llm",
19
+ "chatgpt",
20
+ "terminal",
21
+ "assistant",
22
+ "agents"
23
+ ],
24
+ "repository": {
25
+ "type": "git",
26
+ "url": "https://github.com/CCguvycu/codegpt.git"
27
+ },
28
+ "homepage": "https://github.com/CCguvycu/codegpt",
29
+ "engines": {
30
+ "node": ">=16.0.0"
31
+ },
32
+ "dependencies": {
33
+ "settings": "^0.1.1"
34
+ }
35
+ }
package/pyproject.toml ADDED
@@ -0,0 +1,33 @@
1
+ [build-system]
2
+ requires = ["setuptools>=68.0", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "codegpt"
7
+ version = "1.0.0"
8
+ description = "Local AI Assistant Hub — CLI, agents, training, 15+ tool integrations"
9
+ readme = "CLAUDE.md"
10
+ license = {text = "MIT"}
11
+ requires-python = ">=3.10"
12
+ authors = [{name = "ArukuX"}]
13
+
14
+ dependencies = [
15
+ "requests>=2.31.0",
16
+ "rich>=13.0.0",
17
+ "prompt-toolkit>=3.0.0",
18
+ ]
19
+
20
+ [project.optional-dependencies]
21
+ full = [
22
+ "textual>=0.40.0",
23
+ "python-telegram-bot>=20.0",
24
+ "flask>=3.0.0",
25
+ "groq>=0.4.0",
26
+ "SpeechRecognition>=3.10.0",
27
+ ]
28
+
29
+ [project.scripts]
30
+ ai = "ai_cli.__main__:main"
31
+
32
+ [tool.setuptools.packages.find]
33
+ include = ["ai_cli*"]
package/requirements.txt ADDED
@@ -0,0 +1,12 @@
1
+ requests>=2.31.0
2
+ rich>=13.0.0
3
+ prompt_toolkit>=3.0.0
4
+ textual>=1.0.0
5
+ python-telegram-bot>=21.0
6
+ SpeechRecognition>=3.10.0
7
+ # PyAudio - install separately for voice input:
8
+ # pipwin install pyaudio OR
9
+ # pip install pipwin && pipwin install pyaudio
10
+ flask>=3.0.0
11
+ groq>=0.4.0
12
+ flet>=0.20.0
package/run.py ADDED
@@ -0,0 +1,157 @@
1
+ """CodeGPT Launcher
2
+
3
+ Usage:
4
+ python run.py Start CLI chat (default)
5
+ python run.py chat Start CLI chat
6
+ python run.py tui Start TUI with sidebar
7
+ python run.py bot Start Telegram bot
8
+ python run.py server Start backend server
9
+ python run.py mobile Start mobile app (desktop preview)
10
+ python run.py apk Build Android APK
11
+ """
12
+
13
+ import os
14
+ import sys
15
+ import json
16
+ import subprocess
17
+ from pathlib import Path
18
+
19
# Persistent launcher settings (bot token, Groq API key) are stored as JSON
# in the user's home directory so they survive reinstalls.
CONFIG_DIR = Path.home() / ".codegpt"
CONFIG_FILE = CONFIG_DIR / "config.json"
21
+
22
+
23
def load_config():
    """Return the saved config dict, or {} when the file is missing/corrupt.

    Also ensures the config directory exists as a side effect.
    """
    CONFIG_DIR.mkdir(parents=True, exist_ok=True)
    try:
        # EAFP: a missing file raises FileNotFoundError, bad JSON raises
        # JSONDecodeError — both fall back to an empty config.
        return json.loads(CONFIG_FILE.read_text())
    except Exception:
        return {}
31
+
32
+
33
def save_config(config):
    """Persist *config* (a JSON-serializable dict) to the config file."""
    CONFIG_DIR.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(config, indent=2)
    CONFIG_FILE.write_text(serialized)
36
+
37
+
38
def run_script(name):
    """Run sibling script *name* with the current Python interpreter."""
    target = Path(__file__).parent / name
    subprocess.run([sys.executable, str(target)])
41
+
42
+
43
def run_bot(token=None):
    """Launch the Telegram bot.

    The token is resolved from, in priority order: the *token* argument,
    the CODEGPT_BOT_TOKEN environment variable, then the saved config.
    If none is found, the user is prompted interactively and may opt to
    persist the token for later runs.
    """
    cfg = load_config()
    resolved = token or os.environ.get("CODEGPT_BOT_TOKEN") or cfg.get("bot_token")

    if not resolved:
        print(" No token. Get one from @BotFather on Telegram.")
        try:
            resolved = input(" Paste token > ").strip()
        except (KeyboardInterrupt, EOFError):
            return
        if not resolved:
            return
        # Offer to persist; Ctrl+C / EOF here just skips saving.
        try:
            answer = input(" Save for later? (y/n) > ").strip().lower()
        except (KeyboardInterrupt, EOFError):
            answer = ""
        if answer == "y":
            cfg["bot_token"] = resolved
            save_config(cfg)
            print(" Saved.\n")

    # bot.py reads the token from the environment.
    os.environ["CODEGPT_BOT_TOKEN"] = resolved
    run_script("bot.py")
65
+
66
+
67
def run_server():
    """Launch the backend server, optionally configuring a Groq API key.

    The key is resolved from GROQ_API_KEY or the saved config; otherwise the
    user is prompted (Enter skips, falling back to local Ollama) and may opt
    to persist the key.
    """
    cfg = load_config()
    groq_key = os.environ.get("GROQ_API_KEY") or cfg.get("groq_api_key")

    if not groq_key:
        print(" No Groq API key found.")
        print(" Get a free one at: https://console.groq.com/keys\n")
        print(" Without it, the server will use local Ollama.\n")
        try:
            groq_key = input(" Paste Groq key (or Enter to skip) > ").strip()
        except (KeyboardInterrupt, EOFError):
            groq_key = ""

        if groq_key:
            # Offer to persist; Ctrl+C / EOF here just skips saving.
            try:
                answer = input(" Save for later? (y/n) > ").strip().lower()
            except (KeyboardInterrupt, EOFError):
                answer = ""
            if answer == "y":
                cfg["groq_api_key"] = groq_key
                save_config(cfg)
                print(" Saved.\n")

    # server.py auto-selects the Groq provider when this variable is set.
    if groq_key:
        os.environ["GROQ_API_KEY"] = groq_key

    run_script("server.py")
93
+
94
+
95
def build_apk():
    """Build the Android APK with Flet and copy it to the Desktop if possible.

    Falls back to leaving the APK in the project's build directory when no
    Desktop folder exists. Prints an install hint when the build fails.
    """
    project = Path(__file__).parent
    print(" Building APK with Flet...\n")
    result = subprocess.run(
        ["flet", "build", "apk", "--project", str(project), "--module-name", "mobile"],
        cwd=str(project),
    )
    if result.returncode != 0:
        print("\n Build failed. Make sure Flet is installed: pip install flet")
        return

    # Copy APK to desktop
    apk_path = project / "build" / "apk" / "app-release.apk"
    desktop = Path.home() / "Desktop"
    if not desktop.exists():
        desktop = Path.home() / "OneDrive" / "Desktop"  # common Windows layout

    # Bug fix: the original copied unconditionally, so when neither Desktop
    # folder existed shutil.copy2 raised FileNotFoundError after a
    # successful build. Now we only copy when both ends exist.
    if apk_path.exists() and desktop.exists():
        import shutil
        dest = desktop / "CodeGPT.apk"
        shutil.copy2(str(apk_path), str(dest))
        print(f"\n APK copied to: {dest}")
    else:
        print(f"\n APK built. Check: {project / 'build'}")
117
+
118
+
119
def main():
    """Dispatch the launcher subcommand from sys.argv (default: chat)."""
    args = sys.argv[1:]
    # Lower-case command; no args means the default CLI chat.
    cmd = args[0].lower() if args else "chat"

    if cmd in ("chat", "cli"):
        run_script("chat.py")
    elif cmd in ("tui", "ui", "app"):
        run_script("app.py")
    elif cmd in ("bot", "telegram"):
        token = None
        # --token supplies a one-off token; --save-token also persists it
        # (and wins when both flags are present).
        if "--token" in args:
            pos = args.index("--token")
            if pos + 1 < len(args):
                token = args[pos + 1]
        if "--save-token" in args:
            pos = args.index("--save-token")
            if pos + 1 < len(args):
                token = args[pos + 1]
                cfg = load_config()
                cfg["bot_token"] = token
                save_config(cfg)
                print(" Token saved.")
        run_bot(token)
    elif cmd in ("server", "backend", "api"):
        run_server()
    elif cmd in ("mobile", "phone"):
        run_script("mobile.py")
    elif cmd in ("web",):
        run_script("web.py")
    elif cmd in ("apk", "build"):
        build_apk()
    elif cmd in ("help", "--help", "-h"):
        print(__doc__)
    else:
        print(f" Unknown: {args[0]}")
        print(" Use: chat, tui, bot, server, mobile, or apk")
154
+
155
+
156
if __name__ == "__main__":
    # Entry point when invoked as `python run.py [command]`.
    main()
package/server.py ADDED
@@ -0,0 +1,335 @@
1
+ """CodeGPT Backend — Flask API server. Supports Groq (cloud) and Ollama (local).
2
+
3
+ Deploy to Render/Railway/PythonAnywhere for 24/7 access without your laptop.
4
+
5
+ Usage:
6
+ python server.py # Starts on port 5050
7
+ python server.py --port 8080 # Custom port
8
+ GROQ_API_KEY=gsk_... python server.py # Use Groq cloud backend
9
+
10
+ Endpoints:
11
+ POST /chat — Send messages, get AI response (streaming)
12
+ POST /chat/quick — Non-streaming single response
13
+ GET /models — List available models
14
+ GET /health — Health check
15
+ GET /config — Get server config (model, provider)
16
+ """
17
+
18
+ import json
19
+ import os
20
+ import sys
21
+ import time
22
+ from datetime import datetime
23
+
24
+ from flask import Flask, request, jsonify, Response, stream_with_context
25
+
26
+ # Optional: Groq for cloud inference
27
+ try:
28
+ from groq import Groq
29
+ HAS_GROQ = True
30
+ except ImportError:
31
+ HAS_GROQ = False
32
+
33
+ # Optional: requests for Ollama
34
+ import requests as http_requests
35
+
36
app = Flask(__name__)

# --- Config ---

# All settings come from the environment so the server can be deployed
# (Render/Railway/etc.) without code changes.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "")
OLLAMA_URL = os.environ.get("OLLAMA_URL", "http://localhost:11434")
DEFAULT_MODEL = os.environ.get("CODEGPT_MODEL", "llama-3.2-3b-preview")  # Groq model
OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL", "llama3.2")
PORT = int(os.environ.get("PORT", 5050))

# Auto-detect provider: prefer the Groq cloud backend when a key is set and
# the SDK imported successfully; otherwise fall back to local Ollama.
if GROQ_API_KEY and HAS_GROQ:
    PROVIDER = "groq"
else:
    PROVIDER = "ollama"

# Default system prompt injected as the first message of every request.
SYSTEM_PROMPT = """You are an AI modeled after a highly technical, system-focused developer mindset.
Be direct, concise, and dense with information. No fluff, no filler, no emojis.
Give conclusions first, then minimal necessary explanation.
Focus on: AI, coding, automation, cybersecurity, system design.
Blunt but intelligent. Slightly dark tone is acceptable.
Keep responses concise for mobile reading."""

# Named system prompts; clients select one via the "persona" JSON field.
PERSONAS = {
    "Default": SYSTEM_PROMPT,
    "Hacker": "You are a cybersecurity expert. Technical jargon, CVEs, attack vectors. Defensive security only. Concise.",
    "Teacher": "You are a patient programming teacher. Step by step, analogies, examples. Concise.",
    "Roast": "You are a brutally sarcastic code reviewer. Roast then fix. Dark humor. Concise.",
    "Minimal": "Shortest possible answer. One line if possible. Code only.",
}

# Stats (in-process only; reset on every restart)
server_stats = {"requests": 0, "start": time.time()}
69
+
70
+
71
+ # --- Groq Backend ---
72
+
73
def query_groq(messages, model, system, stream=False):
    """Query the Groq cloud API.

    With stream=False returns a dict with the reply, model, provider and
    token counts. With stream=True returns a generator of NDJSON lines
    (one JSON object + newline per chunk), ending with a done=True record
    carrying the concatenated full response.
    """
    client = Groq(api_key=GROQ_API_KEY)
    chosen = model or DEFAULT_MODEL
    # System prompt always leads the conversation.
    convo = [{"role": "system", "content": system}, *messages]

    if not stream:
        completion = client.chat.completions.create(
            model=chosen,
            messages=convo,
            max_tokens=4096,
        )
        usage = completion.usage
        return {
            "content": completion.choices[0].message.content,
            "model": chosen,
            "provider": "groq",
            "tokens": {
                "input": usage.prompt_tokens if usage else 0,
                "output": usage.completion_tokens if usage else 0,
            },
        }

    completion = client.chat.completions.create(
        model=chosen,
        messages=convo,
        stream=True,
        max_tokens=4096,
    )

    def generate():
        pieces = []
        for chunk in completion:
            delta = chunk.choices[0].delta
            if delta and delta.content:
                pieces.append(delta.content)
                yield json.dumps({
                    "content": delta.content,
                    "done": False,
                }) + "\n"
        # Terminal record echoes the assembled reply for convenience.
        yield json.dumps({
            "content": "",
            "done": True,
            "full_response": "".join(pieces),
            "model": chosen,
            "provider": "groq",
        }) + "\n"

    return generate()
125
+
126
+
127
+ # --- Ollama Backend ---
128
+
129
def query_ollama(messages, model, system, stream=False):
    """Query a local Ollama instance.

    With stream=False returns a dict with the reply, model, provider, token
    count and a human-readable stats string; HTTP errors propagate to the
    caller. With stream=True returns a generator of NDJSON lines; connection
    errors are reported as a single done=True error chunk instead of raising.
    """
    ollama_messages = [{"role": "system", "content": system}]
    ollama_messages.extend(messages)

    if stream:
        try:
            response = http_requests.post(
                f"{OLLAMA_URL}/api/chat",
                json={"model": model or OLLAMA_MODEL, "messages": ollama_messages, "stream": True},
                stream=True,
                timeout=120,
            )
            response.raise_for_status()
        except Exception as e:
            # Bug fix: capture the message NOW. `except ... as e` unbinds `e`
            # when the handler exits, so the original closure over `e` raised
            # NameError by the time the caller iterated the generator.
            error_text = f"Error: {e}"

            def error_gen():
                yield json.dumps({"content": error_text, "done": True}) + "\n"

            return error_gen()

        def generate():
            for line in response.iter_lines():
                if not line:
                    continue
                try:
                    chunk = json.loads(line)
                except json.JSONDecodeError:
                    # Skip malformed lines rather than aborting the stream.
                    continue

                content = chunk.get("message", {}).get("content", "")
                done = chunk.get("done", False)

                out = {"content": content, "done": done}

                if done:
                    # Final chunk carries Ollama's run statistics.
                    ec = chunk.get("eval_count", 0)
                    td = chunk.get("total_duration", 0)  # nanoseconds
                    ds = td / 1e9 if td else 0
                    tps = ec / ds if ds > 0 else 0
                    out["model"] = model or OLLAMA_MODEL
                    out["provider"] = "ollama"
                    out["tokens"] = {"output": ec}
                    out["stats"] = f"{ec} tok | {ds:.1f}s | {tps:.0f} tok/s"

                yield json.dumps(out) + "\n"

        return generate()
    else:
        response = http_requests.post(
            f"{OLLAMA_URL}/api/chat",
            json={"model": model or OLLAMA_MODEL, "messages": ollama_messages, "stream": False},
            timeout=120,
        )
        response.raise_for_status()
        data = response.json()
        content = data.get("message", {}).get("content", "")
        ec = data.get("eval_count", 0)
        td = data.get("total_duration", 0)  # nanoseconds
        ds = td / 1e9 if td else 0
        return {
            "content": content,
            "model": model or OLLAMA_MODEL,
            "provider": "ollama",
            "tokens": {"output": ec},
            "stats": f"{ec} tok | {ds:.1f}s",
        }
194
+
195
+
196
+ # --- Routes ---
197
+
198
@app.route("/health", methods=["GET"])
def health():
    """Liveness probe: provider, active model, uptime and request count."""
    return jsonify({
        "status": "ok",
        "provider": PROVIDER,
        "model": DEFAULT_MODEL if PROVIDER == "groq" else OLLAMA_MODEL,
        "uptime": int(time.time() - server_stats["start"]),
        "requests": server_stats["requests"],
    })
208
+
209
+
210
@app.route("/config", methods=["GET"])
def get_config():
    """Expose the active provider/model and the selectable persona names."""
    payload = {
        "provider": PROVIDER,
        "model": DEFAULT_MODEL if PROVIDER == "groq" else OLLAMA_MODEL,
        "personas": list(PERSONAS.keys()),
    }
    return jsonify(payload)
217
+
218
+
219
@app.route("/models", methods=["GET"])
def list_models():
    """List the model names selectable for the active provider."""
    if PROVIDER == "groq":
        # Static list of Groq-hosted models (no discovery endpoint used here).
        models = [
            "llama-3.2-3b-preview",
            "llama-3.2-1b-preview",
            "llama-3.3-70b-versatile",
            "llama-3.1-8b-instant",
            "gemma2-9b-it",
            "mixtral-8x7b-32768",
        ]
    else:
        # Ask the local Ollama daemon; an unreachable daemon yields [].
        try:
            resp = http_requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
            models = [entry["name"] for entry in resp.json().get("models", [])]
        except Exception:
            models = []

    return jsonify({"models": models, "provider": PROVIDER})
239
+
240
+
241
@app.route("/chat", methods=["POST"])
def chat_stream():
    """Streaming chat endpoint.

    Expects JSON {"messages": [...], "model": str?, "persona": str?} and
    streams newline-delimited JSON chunks from the active provider.
    Returns 400 on malformed input, 500 on provider errors.
    """
    server_stats["requests"] += 1

    data = request.get_json()
    if not data:
        return jsonify({"error": "Invalid JSON"}), 400
    messages = data.get("messages", [])
    model = data.get("model", "")
    persona = data.get("persona", "Default")
    system = PERSONAS.get(persona, SYSTEM_PROMPT)

    # Reject non-list payloads early; the original let e.g. a dict slip
    # through `if not messages` and fail deeper in the loop.
    if not isinstance(messages, list):
        return jsonify({"error": "Invalid message format"}), 400
    if not messages:
        return jsonify({"error": "No messages"}), 400

    # Input validation: cap message count and content length
    if len(messages) > 100:
        messages = messages[-100:]
    for msg in messages:
        if not isinstance(msg, dict) or "role" not in msg or "content" not in msg:
            return jsonify({"error": "Invalid message format"}), 400
        # Bug fix: cap only string content — len() on non-str content
        # (e.g. a number) raised TypeError and surfaced as an opaque 500.
        if isinstance(msg["content"], str) and len(msg["content"]) > 50000:
            msg["content"] = msg["content"][:50000]

    try:
        if PROVIDER == "groq":
            gen = query_groq(messages, model, system, stream=True)
        else:
            gen = query_ollama(messages, model, system, stream=True)

        return Response(
            stream_with_context(gen),
            content_type="application/x-ndjson",
        )
    except Exception as e:
        return jsonify({"error": str(e)}), 500
278
+
279
+
280
@app.route("/chat/quick", methods=["POST"])
def chat_quick():
    """Non-streaming chat endpoint.

    Same contract as /chat but returns a single JSON object with the full
    reply. Returns 400 on malformed input, 500 on provider errors.
    """
    server_stats["requests"] += 1

    data = request.get_json()
    if not data:
        return jsonify({"error": "Invalid JSON"}), 400
    messages = data.get("messages", [])
    model = data.get("model", "")
    persona = data.get("persona", "Default")
    system = PERSONAS.get(persona, SYSTEM_PROMPT)

    # Reject non-list payloads early; the original let e.g. a dict slip
    # through `if not messages` and fail deeper in the loop.
    if not isinstance(messages, list):
        return jsonify({"error": "Invalid message format"}), 400
    if not messages:
        return jsonify({"error": "No messages"}), 400

    # Input validation
    if len(messages) > 100:
        messages = messages[-100:]
    for msg in messages:
        if not isinstance(msg, dict) or "role" not in msg or "content" not in msg:
            return jsonify({"error": "Invalid message format"}), 400
        # Bug fix: cap only string content — len() on non-str content
        # (e.g. a number) raised TypeError and surfaced as an opaque 500.
        if isinstance(msg["content"], str) and len(msg["content"]) > 50000:
            msg["content"] = msg["content"][:50000]

    try:
        if PROVIDER == "groq":
            result = query_groq(messages, model, system, stream=False)
        else:
            result = query_ollama(messages, model, system, stream=False)
        return jsonify(result)
    except Exception as e:
        return jsonify({"error": str(e)}), 500
313
+
314
+
315
+ # --- Main ---
316
+
317
if __name__ == "__main__":
    # Startup banner summarizing the effective configuration.
    print("=" * 50)
    print(" CodeGPT Backend Server")
    print("=" * 50)
    print(f" Provider: {PROVIDER}")
    if PROVIDER == "groq":
        print(f" Model: {DEFAULT_MODEL}")
        print(f" API Key: ****{GROQ_API_KEY[-4:]}")  # only the last 4 chars are shown
    else:
        print(f" Model: {OLLAMA_MODEL}")
        print(f" Ollama: {OLLAMA_URL}")
    print(f" Port: {PORT}")
    print(f" Personas: {', '.join(PERSONAS.keys())}")
    print("=" * 50)
    print(f" http://localhost:{PORT}")
    print(f" http://0.0.0.0:{PORT}")
    print(" Ctrl+C to stop.\n")

    # NOTE(review): binds on all interfaces with no authentication — anyone
    # who can reach this port can use the API and consume the Groq quota.
    # Confirm that is intended before deploying outside a trusted network.
    app.run(host="0.0.0.0", port=PORT, debug=False)
package/uninstall.ps1 ADDED
@@ -0,0 +1,30 @@
1
# CodeGPT Uninstaller
# Removes the installed binary from %LOCALAPPDATA%\codegpt, strips that
# directory from the user PATH, and optionally deletes ~/.codegpt user data.
$installDir = "$env:LOCALAPPDATA\codegpt"

Write-Host "Uninstalling CodeGPT..." -ForegroundColor Yellow

# Remove binary
if (Test-Path $installDir) {
    Remove-Item -Recurse -Force $installDir
    Write-Host " Removed $installDir" -ForegroundColor Green
}

# Remove from PATH (user scope only; the machine PATH is left untouched)
$userPath = [Environment]::GetEnvironmentVariable("Path", "User")
if ($userPath -like "*$installDir*") {
    # Rebuild PATH without the install directory entry.
    $newPath = ($userPath.Split(";") | Where-Object { $_ -ne $installDir }) -join ";"
    [Environment]::SetEnvironmentVariable("Path", $newPath, "User")
    Write-Host " Removed from PATH" -ForegroundColor Green
}

# Ask about data — saved tokens/keys live here, so never delete silently.
$answer = Read-Host " Delete user data (~/.codegpt)? (y/n)"
if ($answer -eq "y") {
    $dataDir = "$HOME\.codegpt"
    if (Test-Path $dataDir) {
        Remove-Item -Recurse -Force $dataDir
        Write-Host " Removed $dataDir" -ForegroundColor Green
    }
}

Write-Host "Uninstalled." -ForegroundColor Green