codegpt-ai 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bot.py ADDED
@@ -0,0 +1,1453 @@
1
+ """CodeGPT Telegram Bot — Full-featured AI assistant via Telegram + Ollama."""
2
+
3
+ import io
4
+ import json
5
+ import logging
6
+ import os
7
+ import re
8
+ import subprocess
9
+ import sys
10
+ import tempfile
11
+ import time
12
+ import traceback
13
+ from collections import defaultdict
14
+ from datetime import datetime, timedelta
15
+ from pathlib import Path
16
+
17
+ import requests
18
+ from telegram import (
19
+ Update, BotCommand, InlineQueryResultArticle, InputTextMessageContent,
20
+ InlineKeyboardButton, InlineKeyboardMarkup,
21
+ )
22
+ from telegram.ext import (
23
+ Application, CommandHandler, MessageHandler, InlineQueryHandler,
24
+ CallbackQueryHandler, filters, ContextTypes,
25
+ )
26
+ from telegram.constants import ChatAction, ParseMode
27
+
28
# --- Config ---

# Local Ollama daemon endpoints (default port 11434).
OLLAMA_URL = "http://localhost:11434/api/chat"
OLLAMA_GENERATE_URL = "http://localhost:11434/api/generate"
DEFAULT_MODEL = "llama3.2"
VISION_MODEL = "llava"  # Change to your vision-capable model
MAX_HISTORY = 20  # max messages kept per-user in conversation history
BOT_TOKEN = os.environ.get("CODEGPT_BOT_TOKEN", "")  # Telegram bot token from env
ADMIN_IDS = []  # Add your Telegram user ID here for /admin access
ALLOWED_USERS = []  # Empty = open access

# Default system prompt; also the "default" persona.
SYSTEM_PROMPT = """You are an AI modeled after a highly technical, system-focused developer mindset.

Communication:
- Be direct, concise, and dense with information
- No fluff, no filler, no emojis
- No motivational or overly friendly tone
- Give conclusions first, then minimal necessary explanation

Thinking:
- Break problems into systems and components
- Optimize for efficiency, scalability, and control
- Focus on practical, real-world solutions
- Avoid over-explaining basic concepts

Behavior:
- Do not sugar-coat
- Do not moralize
- Do not give generic advice
- If uncertain, say so briefly
- If incorrect, correct immediately

Focus areas:
- AI, coding, automation, cybersecurity (defensive), system design

Style:
- Structured when useful (lists, steps, architecture)
- Blunt but intelligent
- Slightly dark, high-intensity tone is acceptable

Goal:
Deliver high-value, efficient, technically sharp responses with zero wasted words.

IMPORTANT: Format responses for Telegram. Use Markdown formatting.
Keep responses under 4000 characters when possible."""
73
+
74
# --- Personas ---

# Alternate system prompts selectable via /persona; "default" reuses SYSTEM_PROMPT.
PERSONAS = {
    "default": SYSTEM_PROMPT,
    "hacker": (
        "You are a cybersecurity expert and ethical hacker. You speak in technical jargon, "
        "reference CVEs, talk about attack vectors and defense strategies. You're paranoid "
        "about security and see vulnerabilities everywhere. Dark humor about data breaches. "
        "Always ethical — defensive security only. Format for Telegram Markdown."
    ),
    "teacher": (
        "You are a patient programming teacher. You explain concepts step by step, "
        "use analogies, give examples, and check understanding. You encourage questions. "
        "You adapt your explanation level to the student. Format for Telegram Markdown."
    ),
    "roast": (
        "You are a brutally sarcastic code reviewer. You roast bad code mercilessly but "
        "always give the correct solution after. You use dark humor, compare bad code to "
        "disasters, and question life choices. But deep down you care. Format for Telegram Markdown."
    ),
    "architect": (
        "You are a senior system architect. You think in terms of scalability, "
        "distributed systems, microservices, and infrastructure. You draw ASCII diagrams. "
        "You always consider trade-offs. Format for Telegram Markdown."
    ),
    "minimal": (
        "You give the shortest possible answer. One line if possible. No explanation "
        "unless asked. Code only, no commentary. Format for Telegram Markdown."
    ),
}
104
+
105
# --- Daily Tips ---

# Pool of one-liner tips; /tip picks one at random.
DAILY_TIPS = [
    "Use `git stash` to temporarily save changes without committing.",
    "Python tip: `collections.Counter` counts hashable objects in one line.",
    "Security: Never store passwords in plain text. Use bcrypt or argon2.",
    "Docker tip: Use multi-stage builds to reduce image size by 80%+.",
    "bash: `ctrl+r` does reverse search through command history.",
    "Python: Use `__slots__` in classes to reduce memory usage by 40%.",
    "Networking: `curl -I` shows only HTTP headers — great for debugging.",
    "Git: `git bisect` binary-searches commits to find where a bug was introduced.",
    "Security: Use `nmap -sV` for service version detection on open ports.",
    "Python: `functools.lru_cache` adds memoization with one decorator.",
    "Linux: `htop` > `top`. Install it. Use it. Love it.",
    "API design: Use HTTP status codes correctly. 201 for created, 204 for no content.",
    "Python: `pathlib.Path` > `os.path`. It's cleaner and more Pythonic.",
    "Docker: `docker system prune -a` reclaims disk space from unused images.",
    "Git: `git log --oneline --graph --all` gives you the best commit visualization.",
    "Security: Set `HttpOnly` and `Secure` flags on all session cookies.",
    "Python: `dataclasses` save you from writing __init__, __repr__, __eq__.",
    "bash: `!!` repeats the last command. `sudo !!` runs it as root.",
    "Networking: DNS over HTTPS (DoH) prevents ISP snooping on your queries.",
    "Python: `breakpoint()` drops you into pdb. No imports needed (3.7+).",
    "Linux: `watch -n 1 command` runs a command every second and shows output.",
    "API: Rate limit everything. 429 is your friend, not your enemy.",
    "Git: `git cherry-pick <hash>` applies a single commit to your current branch.",
    "Python: `textwrap.dedent()` cleans up indented multi-line strings.",
    "Security: Use CSP headers to prevent XSS. `Content-Security-Policy: default-src 'self'`.",
    "Docker: `docker compose up -d` starts services detached. `logs -f` to tail.",
    "Python: `sys.getsizeof()` shows memory usage of any object in bytes.",
    "bash: `xargs` converts stdin to arguments. `find . -name '*.py' | xargs wc -l`.",
    "Networking: `ss -tlnp` shows listening TCP ports with process names.",
    "Python: `contextlib.suppress(Error)` is cleaner than try/except/pass.",
]
139
+
140
# --- Profiles ---

# On-disk, per-user JSON profiles under ~/.codegpt/profiles/<uid>.json.
PROFILES_DIR = Path.home() / ".codegpt" / "profiles"
PROFILES_DIR.mkdir(parents=True, exist_ok=True)

# Template profile; load_profile() merges stored data over these defaults.
DEFAULT_PROFILE = {
    "display_name": "",
    "bio": "",
    "model": DEFAULT_MODEL,
    "persona": "default",
    "language": "en",
    "daily_tips": True,
    "code_autorun": False,
    "created": None,  # ISO timestamp, set on first creation
}

# Language codes accepted by /setlang, mapped to display names.
LANGUAGES = {
    "en": "English",
    "es": "Spanish",
    "fr": "French",
    "de": "German",
    "pt": "Portuguese",
    "ru": "Russian",
    "ja": "Japanese",
    "zh": "Chinese",
    "ar": "Arabic",
    "hi": "Hindi",
}
168
+
169
+
170
def load_profile(uid):
    """Load a user's profile from disk, or create a fresh default.

    Stored data is merged over DEFAULT_PROFILE so keys added in later
    versions pick up their defaults. Any read/parse failure falls back to
    a new default profile stamped with the current time.

    Args:
        uid: Telegram user id (used as the JSON filename stem).

    Returns:
        dict with at least every DEFAULT_PROFILE key.
    """
    path = PROFILES_DIR / f"{uid}.json"
    if path.exists():
        # Narrowed from a bare `except Exception`: OSError covers unreadable
        # files, ValueError covers json.JSONDecodeError, TypeError covers a
        # JSON document that is not a mapping (so ** merge would fail).
        try:
            data = json.loads(path.read_text())
            # Merge with defaults for any missing keys
            return {**DEFAULT_PROFILE, **data}
        except (OSError, ValueError, TypeError):
            pass  # corrupt/unreadable profile: fall through to defaults
    return {**DEFAULT_PROFILE, "created": datetime.now().isoformat()}
182
+
183
+
184
def save_profile(uid, profile):
    """Persist *profile* as pretty-printed JSON under PROFILES_DIR."""
    target = PROFILES_DIR / f"{uid}.json"
    target.write_text(json.dumps(profile, indent=2))
188
+
189
+
190
def get_profile_field(uid, field):
    """Return one field from the user's profile, defaulting to DEFAULT_PROFILE."""
    profile = load_profile(uid)
    return profile.get(field, DEFAULT_PROFILE.get(field))
193
+
194
+
195
def set_profile_field(uid, field, value):
    """Load the user's profile, update a single field, and write it back."""
    data = load_profile(uid)
    data[field] = value
    save_profile(uid, data)
200
+
201
+
202
# --- State ---

# In-memory, per-process state. Lost on restart; only profiles persist to disk.
user_conversations = defaultdict(list)  # user_id -> [{"role": ..., "content": ...}, ...]
user_models = defaultdict(lambda: DEFAULT_MODEL)  # user_id -> active model name
user_personas = defaultdict(lambda: "default")  # user_id -> PERSONAS key
user_stats = defaultdict(lambda: {"messages": 0, "first_seen": None, "last_seen": None})
rate_limits = defaultdict(list)  # user_id -> list of timestamps

RATE_LIMIT_WINDOW = 60  # seconds
RATE_LIMIT_MAX = 15  # max messages per window

logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
218
+
219
+
220
+ # --- Helpers ---
221
+
222
def ensure_ollama():
    """Ensure the local Ollama daemon is reachable, spawning it if needed.

    Probes /api/tags; on connection failure, launches `ollama serve`
    detached and polls every 2s for up to ~60s.

    Returns:
        True once the daemon answers; False if it never comes up.
    """
    try:
        requests.get("http://localhost:11434/api/tags", timeout=5)
        return True
    except (requests.ConnectionError, requests.Timeout):
        logger.info("Starting Ollama...")
        subprocess.Popen(
            ["ollama", "serve"],
            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
            # DETACHED_PROCESS exists only on Windows; condition guards the access.
            creationflags=subprocess.DETACHED_PROCESS if os.name == "nt" else 0,
        )
        for _ in range(30):  # 30 probes x 2s sleep = ~60s budget
            time.sleep(2)
            try:
                requests.get("http://localhost:11434/api/tags", timeout=5)
                return True
            except (requests.ConnectionError, requests.Timeout):
                continue
        return False
241
+
242
+
243
def get_available_models():
    """Return the model names the local Ollama daemon reports, or [] on any failure."""
    try:
        payload = requests.get("http://localhost:11434/api/tags", timeout=3).json()
        return [entry["name"] for entry in payload.get("models", [])]
    except Exception:
        return []
249
+
250
+
251
def is_allowed(user_id):
    """Access check: everyone when ALLOWED_USERS is empty, otherwise whitelist only."""
    return not ALLOWED_USERS or user_id in ALLOWED_USERS
255
+
256
+
257
def is_admin(user_id):
    """True when *user_id* appears in the ADMIN_IDS list."""
    return any(admin == user_id for admin in ADMIN_IDS)
259
+
260
+
261
def check_rate_limit(user_id):
    """Sliding-window rate limiter.

    Drops timestamps older than RATE_LIMIT_WINDOW, then records this request
    if the user has capacity left. Returns True when within the limit.
    """
    now = time.time()
    recent = [stamp for stamp in rate_limits[user_id] if now - stamp < RATE_LIMIT_WINDOW]
    rate_limits[user_id] = recent
    if len(recent) >= RATE_LIMIT_MAX:
        return False
    recent.append(now)
    return True
270
+
271
+
272
def track_user(user):
    """Track user stats and sync profile.

    Called on every handled update: bumps the in-memory message counter,
    records first/last-seen timestamps, seeds the on-disk profile's display
    name on first use, and refreshes the per-user model/persona caches from
    the persisted profile.
    """
    uid = user.id
    now = datetime.now().isoformat()
    if user_stats[uid]["first_seen"] is None:
        # First time this process sees the user.
        user_stats[uid]["first_seen"] = now
    user_stats[uid]["username"] = getattr(user, "username", None)
    user_stats[uid]["name"] = user.first_name
    # Load saved profile preferences
    profile = load_profile(uid)
    if not profile["display_name"]:
        # New profile: default the display name to the Telegram first name.
        profile["display_name"] = user.first_name
        profile["created"] = now
        save_profile(uid, profile)
    # Keep the in-memory model/persona selections in sync with the profile.
    user_models[uid] = profile["model"]
    user_personas[uid] = profile["persona"]
    user_stats[uid]["last_seen"] = now
    user_stats[uid]["messages"] += 1
290
+
291
+
292
def query_ollama(messages, model, system=None):
    """Send messages to Ollama and return response (non-streaming).

    Args:
        messages: conversation history as [{"role", "content"}, ...].
        model: Ollama model name.
        system: optional system prompt; SYSTEM_PROMPT when None.

    Returns:
        (content, stats) — stats is a Markdown-formatted token/throughput
        footer, or "" on error. Errors are returned as "Error: ..." strings,
        never raised.
    """
    sys_prompt = system or SYSTEM_PROMPT
    ollama_messages = [{"role": "system", "content": sys_prompt}]
    ollama_messages.extend(messages)

    try:
        response = requests.post(
            OLLAMA_URL,
            json={"model": model, "messages": ollama_messages, "stream": False},
            timeout=120,
        )
        response.raise_for_status()
        data = response.json()

        content = data.get("message", {}).get("content", "No response.")
        # Throughput summary: eval_count tokens over total_duration (ns -> s).
        ec = data.get("eval_count", 0)
        td = data.get("total_duration", 0)
        ds = td / 1e9 if td else 0
        tps = ec / ds if ds > 0 else 0
        stats = f"\n\n`{ec} tok | {ds:.1f}s | {tps:.0f} tok/s`"
        return content, stats

    except requests.ConnectionError:
        return "Error: Cannot connect to Ollama.", ""
    except requests.Timeout:
        return "Error: Request timed out.", ""
    except Exception as e:
        return f"Error: {e}", ""
321
+
322
+
323
def stream_ollama(messages, model, system=None):
    """Stream response from Ollama, yielding (full_text_so_far, is_done, stats) tuples.

    Generator: each yielded tuple carries the ACCUMULATED text, not the delta.
    On the final chunk (done=True) stats holds a Markdown token/throughput
    footer. Errors are yielded as a single ("Error: ...", True, "") tuple,
    never raised.
    """
    sys_prompt = system or SYSTEM_PROMPT
    ollama_messages = [{"role": "system", "content": sys_prompt}]
    ollama_messages.extend(messages)

    try:
        response = requests.post(
            OLLAMA_URL,
            json={"model": model, "messages": ollama_messages, "stream": True},
            stream=True,
            timeout=120,
        )
        response.raise_for_status()

        full = []  # accumulated content fragments
        for line in response.iter_lines():
            if not line:
                continue
            try:
                chunk = json.loads(line)
            except json.JSONDecodeError:
                continue  # skip malformed NDJSON lines
            if "message" in chunk and "content" in chunk["message"]:
                full.append(chunk["message"]["content"])

            if chunk.get("done"):
                # Final chunk carries the generation metrics.
                ec = chunk.get("eval_count", 0)
                td = chunk.get("total_duration", 0)
                ds = td / 1e9 if td else 0
                tps = ec / ds if ds > 0 else 0
                stats = f"\n\n`{ec} tok | {ds:.1f}s | {tps:.0f} tok/s`"
                yield "".join(full), True, stats
            else:
                yield "".join(full), False, ""

    except requests.ConnectionError:
        yield "Error: Cannot connect to Ollama.", True, ""
    except requests.Timeout:
        yield "Error: Request timed out.", True, ""
    except Exception as e:
        yield f"Error: {e}", True, ""
365
+
366
+
367
def query_ollama_vision(prompt_text, image_bytes, model):
    """Send image + text to Ollama vision model.

    Args:
        prompt_text: user caption; falls back to a generic describe prompt.
        image_bytes: raw image data; base64-encoded per Ollama's API.
        model: a vision-capable model name (e.g. VISION_MODEL).

    Returns:
        (content, "") — errors are returned as "Error: ..." strings.
    """
    import base64  # local import: only needed on the photo path
    b64 = base64.b64encode(image_bytes).decode("utf-8")

    try:
        response = requests.post(
            OLLAMA_URL,
            json={
                "model": model,
                "messages": [{
                    "role": "user",
                    "content": prompt_text or "Describe this image in detail.",
                    "images": [b64],
                }],
                "stream": False,
            },
            timeout=120,
        )
        response.raise_for_status()
        data = response.json()
        return data.get("message", {}).get("content", "No response."), ""
    except Exception as e:
        return f"Error: {e}", ""
391
+
392
+
393
async def safe_reply(message, text, parse_mode=ParseMode.MARKDOWN):
    """Reply with *text*, splitting long messages and degrading to plain text.

    Text over Telegram's 4096-char cap is sent as 4000-char chunks; any
    Markdown parse failure retries the same piece without parse_mode.
    """
    if len(text) <= 4096:
        pieces = [text]
    else:
        pieces = [text[i:i + 4000] for i in range(0, len(text), 4000)]

    for piece in pieces:
        try:
            await message.reply_text(piece, parse_mode=parse_mode)
        except Exception:
            # Malformed Markdown (e.g. a split code fence): send raw.
            await message.reply_text(piece)
407
+
408
+
409
def extract_code_blocks(text):
    """Return the bodies of fenced code blocks (``` or ```python) in *text*."""
    fence = re.compile(r'```(?:python)?\s*\n(.*?)```', re.DOTALL)
    return fence.findall(text)
414
+
415
+
416
def run_python_code(code, timeout=10):
    """Execute Python code in a subprocess with a timeout.

    Args:
        code: Python source passed to `python -c`.
        timeout: wall-clock limit in seconds.

    Returns:
        (output, returncode) — output combines stdout and stderr, or a
        placeholder/error string; returncode is 1 for timeout/errors.
    """
    try:
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True, text=True, timeout=timeout,
            cwd=tempfile.gettempdir(),  # keep child out of the bot's own directory
        )
        output = ""
        if result.stdout:
            output += result.stdout
        if result.stderr:
            output += "\n" + result.stderr
        return output.strip() or "(no output)", result.returncode
    except subprocess.TimeoutExpired:
        # Fix: report the actual limit instead of a hard-coded "10s".
        return f"Error: Code execution timed out ({timeout}s limit).", 1
    except Exception as e:
        return f"Error: {e}", 1
434
+
435
+
436
+ # --- Command Handlers ---
437
+
438
async def start_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """/start — greet the user and show the main inline-button menu.

    The only command that replies "Access denied." to blocked users;
    other handlers return silently.
    """
    if not is_allowed(update.effective_user.id):
        await update.message.reply_text("Access denied.")
        return

    track_user(update.effective_user)
    user = update.effective_user.first_name

    # Callback data handled elsewhere (show_help / show_profile / show_models / show_personas).
    keyboard = [
        [
            InlineKeyboardButton("Commands", callback_data="show_help"),
            InlineKeyboardButton("Profile", callback_data="show_profile"),
        ],
        [
            InlineKeyboardButton("Models", callback_data="show_models"),
            InlineKeyboardButton("Personas", callback_data="show_personas"),
        ],
    ]

    await update.message.reply_text(
        f"*CodeGPT*\n\n"
        f"Hey {user}. Your local AI assistant, on Telegram.\n\n"
        f"*Quick start:*\n"
        f" Send a message to chat\n"
        f" Send a voice note for voice chat\n"
        f" Send a photo for image analysis\n"
        f" Send a file to discuss it\n\n"
        f"Tap a button below or type /help",
        parse_mode=ParseMode.MARKDOWN,
        reply_markup=InlineKeyboardMarkup(keyboard),
    )
469
+
470
+
471
async def help_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """/help — list available commands; appends the admin section for ADMIN_IDS."""
    if not is_allowed(update.effective_user.id):
        return
    track_user(update.effective_user)

    text = (
        "*Commands*\n\n"
        "*Chat*\n"
        "`/new` — New conversation\n"
        "`/run` — Execute last code block\n"
        "`/export` — Export chat as file\n"
        "`/tip` — Random coding tip\n\n"
        "*Profile*\n"
        "`/profile` — View & edit your profile\n"
        "`/setname` — Set display name\n"
        "`/setbio` — Set bio\n"
        "`/setlang` — Set language\n\n"
        "*Settings*\n"
        "`/model` — Switch model\n"
        "`/models` — List models\n"
        "`/persona` — Switch personality\n"
        "`/personas` — List personalities\n"
        "`/stats` — Your stats\n"
        "`/help` — This message\n"
    )
    # Admin-only commands are hidden from regular users.
    if is_admin(update.effective_user.id):
        text += "\n*Admin*\n`/admin` — User stats & usage\n"

    await safe_reply(update.message, text)
500
+
501
+
502
async def new_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """/new — wipe the user's in-memory conversation history."""
    if not is_allowed(update.effective_user.id):
        return
    track_user(update.effective_user)

    uid = update.effective_user.id
    removed = len(user_conversations[uid])
    user_conversations[uid] = []
    await update.message.reply_text(f"Cleared. ({removed} messages removed)")
511
+
512
+
513
async def model_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """/model [name] — show or switch the user's active model.

    The chosen name is not validated against installed models here;
    a bad name surfaces as an Ollama error on the next chat message.
    """
    if not is_allowed(update.effective_user.id):
        return
    track_user(update.effective_user)

    uid = update.effective_user.id
    if context.args:
        new_model = " ".join(context.args)
        user_models[uid] = new_model
        await safe_reply(update.message, f"Model: `{new_model}`")
    else:
        await safe_reply(update.message, f"Current: `{user_models[uid]}`\nUsage: `/model llama3.2`")
525
+
526
+
527
async def models_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """/models — list models installed in Ollama, marking the user's current one."""
    if not is_allowed(update.effective_user.id):
        return
    track_user(update.effective_user)

    uid = update.effective_user.id
    models = get_available_models()
    current = user_models[uid]

    if models:
        lines = []
        for m in models:
            # " <" marks the active model; the prefix match also covers
            # tagged names like "llama3.2:latest" when current is "llama3.2".
            marker = " <" if m == current or m.startswith(current + ":") else ""
            lines.append(f" `{m}`{marker}")
        text = "*Available Models*\n\n" + "\n".join(lines)
    else:
        text = "No models found."

    await safe_reply(update.message, text)
546
+
547
+
548
async def persona_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """/persona [name] — show or switch the user's persona.

    Updates only the in-memory selection (user_personas); it does not
    write the choice back to the on-disk profile.
    """
    if not is_allowed(update.effective_user.id):
        return
    track_user(update.effective_user)

    uid = update.effective_user.id
    if context.args:
        name = context.args[0].lower()
        if name in PERSONAS:
            user_personas[uid] = name
            await update.message.reply_text(f"Persona: {name}")
        else:
            available = ", ".join(PERSONAS.keys())
            await update.message.reply_text(f"Unknown persona. Available: {available}")
    else:
        current = user_personas[uid]
        await update.message.reply_text(f"Current: {current}\nUsage: /persona hacker")
565
+
566
+
567
async def personas_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """/personas — show an inline keyboard of personas, three buttons per row."""
    if not is_allowed(update.effective_user.id):
        return
    track_user(update.effective_user)

    buttons = [
        InlineKeyboardButton(name.title(), callback_data=f"persona_{name}")
        for name in PERSONAS
    ]
    # Batch into rows of three; a short final row is kept as-is.
    keyboard = [buttons[i:i + 3] for i in range(0, len(buttons), 3)]

    current = user_personas[update.effective_user.id]

    await update.message.reply_text(
        f"*Personas*\nCurrent: {current}\n\nTap to switch:",
        parse_mode=ParseMode.MARKDOWN,
        reply_markup=InlineKeyboardMarkup(keyboard),
    )
590
+
591
+
592
async def run_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Execute the last code block from AI response.

    Scans the conversation newest-first for an assistant message that
    contains fenced code, runs that message's LAST block in a subprocess,
    and replies with the (truncated) output.
    """
    if not is_allowed(update.effective_user.id):
        return
    track_user(update.effective_user)

    uid = update.effective_user.id
    # Find last AI message with code
    for msg in reversed(user_conversations[uid]):
        if msg["role"] == "assistant":
            blocks = extract_code_blocks(msg["content"])
            if blocks:
                code = blocks[-1]  # Run the last code block
                # Echo a 200-char preview of what is about to run.
                await update.message.reply_text(f"Running...\n```python\n{code[:200]}{'...' if len(code) > 200 else ''}\n```", parse_mode=ParseMode.MARKDOWN)

                await context.bot.send_chat_action(chat_id=update.effective_chat.id, action=ChatAction.TYPING)
                output, returncode = run_python_code(code)

                status = "OK" if returncode == 0 else "FAIL"
                # Cap output at 3000 chars to stay under Telegram's limit.
                result = f"*Output* ({status}):\n```\n{output[:3000]}\n```"
                await safe_reply(update.message, result)
                return

    await update.message.reply_text("No code blocks found in recent messages.")
616
+
617
+
618
async def export_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Export conversation as a text file.

    Builds a plain-text transcript (header + [YOU]/[AI] turns) in memory
    and sends it as a timestamped .txt document.
    """
    if not is_allowed(update.effective_user.id):
        return
    track_user(update.effective_user)

    uid = update.effective_user.id
    messages = user_conversations[uid]

    if not messages:
        await update.message.reply_text("No messages to export.")
        return

    # Build text file
    lines = [f"CodeGPT Chat Export — {datetime.now().strftime('%Y-%m-%d %H:%M')}\n"]
    lines.append(f"Model: {user_models[uid]}")
    lines.append(f"Persona: {user_personas[uid]}")
    lines.append(f"Messages: {len(messages)}\n")
    lines.append("=" * 60 + "\n")

    for msg in messages:
        role = "YOU" if msg["role"] == "user" else "AI"
        lines.append(f"[{role}]")
        lines.append(msg["content"])
        lines.append("")

    content = "\n".join(lines)
    # BytesIO with a .name attribute so Telegram picks up the filename.
    buf = io.BytesIO(content.encode("utf-8"))
    buf.name = f"codegpt_chat_{datetime.now().strftime('%Y%m%d_%H%M')}.txt"

    await update.message.reply_document(document=buf, caption="Chat exported.")
649
+
650
+
651
async def tip_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """/tip — reply with a randomly chosen entry from DAILY_TIPS."""
    if not is_allowed(update.effective_user.id):
        return
    track_user(update.effective_user)

    from random import choice  # local import: only needed here
    await update.message.reply_text(
        f"*Tip of the moment:*\n\n{choice(DAILY_TIPS)}",
        parse_mode=ParseMode.MARKDOWN,
    )
660
+
661
+
662
async def stats_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """/stats — show the user's in-memory usage stats and current settings."""
    if not is_allowed(update.effective_user.id):
        return
    track_user(update.effective_user)

    uid = update.effective_user.id
    s = user_stats[uid]
    model = user_models[uid]
    persona = user_personas[uid]
    history = len(user_conversations[uid])

    await safe_reply(update.message,
        f"*Your Stats*\n\n"
        f"Model: `{model}`\n"
        f"Persona: `{persona}`\n"
        f"Messages sent: {s['messages']}\n"
        f"History: {history}/{MAX_HISTORY}\n"
        # first_seen is an ISO timestamp; [:16] trims to "YYYY-MM-DDTHH:MM".
        f"First seen: {s['first_seen'][:16] if s['first_seen'] else 'now'}\n"
        f"Rate limit: {RATE_LIMIT_MAX}/{RATE_LIMIT_WINDOW}s"
    )
682
+
683
+
684
async def admin_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Admin panel — usage stats.

    Restricted to ADMIN_IDS. Summarizes totals, the ten most active users,
    and how many users have each model selected. All figures come from the
    in-memory tables, so they reset on restart.
    """
    if not is_admin(update.effective_user.id):
        await update.message.reply_text("Admin only.")
        return

    total_users = len(user_stats)
    total_messages = sum(s["messages"] for s in user_stats.values())
    active_convos = sum(1 for c in user_conversations.values() if c)

    text = (
        f"*Admin Panel*\n\n"
        f"Total users: {total_users}\n"
        f"Total messages: {total_messages}\n"
        f"Active conversations: {active_convos}\n\n"
    )

    # Top users
    if user_stats:
        text += "*Top Users:*\n"
        sorted_users = sorted(user_stats.items(), key=lambda x: x[1]["messages"], reverse=True)
        for uid, s in sorted_users[:10]:
            name = s.get("name", "Unknown")
            username = s.get("username", "")
            uname = f" (@{username})" if username else ""
            text += f" {name}{uname}: {s['messages']} msgs\n"

    # Models in use
    if user_models:
        model_counts = defaultdict(int)
        for m in user_models.values():
            model_counts[m] += 1
        text += "\n*Models in use:*\n"
        for m, count in model_counts.items():
            text += f" `{m}`: {count} users\n"

    await safe_reply(update.message, text)
721
+
722
+
723
+ # --- Profile ---
724
+
725
async def profile_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Show user profile card with edit buttons.

    Renders the persisted profile plus in-memory message count, with an
    inline keyboard whose callback_data values ("profile_*") are handled
    by the callback-query handler elsewhere in the file.
    """
    if not is_allowed(update.effective_user.id):
        return
    track_user(update.effective_user)

    uid = update.effective_user.id
    profile = load_profile(uid)
    s = user_stats[uid]
    username = getattr(update.effective_user, "username", None)

    # Profile card
    name = profile["display_name"] or update.effective_user.first_name
    bio = profile["bio"] or "No bio set"
    model = profile["model"]
    persona = profile["persona"]
    # Fall back to the raw code if it's not in the LANGUAGES table.
    lang = LANGUAGES.get(profile["language"], profile["language"])
    tips = "On" if profile["daily_tips"] else "Off"
    autorun = "On" if profile["code_autorun"] else "Off"
    msgs = s["messages"]
    since = profile["created"][:10] if profile.get("created") else "today"

    text = (
        f"*Your Profile*\n"
        f"━━━━━━━━━━━━━━━━━━━\n\n"
        f"*Name:* {name}\n"
        f"{'@' + username if username else ''}\n"
        f"*Bio:* {bio}\n\n"
        f"*Model:* `{model}`\n"
        f"*Persona:* {persona}\n"
        f"*Language:* {lang}\n"
        f"*Daily tips:* {tips}\n"
        f"*Code autorun:* {autorun}\n\n"
        f"*Messages:* {msgs}\n"
        f"*Member since:* {since}\n"
    )

    keyboard = [
        [
            InlineKeyboardButton("Edit Name", callback_data="profile_edit_name"),
            InlineKeyboardButton("Edit Bio", callback_data="profile_edit_bio"),
        ],
        [
            InlineKeyboardButton("Model", callback_data="profile_pick_model"),
            InlineKeyboardButton("Persona", callback_data="profile_pick_persona"),
        ],
        [
            InlineKeyboardButton("Language", callback_data="profile_pick_lang"),
            InlineKeyboardButton("Tips: " + tips, callback_data="profile_toggle_tips"),
        ],
        [
            InlineKeyboardButton("Autorun: " + autorun, callback_data="profile_toggle_autorun"),
            InlineKeyboardButton("Reset Profile", callback_data="profile_reset"),
        ],
    ]

    await update.message.reply_text(
        text,
        parse_mode=ParseMode.MARKDOWN,
        reply_markup=InlineKeyboardMarkup(keyboard),
    )
786
+
787
+
788
async def setname_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Quick set display name: /setname <name> (truncated to 30 chars)."""
    if not is_allowed(update.effective_user.id):
        return
    uid = update.effective_user.id
    if not context.args:
        await update.message.reply_text("Usage: `/setname Your Name`", parse_mode=ParseMode.MARKDOWN)
        return
    name = " ".join(context.args)[:30]
    set_profile_field(uid, "display_name", name)
    await update.message.reply_text(f"Name set: *{name}*", parse_mode=ParseMode.MARKDOWN)
799
+
800
+
801
async def setbio_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Quick set bio: /setbio <text> (truncated to 160 chars)."""
    if not is_allowed(update.effective_user.id):
        return
    uid = update.effective_user.id
    if not context.args:
        await update.message.reply_text("Usage: `/setbio I build things`", parse_mode=ParseMode.MARKDOWN)
        return
    bio = " ".join(context.args)[:160]
    set_profile_field(uid, "bio", bio)
    await update.message.reply_text(f"Bio set: _{bio}_", parse_mode=ParseMode.MARKDOWN)
812
+
813
+
814
async def setlang_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Quick set language: /setlang <code>

    Accepts only codes present in LANGUAGES and persists the choice to the
    user's profile.
    """
    if not is_allowed(update.effective_user.id):
        return
    uid = update.effective_user.id
    if context.args:
        code = context.args[0].lower()
        if code in LANGUAGES:
            set_profile_field(uid, "language", code)
            # Update system prompt to include language preference
            # NOTE(review): the comment above describes an intent, but no code
            # here (or visibly elsewhere) injects the language into the system
            # prompt — confirm whether this is implemented or still a TODO.
            await update.message.reply_text(f"Language: {LANGUAGES[code]}")
        else:
            langs = ", ".join(f"`{k}`={v}" for k, v in LANGUAGES.items())
            await update.message.reply_text(f"Available:\n{langs}", parse_mode=ParseMode.MARKDOWN)
    else:
        await update.message.reply_text("Usage: `/setlang en`", parse_mode=ParseMode.MARKDOWN)
830
+
831
+
832
+ # --- Message Handlers ---
833
+
834
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Handle regular text messages with live streaming updates.

    Flow: access/rate-limit checks -> append user turn to history ->
    stream the model reply while editing one Telegram message in place
    (at most once per second) -> commit or roll back history depending
    on success -> deliver the final text, split into <=4096-char chunks.
    """
    if not is_allowed(update.effective_user.id):
        await update.message.reply_text("Access denied.")
        return

    uid = update.effective_user.id
    track_user(update.effective_user)

    if not check_rate_limit(uid):
        await update.message.reply_text(f"Slow down. Max {RATE_LIMIT_MAX} messages per {RATE_LIMIT_WINDOW}s.")
        return

    user_text = update.message.text
    if not user_text:
        return

    await context.bot.send_chat_action(chat_id=update.effective_chat.id, action=ChatAction.TYPING)

    # Record the user turn and cap the history length before querying.
    user_conversations[uid].append({"role": "user", "content": user_text})
    if len(user_conversations[uid]) > MAX_HISTORY:
        user_conversations[uid] = user_conversations[uid][-MAX_HISTORY:]

    model = user_models[uid]
    persona = user_personas[uid]
    system = PERSONAS.get(persona, SYSTEM_PROMPT)

    # Send initial "thinking" message
    live_msg = await update.message.reply_text("Thinking...")

    # Stream with live edits
    last_edit = 0
    last_text = ""
    response_text = ""
    stats = ""
    edit_interval = 1.0  # Edit at most once per second (Telegram rate limit)
    token_count = 0

    for text_so_far, is_done, chunk_stats in stream_ollama(user_conversations[uid], model, system):
        response_text = text_so_far
        # Rough word count, only used for the "streaming..." progress line.
        token_count = len(text_so_far.split())
        now = time.time()

        if is_done:
            stats = chunk_stats
            break

        # Update message periodically
        if now - last_edit >= edit_interval and text_so_far != last_text:
            display = text_so_far + f"\n\n_streaming... {token_count} words_"
            if len(display) > 4096:
                # Telegram caps message length at 4096; show the tail while streaming.
                display = display[-4000:]
            try:
                await live_msg.edit_text(display, parse_mode=ParseMode.MARKDOWN)
            except Exception:
                # Partial Markdown is often invalid mid-stream; retry as plain text.
                try:
                    await live_msg.edit_text(display)
                except Exception:
                    pass
            last_edit = now
            last_text = text_so_far

    # Final edit with complete response
    # Only append to history if we got a real response (not an error string)
    if response_text and not response_text.startswith("Error:"):
        user_conversations[uid].append({"role": "assistant", "content": response_text})
        if len(user_conversations[uid]) > MAX_HISTORY:
            user_conversations[uid] = user_conversations[uid][-MAX_HISTORY:]
    else:
        # Remove the user message we already appended since request failed
        if user_conversations[uid] and user_conversations[uid][-1]["role"] == "user":
            user_conversations[uid].pop()

    # Check for code blocks
    code_blocks = extract_code_blocks(response_text)
    reply_markup = None
    if code_blocks:
        reply_markup = InlineKeyboardMarkup([[
            InlineKeyboardButton("Run Code", callback_data="run_code"),
            InlineKeyboardButton("Copy", callback_data="copy_code"),
        ]])

    full = response_text + stats
    if len(full) <= 4096:
        try:
            await live_msg.edit_text(full, parse_mode=ParseMode.MARKDOWN, reply_markup=reply_markup)
        except Exception:
            try:
                await live_msg.edit_text(full, reply_markup=reply_markup)
            except Exception:
                pass
    else:
        # For long responses, edit with truncated version + send rest as new messages
        try:
            await live_msg.edit_text(full[:4000] + "\n\n_...continued below_", parse_mode=ParseMode.MARKDOWN)
        except Exception:
            await live_msg.edit_text(full[:4000])

        remaining = full[4000:]
        chunks = [remaining[i:i+4000] for i in range(0, len(remaining), 4000)]
        for i, chunk in enumerate(chunks):
            # Attach the Run/Copy buttons only to the last chunk.
            rm = reply_markup if i == len(chunks) - 1 else None
            try:
                await update.message.reply_text(chunk, parse_mode=ParseMode.MARKDOWN, reply_markup=rm)
            except Exception:
                await update.message.reply_text(chunk, reply_markup=rm)
940
+
941
+
942
async def handle_voice(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Handle voice messages — transcribe with Whisper then respond.

    Pipeline: download OGG -> ffmpeg -> 16 kHz mono WAV -> Google Speech
    Recognition -> feed the transcript through the normal chat path.

    Fixes vs. previous version: ffmpeg's exit code is checked (a partial
    WAV could previously pass the existence test), the assistant turn is
    capped at MAX_HISTORY like handle_message, and a pointless f-string
    prefix was dropped.
    """
    if not is_allowed(update.effective_user.id):
        return

    uid = update.effective_user.id
    track_user(update.effective_user)

    if not check_rate_limit(uid):
        await update.message.reply_text("Slow down. Rate limited.")
        return

    await context.bot.send_chat_action(chat_id=update.effective_chat.id, action=ChatAction.TYPING)

    # Download voice file (voice notes and plain audio files both accepted)
    voice = update.message.voice or update.message.audio
    if not voice:
        return

    file = await context.bot.get_file(voice.file_id)

    with tempfile.TemporaryDirectory() as tmpdir:
        ogg_path = os.path.join(tmpdir, "voice.ogg")
        wav_path = os.path.join(tmpdir, "voice.wav")

        await file.download_to_drive(ogg_path)

        # Convert OGG to WAV with ffmpeg (16 kHz mono — what the recognizer expects)
        try:
            proc = subprocess.run(
                ["ffmpeg", "-i", ogg_path, "-ar", "16000", "-ac", "1", wav_path],
                capture_output=True, timeout=15,
            )
        except FileNotFoundError:
            await update.message.reply_text(
                "Voice not supported: ffmpeg not installed.\n"
                "Install: `winget install ffmpeg` or `choco install ffmpeg`",
                parse_mode=ParseMode.MARKDOWN,
            )
            return
        except Exception as e:
            await update.message.reply_text(f"Audio conversion failed: {e}")
            return

        # Nonzero exit or a missing output file both mean conversion failed.
        if proc.returncode != 0 or not os.path.exists(wav_path):
            await update.message.reply_text("Audio conversion failed.")
            return

        # Transcribe with speech_recognition (optional dependency)
        try:
            import speech_recognition as sr
            recognizer = sr.Recognizer()
            with sr.AudioFile(wav_path) as source:
                audio_data = recognizer.record(source)
            transcript = recognizer.recognize_google(audio_data)
        except ImportError:
            await update.message.reply_text(
                "Voice not supported: install `SpeechRecognition`\n"
                "`pip install SpeechRecognition`",
                parse_mode=ParseMode.MARKDOWN,
            )
            return
        except sr.UnknownValueError:
            await update.message.reply_text("Could not understand the audio.")
            return
        except Exception as e:
            await update.message.reply_text(f"Transcription failed: {e}")
            return

    # Show what was heard
    await update.message.reply_text(f"_Heard:_ {transcript}", parse_mode=ParseMode.MARKDOWN)

    # Process as regular message
    await context.bot.send_chat_action(chat_id=update.effective_chat.id, action=ChatAction.TYPING)

    user_conversations[uid].append({"role": "user", "content": transcript})
    if len(user_conversations[uid]) > MAX_HISTORY:
        user_conversations[uid] = user_conversations[uid][-MAX_HISTORY:]

    model = user_models[uid]
    persona = user_personas[uid]
    system = PERSONAS.get(persona, SYSTEM_PROMPT)

    response_text, stats = query_ollama(user_conversations[uid], model, system)
    # Only record successful responses; error strings would pollute the context.
    if not response_text.startswith("Error:"):
        user_conversations[uid].append({"role": "assistant", "content": response_text})
        if len(user_conversations[uid]) > MAX_HISTORY:
            user_conversations[uid] = user_conversations[uid][-MAX_HISTORY:]

    await safe_reply(update.message, response_text + stats)
1030
+
1031
+
1032
async def handle_photo(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Handle photos — analyze with vision model.

    The image is sent to VISION_MODEL; the exchange is recorded into the
    text history only on success (error strings are not stored, and the
    history is capped at MAX_HISTORY — matching handle_message).
    """
    if not is_allowed(update.effective_user.id):
        return

    uid = update.effective_user.id
    track_user(update.effective_user)

    if not check_rate_limit(uid):
        await update.message.reply_text("Rate limited.")
        return

    await context.bot.send_chat_action(chat_id=update.effective_chat.id, action=ChatAction.TYPING)

    # Get the largest photo (Telegram sends several resolutions; last is biggest)
    photo = update.message.photo[-1]
    file = await context.bot.get_file(photo.file_id)

    # Download to bytes
    photo_bytes = await file.download_as_bytearray()

    caption = update.message.caption or "Describe this image in detail. If it contains code, analyze it."

    await update.message.reply_text(f"_Analyzing image with {VISION_MODEL}..._", parse_mode=ParseMode.MARKDOWN)

    response_text, stats = query_ollama_vision(caption, bytes(photo_bytes), VISION_MODEL)

    # Add to conversation as text — but only when the vision call succeeded,
    # so error strings never become part of the model's context.
    if not response_text.startswith("Error:"):
        user_conversations[uid].append({"role": "user", "content": f"[Image sent] {caption}"})
        user_conversations[uid].append({"role": "assistant", "content": response_text})
        if len(user_conversations[uid]) > MAX_HISTORY:
            user_conversations[uid] = user_conversations[uid][-MAX_HISTORY:]

    await safe_reply(update.message, response_text + stats)
1064
+
1065
+
1066
async def handle_document(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Handle file uploads — read and discuss.

    Accepts UTF-8 text files up to 1 MB, truncates content to 8000 chars,
    and runs the file + caption through the normal chat path. On a failed
    model call the user turn is rolled back (matching handle_message)
    instead of storing the error string in the history.
    """
    if not is_allowed(update.effective_user.id):
        return

    uid = update.effective_user.id
    track_user(update.effective_user)

    if not check_rate_limit(uid):
        await update.message.reply_text("Rate limited.")
        return

    doc = update.message.document
    if not doc:
        return

    # Size limit: 1MB
    if doc.file_size and doc.file_size > 1_000_000:
        await update.message.reply_text("File too large. Max 1MB.")
        return

    await context.bot.send_chat_action(chat_id=update.effective_chat.id, action=ChatAction.TYPING)

    file = await context.bot.get_file(doc.file_id)
    file_bytes = await file.download_as_bytearray()

    # Try to decode as text
    try:
        content = bytes(file_bytes).decode("utf-8")
    except UnicodeDecodeError:
        await update.message.reply_text("Cannot read file. Only text/code files are supported.")
        return

    # Truncate if too long
    if len(content) > 8000:
        content = content[:8000] + "\n\n... (truncated)"

    caption = update.message.caption or "Analyze this file."
    prompt = f"File: {doc.file_name}\n\n```\n{content}\n```\n\n{caption}"

    user_conversations[uid].append({"role": "user", "content": prompt})
    if len(user_conversations[uid]) > MAX_HISTORY:
        user_conversations[uid] = user_conversations[uid][-MAX_HISTORY:]

    model = user_models[uid]
    persona = user_personas[uid]
    system = PERSONAS.get(persona, SYSTEM_PROMPT)

    response_text, stats = query_ollama(user_conversations[uid], model, system)
    if not response_text.startswith("Error:"):
        user_conversations[uid].append({"role": "assistant", "content": response_text})
        if len(user_conversations[uid]) > MAX_HISTORY:
            user_conversations[uid] = user_conversations[uid][-MAX_HISTORY:]
    elif user_conversations[uid] and user_conversations[uid][-1]["role"] == "user":
        # Request failed — roll back the user turn we just appended.
        user_conversations[uid].pop()

    await safe_reply(update.message, response_text + stats)
1118
+
1119
+
1120
+ # --- Inline Mode ---
1121
+
1122
async def handle_inline(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Handle inline queries — @botname <query> in any chat.

    Stateless one-shot: answered without touching the per-user
    conversation history.
    """
    prompt = update.inline_query.query.strip()
    if not prompt:
        return

    uid = update.inline_query.from_user.id
    if not is_allowed(uid):
        return

    # Quick query — no conversation history
    answer, _ = query_ollama(
        [{"role": "user", "content": prompt}],
        user_models[uid],
        "Be extremely concise. Max 200 words. Format for Telegram.",
    )

    content = InputTextMessageContent(
        f"*Q:* {prompt}\n\n*A:* {answer}",
        parse_mode=ParseMode.MARKDOWN,
    )
    article = InlineQueryResultArticle(
        id="1",
        title=f"CodeGPT: {prompt[:50]}",
        description=answer[:100],
        input_message_content=content,
    )

    await update.inline_query.answer([article], cache_time=30)
1153
+
1154
+
1155
+ # --- Callback Handlers ---
1156
+
1157
async def handle_callback(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Handle inline keyboard button presses.

    Dispatches on the callback-data string: navigation shortcuts
    (show_*), code actions (run_code/copy_code), and the profile editor
    (profile_* plus the pmodel_/ppersona_/plang_ value callbacks).
    Unknown data falls through silently.
    """
    query = update.callback_query
    # Ack the button press immediately so Telegram stops the spinner.
    await query.answer()

    uid = query.from_user.id
    data = query.data

    if data == "show_help":
        text = (
            "*Commands*\n\n"
            "/new — New conversation\n"
            "/model — Switch model\n"
            "/persona — Switch personality\n"
            "/run — Execute code block\n"
            "/export — Export chat\n"
            "/tip — Coding tip\n"
            "/stats — Your stats"
        )
        await query.edit_message_text(text, parse_mode=ParseMode.MARKDOWN)

    elif data == "show_profile":
        profile = load_profile(uid)
        name = profile["display_name"] or "Not set"
        bio = profile["bio"] or "Not set"
        model = profile["model"]
        persona = profile["persona"]
        # Fall back to the raw code when the language isn't in LANGUAGES.
        lang = LANGUAGES.get(profile["language"], profile["language"])
        text = (
            f"*Your Profile*\n\n"
            f"*Name:* {name}\n"
            f"*Bio:* {bio}\n"
            f"*Model:* `{model}`\n"
            f"*Persona:* {persona}\n"
            f"*Language:* {lang}\n\n"
            f"Type /profile for full view with edit buttons."
        )
        await query.edit_message_text(text, parse_mode=ParseMode.MARKDOWN)

    elif data == "show_models":
        models = get_available_models()
        current = user_models[uid]
        if models:
            # '>' marks the currently selected model.
            lines = [f" {'>' if m.startswith(current) else ' '} {m}" for m in models]
            text = "*Models*\n\n" + "\n".join(lines)
        else:
            text = "No models found."
        await query.edit_message_text(text, parse_mode=ParseMode.MARKDOWN)

    elif data == "show_personas":
        current = user_personas[uid]
        lines = [f" {'>' if k == current else ' '} {k}" for k in PERSONAS]
        text = "*Personas*\n\n" + "\n".join(lines) + "\n\nUse: /persona <name>"
        await query.edit_message_text(text, parse_mode=ParseMode.MARKDOWN)

    elif data.startswith("persona_"):
        # Strip the "persona_" prefix to get the persona name.
        name = data[8:]
        if name in PERSONAS:
            user_personas[uid] = name
            await query.edit_message_text(f"Persona switched to: *{name}*", parse_mode=ParseMode.MARKDOWN)

    elif data == "run_code":
        # Find last code block in conversation
        for msg in reversed(user_conversations[uid]):
            if msg["role"] == "assistant":
                blocks = extract_code_blocks(msg["content"])
                if blocks:
                    code = blocks[-1]
                    output, rc = run_python_code(code)
                    status = "OK" if rc == 0 else "FAIL"
                    result = f"*Output* ({status}):\n```\n{output[:3000]}\n```"
                    try:
                        await query.message.reply_text(result, parse_mode=ParseMode.MARKDOWN)
                    except Exception:
                        # Output may contain Markdown-breaking chars; retry plain.
                        await query.message.reply_text(f"Output ({status}):\n{output[:3000]}")
                    return
        await query.message.reply_text("No code blocks found.")

    elif data == "copy_code":
        for msg in reversed(user_conversations[uid]):
            if msg["role"] == "assistant":
                blocks = extract_code_blocks(msg["content"])
                if blocks:
                    code = blocks[-1]
                    await query.message.reply_text(f"```python\n{code}\n```", parse_mode=ParseMode.MARKDOWN)
                    return
        await query.message.reply_text("No code blocks found.")

    # --- Profile callbacks ---

    elif data == "profile_edit_name":
        await query.edit_message_text(
            "Send your new name with:\n`/setname Your Name`",
            parse_mode=ParseMode.MARKDOWN,
        )

    elif data == "profile_edit_bio":
        await query.edit_message_text(
            "Send your bio with:\n`/setbio I build cool stuff`",
            parse_mode=ParseMode.MARKDOWN,
        )

    elif data == "profile_pick_model":
        models = get_available_models()
        if models:
            # Build a 2-column keyboard from up to 12 models.
            keyboard = []
            row = []
            for m in models[:12]:
                short = m.split(":")[0] if ":" in m else m
                row.append(InlineKeyboardButton(short, callback_data=f"pmodel_{m}"))
                if len(row) == 2:
                    keyboard.append(row)
                    row = []
            if row:
                keyboard.append(row)
            await query.edit_message_text(
                "*Pick a model:*",
                parse_mode=ParseMode.MARKDOWN,
                reply_markup=InlineKeyboardMarkup(keyboard),
            )
        else:
            await query.edit_message_text("No models found.")

    elif data.startswith("pmodel_"):
        model = data[7:]
        user_models[uid] = model
        set_profile_field(uid, "model", model)
        await query.edit_message_text(f"Model set: `{model}`\n\nType /profile to see your profile.", parse_mode=ParseMode.MARKDOWN)

    elif data == "profile_pick_persona":
        # 3-column keyboard over all personas.
        keyboard = []
        row = []
        for name in PERSONAS:
            row.append(InlineKeyboardButton(name.title(), callback_data=f"ppersona_{name}"))
            if len(row) == 3:
                keyboard.append(row)
                row = []
        if row:
            keyboard.append(row)
        await query.edit_message_text(
            "*Pick a persona:*",
            parse_mode=ParseMode.MARKDOWN,
            reply_markup=InlineKeyboardMarkup(keyboard),
        )

    elif data.startswith("ppersona_"):
        name = data[9:]
        if name in PERSONAS:
            user_personas[uid] = name
            set_profile_field(uid, "persona", name)
            await query.edit_message_text(f"Persona: *{name}*\n\nType /profile to see your profile.", parse_mode=ParseMode.MARKDOWN)

    elif data == "profile_pick_lang":
        # 3-column keyboard over all supported languages.
        keyboard = []
        row = []
        for code, lang_name in LANGUAGES.items():
            row.append(InlineKeyboardButton(f"{lang_name}", callback_data=f"plang_{code}"))
            if len(row) == 3:
                keyboard.append(row)
                row = []
        if row:
            keyboard.append(row)
        await query.edit_message_text(
            "*Pick a language:*",
            parse_mode=ParseMode.MARKDOWN,
            reply_markup=InlineKeyboardMarkup(keyboard),
        )

    elif data.startswith("plang_"):
        code = data[6:]
        if code in LANGUAGES:
            set_profile_field(uid, "language", code)
            await query.edit_message_text(f"Language: {LANGUAGES[code]}\n\nType /profile to see your profile.", parse_mode=ParseMode.MARKDOWN)

    elif data == "profile_toggle_tips":
        # Flip the persisted boolean; 'state' reports the NEW value.
        current = get_profile_field(uid, "daily_tips")
        set_profile_field(uid, "daily_tips", not current)
        state = "On" if not current else "Off"
        await query.edit_message_text(f"Daily tips: *{state}*\n\nType /profile to see your profile.", parse_mode=ParseMode.MARKDOWN)

    elif data == "profile_toggle_autorun":
        current = get_profile_field(uid, "code_autorun")
        set_profile_field(uid, "code_autorun", not current)
        state = "On" if not current else "Off"
        await query.edit_message_text(f"Code autorun: *{state}*\n\nType /profile to see your profile.", parse_mode=ParseMode.MARKDOWN)

    elif data == "profile_reset":
        # Rebuild from DEFAULT_PROFILE, keeping the Telegram first name.
        profile = {**DEFAULT_PROFILE, "display_name": query.from_user.first_name, "created": datetime.now().isoformat()}
        save_profile(uid, profile)
        user_models[uid] = DEFAULT_MODEL
        user_personas[uid] = "default"
        await query.edit_message_text("Profile reset to defaults.\n\nType /profile to see your profile.", parse_mode=ParseMode.MARKDOWN)
1349
+
1350
+
1351
+ # --- Scheduled Tasks ---
1352
+
1353
async def send_daily_tip(context: ContextTypes.DEFAULT_TYPE) -> None:
    """Send daily tip to users who have tips enabled."""
    import random

    tip = random.choice(DAILY_TIPS)
    for uid, info in user_stats.items():
        # Skip users we have never seen, and users who opted out of tips.
        if not info.get("last_seen"):
            continue
        if not get_profile_field(uid, "daily_tips"):
            continue
        try:
            await context.bot.send_message(
                chat_id=uid,
                text=f"*Daily Tip*\n\n{tip}",
                parse_mode=ParseMode.MARKDOWN,
            )
        except Exception:
            pass  # User may have blocked the bot
1370
+
1371
+
1372
+ # --- Error Handler ---
1373
+
1374
async def error_handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Global error handler: log the exception and tell the user something broke.

    The user notification is itself guarded — if the original error was a
    send failure (blocked bot, closed chat), replying would raise again
    and flood the log with secondary errors.
    """
    # Lazy %-args: the message is only formatted if the level is enabled.
    logger.error("Error: %s", context.error)
    if update and update.effective_message:
        try:
            await update.effective_message.reply_text("Something went wrong. Try again.")
        except Exception:
            pass
1378
+
1379
+
1380
+ # --- Main ---
1381
+
1382
def main():
    """Boot the bot: validate config, ensure Ollama is up, wire every
    handler, schedule the daily tip, and start long polling."""
    if not BOT_TOKEN:
        print("Error: No bot token.")
        print(" Set: export CODEGPT_BOT_TOKEN='your-token'")
        print(" Get token from @BotFather on Telegram.")
        return

    if not ensure_ollama():
        print("Error: Could not start Ollama.")
        return

    banner = "=" * 50
    print(banner)
    print(" CodeGPT Telegram Bot")
    print(banner)
    print(f" Model: {DEFAULT_MODEL}")
    print(f" Vision: {VISION_MODEL}")
    print(f" Ollama: {OLLAMA_URL}")
    print(f" Access: {'restricted' if ALLOWED_USERS else 'open'}")
    print(f" Admins: {ADMIN_IDS or 'none'}")
    print(f" Rate: {RATE_LIMIT_MAX} msgs / {RATE_LIMIT_WINDOW}s")
    print(banner)
    print(" Bot running. Ctrl+C to stop.\n")

    app = Application.builder().token(BOT_TOKEN).build()

    # Slash commands (registration order preserved).
    command_table = (
        ("start", start_command),
        ("help", help_command),
        ("new", new_command),
        ("model", model_command),
        ("models", models_command),
        ("persona", persona_command),
        ("personas", personas_command),
        ("run", run_command),
        ("export", export_command),
        ("tip", tip_command),
        ("stats", stats_command),
        ("admin", admin_command),
        ("profile", profile_command),
        ("setname", setname_command),
        ("setbio", setbio_command),
        ("setlang", setlang_command),
    )
    for cmd, fn in command_table:
        app.add_handler(CommandHandler(cmd, fn))

    # Non-command message types.
    app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message))
    app.add_handler(MessageHandler(filters.VOICE | filters.AUDIO, handle_voice))
    app.add_handler(MessageHandler(filters.PHOTO, handle_photo))
    app.add_handler(MessageHandler(filters.Document.ALL, handle_document))

    # Inline mode (@botname <query>) and inline-keyboard callbacks.
    app.add_handler(InlineQueryHandler(handle_inline))
    app.add_handler(CallbackQueryHandler(handle_callback))

    # Error handler
    app.add_error_handler(error_handler)

    # Daily tip at 9:00 AM (job_queue is None without the PTB job-queue extra).
    job_queue = app.job_queue
    if job_queue:
        job_queue.run_daily(
            send_daily_tip,
            time=datetime.strptime("09:00", "%H:%M").time(),
            name="daily_tip",
        )

    app.run_polling(allowed_updates=Update.ALL_TYPES)
1450
+
1451
+
1452
+ if __name__ == "__main__":
1453
+ main()