superbrain-server 1.0.2-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/superbrain.js +196 -0
- package/package.json +23 -0
- package/payload/.dockerignore +45 -0
- package/payload/.env.example +58 -0
- package/payload/Dockerfile +73 -0
- package/payload/analyzers/__init__.py +0 -0
- package/payload/analyzers/audio_transcribe.py +225 -0
- package/payload/analyzers/caption.py +244 -0
- package/payload/analyzers/music_identifier.py +346 -0
- package/payload/analyzers/text_analyzer.py +117 -0
- package/payload/analyzers/visual_analyze.py +218 -0
- package/payload/analyzers/webpage_analyzer.py +789 -0
- package/payload/analyzers/youtube_analyzer.py +320 -0
- package/payload/api.py +1676 -0
- package/payload/config/.api_keys.example +22 -0
- package/payload/config/model_rankings.json +492 -0
- package/payload/config/openrouter_free_models.json +1364 -0
- package/payload/config/whisper_model.txt +1 -0
- package/payload/config_settings.py +185 -0
- package/payload/core/__init__.py +0 -0
- package/payload/core/category_manager.py +219 -0
- package/payload/core/database.py +811 -0
- package/payload/core/link_checker.py +300 -0
- package/payload/core/model_router.py +1253 -0
- package/payload/docker-compose.yml +120 -0
- package/payload/instagram/__init__.py +0 -0
- package/payload/instagram/instagram_downloader.py +253 -0
- package/payload/instagram/instagram_login.py +190 -0
- package/payload/main.py +912 -0
- package/payload/requirements.txt +39 -0
- package/payload/reset.py +311 -0
- package/payload/start-docker-prod.sh +125 -0
- package/payload/start-docker.sh +56 -0
- package/payload/start.py +1302 -0
- package/payload/static/favicon.ico +0 -0
- package/payload/stop-docker.sh +16 -0
- package/payload/utils/__init__.py +0 -0
- package/payload/utils/db_stats.py +108 -0
- package/payload/utils/manage_token.py +91 -0
package/payload/start.py
ADDED
|
@@ -0,0 +1,1302 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
╔══════════════════════════════════════════════════════════════════╗
|
|
4
|
+
║ SuperBrain — First-Time Setup & Launcher ║
|
|
5
|
+
║ Run this once to configure everything, then again ║
|
|
6
|
+
║ any time to start the server. ║
|
|
7
|
+
╚══════════════════════════════════════════════════════════════════╝
|
|
8
|
+
|
|
9
|
+
Usage:
|
|
10
|
+
python start.py — interactive setup on first run, then start server
|
|
11
|
+
python start.py --reset — re-run the full setup wizard
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import sys
|
|
15
|
+
import os
|
|
16
|
+
import subprocess
|
|
17
|
+
import platform
|
|
18
|
+
import shutil
|
|
19
|
+
import json
|
|
20
|
+
import secrets
|
|
21
|
+
import string
|
|
22
|
+
import textwrap
|
|
23
|
+
import time
|
|
24
|
+
import importlib
|
|
25
|
+
from pathlib import Path
|
|
26
|
+
|
|
27
|
+
# ── Paths ─────────────────────────────────────────────────────────────────────
# Everything is resolved relative to this script's own directory so the
# launcher behaves the same regardless of the current working directory.
BASE_DIR = Path(__file__).parent.resolve()
VENV_DIR = BASE_DIR / ".venv"                    # project-local virtual environment
API_KEYS = BASE_DIR / "config" / ".api_keys"     # provider keys + Instagram credentials
TOKEN_FILE = BASE_DIR / "token.txt"              # NOTE(review): not referenced in this chunk — presumably used elsewhere in the file
SETUP_DONE = BASE_DIR / ".setup_done"            # marker file: wizard already completed

IS_WINDOWS = platform.system() == "Windows"
PYTHON = sys.executable  # path that launched this script
# Interpreter / pip inside the venv — layout differs between Windows ("Scripts")
# and POSIX ("bin").
VENV_PYTHON = (VENV_DIR / "Scripts" / "python.exe") if IS_WINDOWS else (VENV_DIR / "bin" / "python")
VENV_PIP = (VENV_DIR / "Scripts" / "pip.exe") if IS_WINDOWS else (VENV_DIR / "bin" / "pip")
|
|
38
|
+
|
|
39
|
+
# ── ANSI colours (stripped on Windows unless ANSICON / Windows Terminal) ──────
def _ansi(code): return f"\033[{code}m"  # wrap an SGR code into its escape sequence
RESET = _ansi(0); BOLD = _ansi(1)
RED = _ansi(31); GREEN = _ansi(32); YELLOW = _ansi(33)
BLUE = _ansi(34); CYAN = _ansi(36); WHITE = _ansi(37)
DIM = _ansi(2)
MAGENTA = _ansi(35)
MAG = MAGENTA  # short alias used by banner()
|
|
47
|
+
|
|
48
|
+
def link(url: str, text: str | None = None) -> str:
|
|
49
|
+
"""OSC 8 terminal hyperlink — clickable in most modern terminals."""
|
|
50
|
+
label = text or url
|
|
51
|
+
return f"\033]8;;{url}\033\\{label}\033]8;;\033\\"
|
|
52
|
+
|
|
53
|
+
def banner():
    """Print the SUPER BRAIN ASCII-art banner followed by an author credit line."""
    art = f"""{CYAN}{BOLD}
███████╗██╗ ██╗██████╗ ███████╗██████╗
██╔════╝██║ ██║██╔══██╗██╔════╝██╔══██╗
███████╗██║ ██║██████╔╝█████╗ ██████╔╝
╚════██║██║ ██║██╔═══╝ ██╔══╝ ██╔══██╗
███████║╚██████╔╝██║ ███████╗██║ ██║
╚══════╝ ╚═════╝ ╚═╝ ╚══════╝╚═╝ ╚═╝

██████╗ ██████╗ █████╗ ██╗███╗ ██╗
██╔══██╗██╔══██╗██╔══██╗██║████╗ ██║
██████╔╝██████╔╝███████║██║██╔██╗ ██║
██╔══██╗██╔══██╗██╔══██║██║██║╚██╗██║
██████╔╝██║ ██║██║ ██║██║██║ ╚████║
╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝
{RESET}"""
    # Credit line uses an OSC 8 hyperlink so "sidinsearch" is clickable.
    credit = (f" {DIM}made with {RESET}{MAG}❤{RESET}{DIM} by "
              f"{link('https://github.com/sidinsearch', f'{BOLD}sidinsearch{RESET}{DIM}')}"
              f"{RESET}\n")
    print(art + credit)
|
|
73
|
+
|
|
74
|
+
# One-line print helpers — shared colour scheme for all setup output.
def h1(msg): print(f"\n{BOLD}{CYAN}{'━'*64}{RESET}\n{BOLD} {msg}{RESET}\n{BOLD}{CYAN}{'━'*64}{RESET}")  # major section heading
def h2(msg): print(f"\n{BOLD}{BLUE} ▶ {msg}{RESET}")  # sub-heading
def ok(msg): print(f" {GREEN}✓{RESET} {msg}")   # success line
def warn(msg):print(f" {YELLOW}⚠{RESET} {msg}")  # non-fatal warning
def err(msg): print(f" {RED}✗{RESET} {msg}")    # error (caller decides whether to exit)
def info(msg):print(f" {DIM}{msg}{RESET}")       # dimmed informational note
def nl(): print()                                # blank line
|
|
81
|
+
|
|
82
|
+
def ask(prompt, default=None, secret=False, paste=False):
    """
    Prompt for input, returning *default* when the user submits nothing.
    secret=True — uses getpass (hidden, no echo) — good for passwords typed char-by-char.
    paste=True  — uses plain input (visible) so Ctrl+V / right-click paste works;
                  existing value is shown as ●●●● to indicate something is already set.
    """
    if default and paste:
        display_default = f" [{DIM}●●●● (already set — paste to replace){RESET}]"
    elif default:
        display_default = f" [{DIM}{default}{RESET}]"
    else:
        display_default = ""

    full_prompt = f"\n {BOLD}{prompt}{RESET}{display_default}: "

    if secret:
        import getpass
        answer = getpass.getpass(full_prompt)
    else:
        # Non-secret input is stripped; getpass output is kept verbatim.
        answer = input(full_prompt).strip()
    return answer or default
|
|
104
|
+
|
|
105
|
+
def ask_yn(prompt, default=True):
    """Ask a yes/no question; an empty answer returns *default*."""
    if default:
        suffix = f"[{BOLD}Y{RESET}/n]"
    else:
        suffix = f"[y/{BOLD}N{RESET}]"
    reply = input(f"\n {BOLD}{prompt}{RESET} {suffix}: ").strip().lower()
    return default if not reply else reply in ("y", "yes")
|
|
111
|
+
|
|
112
|
+
def run(cmd, **kwargs):
    """Run *cmd* (argv list), inheriting stdout/stderr; raise CalledProcessError on failure."""
    return subprocess.run(cmd, check=True, **kwargs)
|
|
115
|
+
|
|
116
|
+
def run_q(cmd, **kwargs):
    """Run *cmd* silently, capturing stdout/stderr as text; raise CalledProcessError on failure."""
    return subprocess.run(cmd, check=True, capture_output=True, text=True, **kwargs)
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
def ensure_runtime_dependencies():
    """Install must-have runtime packages if missing in the active venv.

    Exits the process (code 1) if installation fails, after printing the
    manual command to run.
    """
    # (import-name, pip-package-name) pairs — the module name differs from
    # the distribution name, which is why both are listed.
    required = [
        ("multipart", "python-multipart"),
    ]
    missing: list[str] = []

    for module_name, package_name in required:
        try:
            importlib.import_module(module_name)
        except Exception:
            # Broad catch on purpose: anything raised at import time means
            # the package is unusable, so treat it as missing.
            missing.append(package_name)

    if not missing:
        return

    warn(f"Missing runtime package(s): {', '.join(missing)}")
    info("Installing missing runtime package(s) automatically …")
    try:
        # Install with the venv interpreter so packages land in the project
        # venv, not whichever Python launched this script.
        run([str(VENV_PYTHON), "-m", "pip", "install", *missing])
        ok("Runtime dependencies installed")
    except Exception as e:
        err(f"Failed to install runtime dependencies: {e}")
        info(f"Run manually: {VENV_PYTHON} -m pip install {' '.join(missing)}")
        sys.exit(1)
|
|
146
|
+
|
|
147
|
+
# ── Helpers for live output displays ───────────────────────────────────────────────
|
|
148
|
+
BAR_WIDTH = 36
|
|
149
|
+
|
|
150
|
+
def _ascii_bar(completed: int, total: int, width: int = BAR_WIDTH) -> str:
|
|
151
|
+
"""Return a coloured ASCII progress bar string."""
|
|
152
|
+
if total <= 0:
|
|
153
|
+
return ""
|
|
154
|
+
pct = min(completed / total, 1.0)
|
|
155
|
+
fill = int(width * pct)
|
|
156
|
+
bar = f"{GREEN}{'█' * fill}{DIM}{'░' * (width - fill)}{RESET}"
|
|
157
|
+
mb_d = completed / 1_048_576
|
|
158
|
+
mb_t = total / 1_048_576
|
|
159
|
+
return f"[{bar}] {mb_d:6.1f} / {mb_t:.1f} MB {pct*100:5.1f}%"
|
|
160
|
+
|
|
161
|
+
def _overwrite(line: str):
|
|
162
|
+
"""Overwrite the current terminal line in-place."""
|
|
163
|
+
sys.stdout.write(f"\r {line}")
|
|
164
|
+
sys.stdout.flush()
|
|
165
|
+
|
|
166
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
167
|
+
# Step 1 — Virtual Environment
|
|
168
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
169
|
+
def setup_venv():
    """Step 1 — create the project-local virtual environment if absent.

    Idempotent: an existing .venv is left untouched.
    """
    # BUGFIX: heading said "Step 1 of 6" while every other step says "of 7".
    h1("Step 1 of 7 — Python Virtual Environment")
    if VENV_DIR.exists():
        ok(f"Virtual environment already exists at {VENV_DIR}")
        return
    h2("Creating virtual environment …")
    run([PYTHON, "-m", "venv", str(VENV_DIR)])
    ok(f"Virtual environment created at {VENV_DIR}")
|
|
177
|
+
|
|
178
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
179
|
+
# Step 2 — Install Dependencies
|
|
180
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
181
|
+
def install_deps():
    """Step 2 — install requirements.txt into the venv with live progress output.

    Streams pip's plain-text output (progress bar disabled) and re-renders it
    as a compact coloured per-package display. Exits the process if
    requirements.txt is missing; raises CalledProcessError if pip fails.
    """
    h1("Step 2 of 7 — Installing Python Dependencies")
    req = BASE_DIR / "requirements.txt"
    if not req.exists():
        err("requirements.txt not found — cannot install dependencies.")
        sys.exit(1)

    h2("Upgrading pip …")
    run([str(VENV_PYTHON), "-m", "pip", "install", "--quiet", "--upgrade", "pip"])
    ok("pip up to date")

    h2("Installing packages from requirements.txt …")
    nl()

    # ── stream pip output and display each package live ────────────────────────
    cmd = [str(VENV_PIP), "install", "--progress-bar", "off", "-r", str(req)]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            text=True, bufsize=1)

    collecting: list[str] = []   # packages seen so far (used for the running index)
    n_cached = 0                 # "Requirement already satisfied" count
    n_install = 0                # packages pip is about to link into the venv

    for raw in proc.stdout:  # type: ignore[union-attr]
        line = raw.rstrip()
        if not line:
            continue

        if line.startswith("Collecting "):
            pkg = line.split()[1]
            collecting.append(pkg)
            print(f" {CYAN}↓{RESET} [{len(collecting):>3}] {BOLD}{pkg}{RESET}")

        # BUGFIX: original condition was `A and B or C`, which by Python
        # precedence matched ANY line containing ".tar.gz"; parenthesised to
        # match the (correct) sibling code in setup_whisper().
        elif "Downloading" in line and (".whl" in line or ".tar.gz" in line):
            # e.g. " Downloading fastapi-0.111.0-py3..whl (92 kB)"
            parts = line.strip().split()
            if len(parts) >= 2:
                filename = parts[1]
                size_str = " ".join(parts[2:]).strip("()")
                # BUGFIX: show the real filename — it was parsed into
                # `filename` but the print hard-coded "(unknown)".
                print(f" {DIM}↓ {filename} {size_str}{RESET}")

        elif line.strip().startswith("Requirement already satisfied"):
            n_cached += 1

        elif line.startswith("Installing collected packages:"):
            pkgs = line.split(":", 1)[1].strip()
            n_install = len(pkgs.split(","))
            nl()
            print(f" {BLUE}{BOLD} ▶ Linking {n_install} package(s) into virtual environment …{RESET}")

        elif line.startswith("Successfully installed"):
            tail = line.replace("Successfully installed", "").strip()
            count = len(tail.split())
            nl()
            ok(f"{count} package(s) installed successfully")
            if n_cached:
                info(f"{n_cached} package(s) already satisfied (cached)")

        elif line.upper().startswith("WARNING") or line.upper().startswith("DEPRECATION"):
            pass  # suppress pip noise

        else:
            # Any other line (build output, etc.) show dimmed
            if line.strip():
                print(f" {DIM}{line.strip()}{RESET}")

    proc.wait()
    if proc.returncode != 0:
        raise subprocess.CalledProcessError(proc.returncode, cmd)
|
|
255
|
+
|
|
256
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
257
|
+
# ── API key validators ────────────────────────────────────────────────────────
|
|
258
|
+
# Return values:
|
|
259
|
+
# (True, detail) — key is definitely valid
|
|
260
|
+
# (False, detail) — key is definitely INVALID (401 / explicit auth error)
|
|
261
|
+
# (None, detail) — could not verify (network, 403 scope, timeout, etc.)
|
|
262
|
+
|
|
263
|
+
def _validate_gemini(key: str):
    """Hit the Gemini models list endpoint — any valid key returns 200.

    Returns (True, detail) when the key works, (False, detail) on a definite
    auth failure, (None, detail) when validity cannot be determined.
    """
    try:
        import urllib.request as _r, json as _j
        url = f"https://generativelanguage.googleapis.com/v1beta/models?key={key}"
        request = _r.Request(url, headers={"Accept": "application/json",
                                           "User-Agent": "Mozilla/5.0"})
        with _r.urlopen(request, timeout=8) as resp:
            payload = _j.loads(resp.read())
            model_count = len(payload.get("models", []))
            return True, f"{model_count} models accessible"
    except Exception as e:
        msg = str(e)
        if "400" in msg:
            return False, "invalid key (400 Bad Request)"
        if "401" in msg:
            return False, "invalid key (401 Unauthorized)"
        # 403, timeouts, etc. — cannot determine validity
        return None, f"could not verify ({msg[:70]})"
|
|
282
|
+
|
|
283
|
+
def _validate_groq(key: str):
    """
    Test with a minimal chat completion (max_tokens=1).
    Must include User-Agent to pass Cloudflare (error 1010 without it).

    Returns (True|False|None, detail) per the validator contract documented
    above the validators.
    """
    try:
        import urllib.request as _r, urllib.error as _e, json as _j
        body = _j.dumps({
            "model": "llama-3.3-70b-versatile",
            "messages": [{"role": "user", "content": "hi"}],
            "max_tokens": 1,
        }).encode()
        req = _r.Request(
            "https://api.groq.com/openai/v1/chat/completions",
            data=body,
            headers={"Authorization": f"Bearer {key}",
                     "Content-Type": "application/json",
                     "Accept": "application/json",
                     "User-Agent": "Mozilla/5.0"},
            method="POST",
        )
        # Any 2xx response means the key authenticated; the body is ignored.
        with _r.urlopen(req, timeout=10) as resp:
            return True, "key valid"
    except _e.HTTPError as e:
        # 401 / 400 — the server explicitly rejected the credentials.
        if e.code in (401, 400):
            return False, f"invalid key ({e.code} {e.reason})"
        # 403, 429, 503, etc. — key may be fine
        return None, f"could not verify ({e.code} {e.reason})"
    except Exception as e:
        # Network failure / timeout — cannot tell either way.
        return None, f"could not verify ({str(e)[:70]})"
|
|
313
|
+
|
|
314
|
+
def _validate_openrouter(key: str):
    """Check an OpenRouter key against the /auth/key endpoint.

    Returns (True, label-or-"key valid"), (False, detail) on a definite auth
    failure, or (None, detail) when validity cannot be determined.
    """
    try:
        import urllib.request as _r, urllib.error as _e, json as _j
        headers = {
            "Authorization": f"Bearer {key}",
            "Accept": "application/json",
            "User-Agent": "Mozilla/5.0",
        }
        req = _r.Request("https://openrouter.ai/api/v1/auth/key", headers=headers)
        with _r.urlopen(req, timeout=8) as resp:
            data = _j.loads(resp.read())
            label = data.get("data", {}).get("label", "")
            return True, label or "key valid"
    except _e.HTTPError as e:
        if e.code in (401, 400):
            return False, f"invalid key ({e.code})"
        return None, f"could not verify ({e.code} {e.reason})"
    except Exception as e:
        return None, f"could not verify ({str(e)[:70]})"
|
|
331
|
+
|
|
332
|
+
def _check_and_report(name: str, key: str, validator) -> str:
    """Validate `key`, print result inline, return the key unchanged.

    `validator` follows the (True|False|None, detail) contract. The key is
    always returned — even when it looks invalid — so the caller can still
    save it; only the printed feedback differs.
    """
    if not key:
        return key  # nothing entered — skip the network check entirely
    print(f" {DIM}Checking {name} key …{RESET}", end="", flush=True)
    result, detail = validator(key)
    # "\r" rewrites over the "Checking …" text; trailing space pads it out.
    if result is True:
        print(f"\r {GREEN}✓{RESET} {name}: {detail} ")
    elif result is False:
        print(f"\r {RED}✗{RESET} {name}: {detail} ")
        warn(f"That key looks invalid — double-check at the provider dashboard.")
    else:
        # None — ambiguous, don't cry wolf
        print(f"\r {YELLOW}~{RESET} {name}: {detail} ")
        info("Could not reach the API right now — key saved, will be tested on first use.")
    return key
|
|
348
|
+
|
|
349
|
+
# Step 3 — API Keys
|
|
350
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
351
|
+
def setup_api_keys():
    """Step 3 — collect AI-provider API keys and Instagram credentials.

    Re-running preserves previously saved values (shown masked) unless the
    user pastes a replacement. All values are written to config/.api_keys.
    """
    h1("Step 3 of 7 — AI Provider Keys")

    print(f"""
SuperBrain uses AI providers to analyse your saved content.
You need {BOLD}at least one{RESET} key — the router tries them in order and
falls back automatically.

Recommended: {GREEN}Gemini{RESET} (most generous free tier — 1 500 req/day)

Get free keys:
Gemini → {CYAN}https://aistudio.google.com/apikey{RESET}
Groq → {CYAN}https://console.groq.com/keys{RESET}
OpenRouter → {CYAN}https://openrouter.ai/keys{RESET}

Press {BOLD}Enter{RESET} to skip any key you don't have yet.
{DIM}Keys and passwords are visible as you paste — don't run setup in a screen share.{RESET}
""")

    # Load existing values if re-running
    existing = {}
    if API_KEYS.exists():
        for line in API_KEYS.read_text(encoding="utf-8", errors="ignore").splitlines():
            line = line.strip()
            # Simple KEY=VALUE parser; '#' lines are comments.
            if "=" in line and not line.startswith("#"):
                k, _, v = line.partition("=")
                existing[k.strip()] = v.strip()

    # Each key is prompted for, then validated online (best-effort) and
    # returned unchanged by _check_and_report.
    gemini = ask("Gemini API key", default=existing.get("GEMINI_API_KEY"), paste=True) or ""
    gemini = _check_and_report("Gemini", gemini, _validate_gemini)
    groq_k = ask("Groq API key", default=existing.get("GROQ_API_KEY"), paste=True) or ""
    groq_k = _check_and_report("Groq", groq_k, _validate_groq)
    openr = ask("OpenRouter API key", default=existing.get("OPENROUTER_API_KEY"), paste=True) or ""
    openr = _check_and_report("OpenRouter", openr, _validate_openrouter)

    if not any([gemini, groq_k, openr]):
        warn("No AI keys entered. SuperBrain will still work but can only use")
        warn("local Ollama models (configured in the next step).")

    # Instagram credentials
    nl()
    print(f" {BOLD}Instagram Credentials{RESET}")
    print(f"""
Used for downloading private/public Instagram posts.
{YELLOW}Use a secondary / burner account — NOT your main account.{RESET}
The session is cached after first login so you won't be asked again.

{DIM}Without credentials:{RESET}
SuperBrain can still save and analyse {BOLD}YouTube videos{RESET} and {BOLD}Websites{RESET}
without any Instagram account. However, Instagram posts will be limited:
• Only {BOLD}public posts{RESET} that are accessible without login may work.
• You {BOLD}cannot process multiple Instagram posts back-to-back{RESET} —
Instagram enforces a rate-limit cool-down between unauthenticated
requests. You may need to wait several minutes between saves.
Adding credentials removes these restrictions entirely.

Press {BOLD}Enter{RESET} to skip.
""")
    ig_user = ask("Instagram username", default=existing.get("INSTAGRAM_USERNAME")) or ""
    ig_pass = ask("Instagram password", default=existing.get("INSTAGRAM_PASSWORD"), paste=True) or ""

    # Write .api_keys
    API_KEYS.parent.mkdir(parents=True, exist_ok=True)
    lines = [
        "# SuperBrain API Keys — DO NOT COMMIT THIS FILE\n",
        f"GEMINI_API_KEY={gemini}\n",
        f"GROQ_API_KEY={groq_k}\n",
        f"OPENROUTER_API_KEY={openr}\n",
        "\n",
        f"INSTAGRAM_USERNAME={ig_user}\n",
        f"INSTAGRAM_PASSWORD={ig_pass}\n",
    ]
    API_KEYS.write_text("".join(lines))
    ok(f"Keys saved to {API_KEYS}")
|
|
425
|
+
|
|
426
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
427
|
+
# Step 4 — Ollama / Offline Model
|
|
428
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
429
|
+
OLLAMA_MODEL = "qwen3-vl:4b" # vision-language model, fits ~6 GB VRAM / ~8 GB RAM
|
|
430
|
+
|
|
431
|
+
def setup_ollama():
    """Step 4 — optionally install a local Ollama vision-language model.

    Skipped on user request or when the `ollama` binary is absent; if the
    model is already pulled, nothing is downloaded again.
    """
    h1("Step 4 of 7 — Offline AI Model (Ollama)")

    print(f"""
Ollama runs AI models {BOLD}locally on your machine{RESET} — no internet or API
key required. SuperBrain uses it as a last-resort fallback if all
cloud providers fail or run out of quota.

Recommended model: {BOLD}{OLLAMA_MODEL}{RESET} (~3 GB download, needs ~8 GB RAM)
→ Vision-language model: understands both text AND images.
Other options: llama3.2:3b (2 GB / 4 GB RAM), gemma2:2b (1.5 GB / 4 GB RAM)
""")

    if not ask_yn("Set up Ollama offline model?", default=True):
        warn("Skipping Ollama. Cloud providers only — make sure you have API keys.")
        return

    # Check if ollama binary is available
    if not shutil.which("ollama"):
        print(f"""
{YELLOW}Ollama is not installed.{RESET}

Install it first:
Linux / macOS → {CYAN}curl -fsSL https://ollama.com/install.sh | sh{RESET}
Windows → Download from {CYAN}https://ollama.com/download{RESET}

After installing, re-run {BOLD}python start.py{RESET} to continue.
""")
        if not ask_yn("Continue setup anyway (skip model pull for now)?", default=False):
            sys.exit(0)
        # BUGFIX: this was a plain string — "{OLLAMA_MODEL}" was printed
        # literally instead of being interpolated.
        warn(f"Skipping model pull. Run ollama pull {OLLAMA_MODEL} manually later.")
        return

    ok("Ollama binary found")

    # Check if model already pulled (match on the base name, ignoring the tag)
    try:
        result = run_q(["ollama", "list"])
        if OLLAMA_MODEL.split(":")[0] in result.stdout:
            ok(f"Model {OLLAMA_MODEL} already available")
            return
    except Exception:
        pass  # `ollama list` failing is non-fatal — fall through and try pulling

    custom = ask(f"Model to pull", default=OLLAMA_MODEL)
    model = custom or OLLAMA_MODEL

    h2(f"Pulling {model} — this downloads ~3 GB, grab a coffee ☕")
    nl()
    try:
        _ollama_pull_with_progress(model)
    except subprocess.CalledProcessError:
        err(f"Failed to pull {model}.")
        warn(f"Run manually later: ollama pull {model}")
|
|
485
|
+
|
|
486
|
+
def _ollama_pull_with_progress(model: str):
    """Run `ollama pull` and render a live per-layer progress bar.

    Accepts both output shapes `ollama pull` can emit when piped: JSON
    status objects (one per line) and plain-text status lines.

    Raises subprocess.CalledProcessError if the pull exits non-zero.
    """
    import json as _json

    cmd = ["ollama", "pull", model]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            text=True, bufsize=1)

    # digest → (total_bytes, completed_bytes, short_label)
    # NOTE(review): `layers` and `active_digest` are written but never read —
    # candidates for removal.
    layers: dict[str, tuple[int, int, str]] = {}
    last_status = ""
    active_digest = ""
    render_line = False # True while a progress bar is being overwritten

    for raw in proc.stdout: # type: ignore[union-attr]
        raw = raw.strip()
        if not raw:
            continue

        # Ollama outputs plain-text lines (not JSON) when not a TTY — accept both
        try:
            data = _json.loads(raw)
        except _json.JSONDecodeError:
            # plain text line from older Ollama or piped output
            if render_line:
                sys.stdout.write("\n"); render_line = False
            if raw != last_status:  # de-duplicate repeated status lines
                last_status = raw
                print(f" {CYAN}→{RESET} {raw}")
            continue

        status = data.get("status", "")
        digest = data.get("digest", "")
        total = int(data.get("total", 0))
        completed = int(data.get("completed", 0))

        if digest and total > 0:
            # Downloadable layer — refresh its in-place progress bar.
            short = (digest.split(":")[-1])[:12] # e.g. "a1b2c3d4e5f6"
            layers[digest] = (total, completed, short)
            active_digest = digest
            bar = _ascii_bar(completed, total)
            _overwrite(f"{DIM}{short}{RESET} {bar}")
            render_line = True

        elif status and status != last_status:
            # Non-layer status change — finish any live bar before printing.
            if render_line:
                sys.stdout.write("\n"); render_line = False
            last_status = status
            # Show a checkmark when a layer finishes
            done_statuses = ("verifying sha256 digest", "writing manifest",
                             "removing any unused layers", "success")
            if any(s in status.lower() for s in done_statuses):
                print(f" {GREEN}✓{RESET} {status}")
            else:
                print(f" {CYAN}→{RESET} {status}")

    # Close out a dangling progress bar so the next print starts on a new line.
    if render_line:
        sys.stdout.write("\n"); render_line = False

    proc.wait()
    if proc.returncode != 0:
        raise subprocess.CalledProcessError(proc.returncode, cmd)

    ok(f"Model {model} ready")
|
|
550
|
+
|
|
551
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
552
|
+
# Step 5 — Whisper / Offline Transcription
|
|
553
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
554
|
+
# Selectable Whisper model sizes: name → (approx. download size, description).
WHISPER_MODELS = {
    "tiny": (" ~74 MB", "fastest, lower accuracy"),
    "base": ("~142 MB", "good balance ⭐ recommended"),
    "small": ("~461 MB", "higher accuracy"),
    "medium": ("~1.5 GB", "high accuracy, slower"),
    "large": ("~2.9 GB", "best accuracy, needs 10 GB RAM"),
}
|
|
561
|
+
|
|
562
|
+
def setup_whisper():
    """Step 5 — ensure ffmpeg + openai-whisper are present and pre-download a model.

    Saves the chosen model name to config/whisper_model.txt so the server
    knows which model to load at runtime. Most failures are soft: the user
    is offered the choice to continue setup anyway.
    """
    h1("Step 5 of 7 — Offline Audio Transcription (Whisper)")

    print(f"""
OpenAI Whisper transcribes audio and video {BOLD}entirely on your machine{RESET}.
SuperBrain uses it to extract speech from Instagram Reels, YouTube
videos, and any other saved media — no API key needed.

Whisper requires {BOLD}ffmpeg{RESET} to be installed on your system.
It also pre-downloads a speech model the first time it runs.
""")

    # ── ffmpeg check ──────────────────────────────────────────────────────────
    if shutil.which("ffmpeg"):
        ok("ffmpeg is installed")
    else:
        warn("ffmpeg is NOT installed — Whisper cannot run without it.")
        print(f"""
Install ffmpeg:
Linux / WSL → {CYAN}sudo apt install ffmpeg{RESET}
macOS → {CYAN}brew install ffmpeg{RESET}
Windows → {CYAN}winget install ffmpeg{RESET}
or download from {CYAN}https://ffmpeg.org/download.html{RESET}

After installing ffmpeg, re-run {BOLD}python start.py --reset{RESET} or just
restart — Whisper will work automatically once ffmpeg is present.
""")
        if not ask_yn("Continue setup anyway?", default=True):
            sys.exit(0)

    # ── Whisper package check / install ──────────────────────────────────────
    try:
        # Probe the venv interpreter (not this process) for the package.
        result = run_q([str(VENV_PYTHON), "-c", "import whisper; print(whisper.__version__)"])
        ok(f"openai-whisper installed (version {result.stdout.strip()})")
    except Exception:
        warn("openai-whisper not found — installing now …")
        nl()
        try:
            cmd = [str(VENV_PIP), "install", "--progress-bar", "off", "openai-whisper"]
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                    text=True, bufsize=1)
            # Stream pip output, reformatting the interesting lines.
            for raw in proc.stdout: # type: ignore[union-attr]
                line = raw.rstrip()
                if not line:
                    continue
                if line.startswith("Collecting "):
                    print(f" {CYAN}↓{RESET} {BOLD}{line.split()[1]}{RESET}")
                elif "Downloading" in line and (".whl" in line or ".tar.gz" in line):
                    parts = line.strip().split()
                    if len(parts) >= 2:
                        print(f" {DIM}↓ {parts[1]} {' '.join(parts[2:]).strip('()')}{RESET}")
                elif line.startswith("Successfully installed"):
                    print(f" {GREEN}✓ {line}{RESET}")
                elif "error" in line.lower() or "ERROR" in line:
                    print(f" {RED}{line}{RESET}")
            proc.wait()
            if proc.returncode == 0:
                result = run_q([str(VENV_PYTHON), "-c", "import whisper; print(whisper.__version__)"])
                ok(f"openai-whisper installed (version {result.stdout.strip()})")
            else:
                err("openai-whisper install failed — offline transcription will not work.")
                if not ask_yn("Continue setup anyway?", default=True):
                    sys.exit(0)
                return
        except Exception as e:
            err(f"openai-whisper install failed: {e}")
            if not ask_yn("Continue setup anyway?", default=True):
                sys.exit(0)
            return

    # ── Model pre-download ────────────────────────────────────────────────────
    nl()
    print(f" {BOLD}Whisper model pre-download{RESET}")
    print(f" Pre-downloading a model now avoids a delay on first use.\n")

    rows = ""
    for name, (size, note) in WHISPER_MODELS.items():
        star = f" {YELLOW}← default if skipped{RESET}" if name == "base" else ""
        rows += f" {BOLD}{name:<8}{RESET} {size} {DIM}{note}{RESET}{star}\n"
    print(rows)

    choice = ask("Model to pre-download", default="base")
    model = choice.strip().lower() if choice else "base"
    # Unknown names fall back to "base" — this also keeps the -c command
    # below safe, since only whitelisted names can reach it.
    if model not in WHISPER_MODELS:
        warn(f"Unknown model '{model}' — defaulting to 'base'.")
        model = "base"

    # ── Save model choice to config ─────────────────────────────────────────
    whisper_cfg = BASE_DIR / "config" / "whisper_model.txt"
    (BASE_DIR / "config").mkdir(exist_ok=True)
    whisper_cfg.write_text(model)
    ok(f"Whisper model set to '{model}' (saved to config/whisper_model.txt)")

    h2(f"Pre-downloading Whisper '{model}' model …")
    print(f" {DIM}(Whisper's own progress bar will appear below){RESET}\n")
    try:
        # Don't capture: let tqdm's download progress bars stream to the terminal
        run([str(VENV_PYTHON), "-c",
             f"import whisper; print('Loading model …'); whisper.load_model('{model}'); print('Done.')"])
        nl()
        ok(f"Whisper '{model}' model downloaded and cached")
    except subprocess.CalledProcessError:
        err(f"Pre-download failed — Whisper will download '{model}' automatically on first use.")
|
|
665
|
+
|
|
666
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
667
|
+
# Step 6 — Remote Access / Port Forwarding
|
|
668
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
669
|
+
# Marker file: presence means "start a localtunnel on server startup".
LOCALTUNNEL_ENABLED = BASE_DIR / "config" / "localtunnel_enabled.txt"
# Log file for the tunnel subprocess output.
LOCALTUNNEL_LOG = BASE_DIR / "config" / "localtunnel.log"
|
|
671
|
+
|
|
672
|
+
def setup_remote_access():
    """Step 6 wizard page: choose how the phone reaches the backend.

    Offers localtunnel (free public HTTPS tunnel to port 5000) or manual
    port forwarding.  The decision is persisted through the
    LOCALTUNNEL_ENABLED marker file, which launch-time code reads to decide
    whether to auto-start the tunnel.
    """
    h1("Step 6 of 7 — Remote Access (localtunnel / Port Forwarding)")

    print(f"""
  The SuperBrain backend runs on {BOLD}port 5000{RESET} on your machine.
  Your phone needs to reach this port over the internet.

  You have two options:

  {BOLD}Option A — localtunnel (easiest + free){RESET}
    localtunnel creates a public HTTPS URL that tunnels to your local port 5000.
    No account required.
    Official site: {CYAN}https://theboroer.github.io/localtunnel-www/{RESET}

  {BOLD}Option B — Your own port forwarding (advanced){RESET}
    Forward {BOLD}TCP port 5000{RESET} on your router to your machine's local IP.
    Then use {BOLD}http://<your-public-ip>:5000{RESET} in the mobile app.
    Steps:
      1. Find your machine's local IP → ip addr (Linux) / ipconfig (Windows)
      2. Log into your router admin panel (usually http://192.168.1.1)
      3. Add a port forwarding rule: External 5000 → Internal <your-local-IP>:5000
      4. Use your public IP (check https://ipify.org) in the mobile app.
    {YELLOW}Note: dynamic public IPs change on router restart — consider a DDNS service.{RESET}

  {DIM}You can also run only on your local WiFi — both phone and PC must be on
  the same network. Use your PC's local IP (e.g. 192.168.x.x) in the app.{RESET}
""")

    choice = ask_yn("Enable localtunnel on startup?", default=True)
    if not choice:
        # Remove the marker so launch-time code will not start a tunnel.
        LOCALTUNNEL_ENABLED.unlink(missing_ok=True)
        warn("Skipping localtunnel. Use either your own port forwarding or local WiFi.")
        info("Remember: set the correct server URL in the mobile app Settings.")
        return

    # Accept npx.cmd too — consistent with _start_localtunnel() and
    # launch_backend(), which both probe for either launcher.  Checking only
    # "npx" can falsely report npx as missing on some Windows setups.
    if not (shutil.which("npx") or shutil.which("npx.cmd")):
        print(f"""
  {YELLOW}npx is not installed / not on PATH.{RESET}

  Install it:
    Linux   → {CYAN}Install Node.js (includes npm + npx){RESET}
    macOS   → {CYAN}brew install node{RESET}
    Windows → Install Node.js LTS from {CYAN}https://nodejs.org/{RESET}

  After installing, re-run {BOLD}python start.py{RESET}.
""")
        warn("Skipping localtunnel setup.")
        return

    ok("npx binary found")
    # Persist the opt-in so every future `python start.py` starts the tunnel.
    LOCALTUNNEL_ENABLED.parent.mkdir(parents=True, exist_ok=True)
    LOCALTUNNEL_ENABLED.write_text("enabled")
    ok("localtunnel auto-start enabled")
    nl()
    info("localtunnel will be started automatically every time you run start.py.")
|
|
727
|
+
|
|
728
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
729
|
+
# Step 7 — Access Token & Database
|
|
730
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
731
|
+
def setup_token_and_db():
    """Step 7 wizard page: ensure an 8-char access token exists; explain DB.

    A valid existing token (8 alphanumeric chars) is kept unless the user
    asks for a new one.  New tokens are drawn from `secrets` (A-Z, 0-9).
    The SQLite database itself is created lazily by the backend.
    """
    h1("Step 7 of 7 — Access Token & Database")

    # Token: reuse a valid existing one unless the user wants a fresh token.
    if TOKEN_FILE.exists():
        token = TOKEN_FILE.read_text().strip()
        if token and len(token) == 8 and token.isalnum():
            ok(f"Access Token already exists: {BOLD}{token}{RESET}")
            if not ask_yn("Generate a new Access Token?", default=False):
                return
        elif token:
            # Non-empty but not the 8-char alnum format → regenerate below.
            warn("Existing token uses old format. A new 8-character Access Token will be generated.")
        # Empty token file: fall through and generate a new token.
        # (The original also assigned `token = None` here, but the value was
        # never read afterwards — dead code, removed.)

    # Cryptographically secure 8-char token (uppercase letters + digits).
    alphabet = string.ascii_uppercase + string.digits
    new_token = ''.join(secrets.choice(alphabet) for _ in range(8))
    TOKEN_FILE.write_text(new_token)
    ok(f"Access Token saved: {BOLD}{GREEN}{new_token}{RESET}")
    nl()
    print(f" {YELLOW}Copy this token into the mobile app → Settings → Access Token.{RESET}")

    # DB is auto-created on first backend start; just let the user know
    nl()
    info("The SQLite database (superbrain.db) will be created automatically")
    info("the first time the backend starts.")
|
|
757
|
+
|
|
758
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
759
|
+
# Launch Backend
|
|
760
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
761
|
+
def _extract_localtunnel_url(text: str) -> str | None:
|
|
762
|
+
"""Extract first localtunnel public URL from text."""
|
|
763
|
+
import re
|
|
764
|
+
m = re.search(r"https://[\w.-]+\.loca\.lt\b", text)
|
|
765
|
+
return m.group(0) if m else None
|
|
766
|
+
|
|
767
|
+
|
|
768
|
+
def _find_localtunnel_url_from_log() -> str | None:
    """Read the localtunnel log and return the detected public URL, if any."""
    try:
        if LOCALTUNNEL_LOG.exists():
            # Tolerate partial writes / odd bytes from the node process.
            contents = LOCALTUNNEL_LOG.read_text(encoding="utf-8", errors="ignore")
            return _extract_localtunnel_url(contents)
    except Exception:
        # Unreadable log is treated the same as "no URL yet".
        pass
    return None
|
|
777
|
+
|
|
778
|
+
|
|
779
|
+
def _stop_localtunnel_processes():
    """Stop existing localtunnel processes so only one tunnel remains active."""
    try:
        if IS_WINDOWS:
            # Match any process whose command line mentions localtunnel or a
            # *.loca.lt URL, then force-stop each match.  -ErrorAction
            # SilentlyContinue keeps already-exited PIDs from aborting the sweep.
            script = (
                "Get-CimInstance Win32_Process "
                "| Where-Object { $_.CommandLine -match 'localtunnel|\\.loca\\.lt' } "
                "| ForEach-Object { Stop-Process -Id $_.ProcessId -Force -ErrorAction SilentlyContinue }"
            )
            subprocess.run(["powershell", "-NoProfile", "-Command", script], check=False)
        else:
            # POSIX: pkill by command-line substring.  check=False because a
            # non-zero exit simply means no matching process existed.
            subprocess.run(["pkill", "-f", "localtunnel"], check=False)
    except Exception:
        # Best-effort cleanup — a failure here must never block startup.
        pass
|
|
793
|
+
|
|
794
|
+
|
|
795
|
+
def _start_localtunnel(port: int, timeout: int = 25) -> str | None:
    """Start localtunnel in the background and wait for the public URL.

    Spawns `npx localtunnel --port <port>` detached, with stdout+stderr
    redirected to LOCALTUNNEL_LOG, then polls the log for up to *timeout*
    seconds until a https://*.loca.lt URL appears.

    Returns the public URL, or None when npx is missing, the spawn fails,
    or no URL shows up before the deadline.
    """
    import time as _time

    npx_exec = shutil.which("npx") or shutil.which("npx.cmd")
    if not npx_exec:
        return None

    # Clean stale localtunnel processes so only one tunnel stays active.
    _stop_localtunnel_processes()
    _time.sleep(0.8)

    info("Starting localtunnel in background …")
    log_handle = None
    try:
        LOCALTUNNEL_LOG.parent.mkdir(parents=True, exist_ok=True)
        LOCALTUNNEL_LOG.write_text("")  # truncate the previous run's log

        # Line-buffered so the URL line reaches the file promptly.
        log_handle = open(LOCALTUNNEL_LOG, "a", encoding="utf-8", buffering=1)
        kwargs = {
            "start_new_session": True,   # detach (POSIX; silently ignored on Windows)
            "stdout": log_handle,
            "stderr": subprocess.STDOUT,
            "text": True,
        }
        # npx.cmd must be launched through cmd.exe on Windows.
        if IS_WINDOWS and npx_exec.lower().endswith(".cmd"):
            cmd = ["cmd", "/c", npx_exec, "-y", "localtunnel", "--port", str(port)]
        else:
            cmd = [npx_exec, "-y", "localtunnel", "--port", str(port)]
        subprocess.Popen(cmd, **kwargs)
    except Exception as e:
        warn(f"Could not start localtunnel: {e}")
        return None
    finally:
        # BUGFIX: the parent previously never closed its copy of the log
        # handle, leaking one descriptor per launch attempt.  The child holds
        # its own duplicated descriptor after Popen, so closing ours is safe.
        if log_handle is not None:
            log_handle.close()

    # Poll log output until URL is emitted.
    deadline = _time.time() + timeout
    while _time.time() < deadline:
        _time.sleep(1)
        url = _find_localtunnel_url_from_log()
        if url:
            ok(f"localtunnel active → {GREEN}{BOLD}{url}{RESET}")
            return url

    warn("localtunnel started but URL is not available yet.")
    info(f"Check tunnel logs in: {LOCALTUNNEL_LOG}")
    return None
|
|
840
|
+
|
|
841
|
+
|
|
842
|
+
def _get_windows_pids_on_port(port: int) -> list[int]:
|
|
843
|
+
"""Return listener PIDs on Windows using Get-NetTCPConnection when available."""
|
|
844
|
+
pids: set[int] = set()
|
|
845
|
+
try:
|
|
846
|
+
ps_cmd = (
|
|
847
|
+
f"Get-NetTCPConnection -LocalPort {port} -State Listen -ErrorAction SilentlyContinue "
|
|
848
|
+
"| Select-Object -ExpandProperty OwningProcess -Unique"
|
|
849
|
+
)
|
|
850
|
+
result = subprocess.run(
|
|
851
|
+
["powershell", "-NoProfile", "-Command", ps_cmd],
|
|
852
|
+
check=False,
|
|
853
|
+
capture_output=True,
|
|
854
|
+
text=True,
|
|
855
|
+
)
|
|
856
|
+
for row in (result.stdout or "").splitlines():
|
|
857
|
+
row = row.strip()
|
|
858
|
+
if row.isdigit():
|
|
859
|
+
pids.add(int(row))
|
|
860
|
+
except Exception:
|
|
861
|
+
pass
|
|
862
|
+
return sorted(pids)
|
|
863
|
+
|
|
864
|
+
def _check_port(port: int) -> int | None:
|
|
865
|
+
"""Return the PID occupying `port`, or None if free."""
|
|
866
|
+
import socket as _socket
|
|
867
|
+
with _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) as s:
|
|
868
|
+
s.settimeout(0.5)
|
|
869
|
+
if s.connect_ex(("127.0.0.1", port)) != 0:
|
|
870
|
+
return None # port is free
|
|
871
|
+
|
|
872
|
+
# Port is busy — try to find the PID
|
|
873
|
+
try:
|
|
874
|
+
if IS_WINDOWS:
|
|
875
|
+
out = run_q(["netstat", "-ano"]).stdout
|
|
876
|
+
for line in out.splitlines():
|
|
877
|
+
parts = line.strip().split()
|
|
878
|
+
if len(parts) >= 5 and parts[0].upper() == "TCP" and parts[3].upper() == "LISTENING":
|
|
879
|
+
local_addr = parts[1]
|
|
880
|
+
if local_addr.endswith(f":{port}"):
|
|
881
|
+
return int(parts[-1])
|
|
882
|
+
else:
|
|
883
|
+
out = run_q(["lsof", "-ti", f"TCP:{port}", "-sTCP:LISTEN"]).stdout.strip()
|
|
884
|
+
if out:
|
|
885
|
+
return int(out.splitlines()[0])
|
|
886
|
+
except Exception:
|
|
887
|
+
pass
|
|
888
|
+
return -1 # busy but PID unknown
|
|
889
|
+
|
|
890
|
+
|
|
891
|
+
def _find_pids_on_port(port: int) -> list[int]:
|
|
892
|
+
"""Return all PIDs listening on a given port."""
|
|
893
|
+
pids: set[int] = set()
|
|
894
|
+
try:
|
|
895
|
+
if IS_WINDOWS:
|
|
896
|
+
for pid in _get_windows_pids_on_port(port):
|
|
897
|
+
pids.add(pid)
|
|
898
|
+
|
|
899
|
+
out = run_q(["netstat", "-ano"]).stdout
|
|
900
|
+
for line in out.splitlines():
|
|
901
|
+
parts = line.strip().split()
|
|
902
|
+
if len(parts) >= 5 and parts[0].upper() == "TCP" and parts[3].upper() == "LISTENING":
|
|
903
|
+
local_addr = parts[1]
|
|
904
|
+
if local_addr.endswith(f":{port}"):
|
|
905
|
+
try:
|
|
906
|
+
pids.add(int(parts[-1]))
|
|
907
|
+
except ValueError:
|
|
908
|
+
pass
|
|
909
|
+
else:
|
|
910
|
+
out = run_q(["lsof", "-ti", f"TCP:{port}", "-sTCP:LISTEN"]).stdout.strip()
|
|
911
|
+
if out:
|
|
912
|
+
for row in out.splitlines():
|
|
913
|
+
try:
|
|
914
|
+
pids.add(int(row.strip()))
|
|
915
|
+
except ValueError:
|
|
916
|
+
pass
|
|
917
|
+
except Exception:
|
|
918
|
+
pass
|
|
919
|
+
return sorted(pids)
|
|
920
|
+
|
|
921
|
+
|
|
922
|
+
def _kill_pid_windows(pid: int) -> bool:
|
|
923
|
+
"""Best-effort kill for a Windows PID. Returns True if command succeeded."""
|
|
924
|
+
try:
|
|
925
|
+
result = subprocess.run(
|
|
926
|
+
["taskkill", "/PID", str(pid), "/T", "/F"],
|
|
927
|
+
check=False,
|
|
928
|
+
capture_output=True,
|
|
929
|
+
text=True,
|
|
930
|
+
)
|
|
931
|
+
if result.returncode == 0:
|
|
932
|
+
return True
|
|
933
|
+
|
|
934
|
+
# Fallback for cases where taskkill can't resolve a rapidly-exiting process.
|
|
935
|
+
ps = subprocess.run(
|
|
936
|
+
[
|
|
937
|
+
"powershell",
|
|
938
|
+
"-NoProfile",
|
|
939
|
+
"-Command",
|
|
940
|
+
f"Stop-Process -Id {pid} -Force -ErrorAction SilentlyContinue",
|
|
941
|
+
],
|
|
942
|
+
check=False,
|
|
943
|
+
capture_output=True,
|
|
944
|
+
text=True,
|
|
945
|
+
)
|
|
946
|
+
return ps.returncode == 0
|
|
947
|
+
except Exception:
|
|
948
|
+
return False
|
|
949
|
+
|
|
950
|
+
|
|
951
|
+
def _clear_port_listeners(port: int, attempts: int = 6) -> bool:
    """Try multiple passes to free a busy port by killing all listeners."""
    for _attempt in range(attempts):
        if _check_port(port) is None:
            return True  # port already free

        listeners = _find_pids_on_port(port)
        if not listeners:
            # Busy, but no identifiable owner yet — wait briefly and retry.
            time.sleep(0.8)
            continue

        for victim in listeners:
            try:
                if IS_WINDOWS:
                    _kill_pid_windows(victim)
                else:
                    os.kill(victim, 9)
            except Exception:
                # A listener that died between listing and killing is fine.
                pass
        time.sleep(0.8)

    # Final verdict after all passes.
    return _check_port(port) is None
|
|
973
|
+
|
|
974
|
+
|
|
975
|
+
def _detect_local_ip() -> str:
|
|
976
|
+
"""Best-effort LAN IP detection for same-network mobile access."""
|
|
977
|
+
import socket
|
|
978
|
+
|
|
979
|
+
# Most reliable route-based detection.
|
|
980
|
+
try:
|
|
981
|
+
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
|
982
|
+
s.connect(("8.8.8.8", 80))
|
|
983
|
+
ip = s.getsockname()[0]
|
|
984
|
+
s.close()
|
|
985
|
+
if ip and not ip.startswith("127."):
|
|
986
|
+
return ip
|
|
987
|
+
except Exception:
|
|
988
|
+
pass
|
|
989
|
+
|
|
990
|
+
# Hostname fallback.
|
|
991
|
+
try:
|
|
992
|
+
ip = socket.gethostbyname(socket.gethostname())
|
|
993
|
+
if ip and not ip.startswith("127."):
|
|
994
|
+
return ip
|
|
995
|
+
except Exception:
|
|
996
|
+
pass
|
|
997
|
+
|
|
998
|
+
return "127.0.0.1"
|
|
999
|
+
|
|
1000
|
+
def _display_connect_qr(url: str, token: str):
    """Display a proper QR code in the terminal using segno + Unicode half-block chars.

    The QR encodes a JSON string: {"url": "...", "token": "..."}
    which the mobile app's QR scanner can read to auto-configure connection.

    Degrades gracefully: if segno is missing and cannot be installed into the
    venv on the fly, a hint is printed and the function returns.
    """
    try:
        import segno
    except ImportError:
        # segno not installed — try installing it on the fly
        try:
            info("Installing segno for QR code display …")
            run_q([str(VENV_PIP), "install", "--quiet", "segno"])
            import segno
        except Exception:
            warn("Could not generate QR code (segno not available).")
            info("Install it: pip install segno")
            return

    # Compact JSON; error level 'L' keeps the module count small so the code
    # fits comfortably in a terminal window.
    payload = json.dumps({"url": url, "token": token}, separators=(',', ':'))
    qr = segno.make(payload, error='L')

    # Convert to a matrix (truthy = dark module).
    matrix = [list(row) for row in qr.matrix]
    rows = len(matrix)
    cols = len(matrix[0]) if rows else 0

    # Add quiet zone (2 modules on each side) — required margin for scanners.
    quiet = 2
    padded_cols = cols + quiet * 2
    padded_rows = rows + quiet * 2
    padded = []
    empty_row = [0] * padded_cols
    for _ in range(quiet):
        padded.append(list(empty_row))
    for row in matrix:
        padded.append([0] * quiet + row + [0] * quiet)
    for _ in range(quiet):
        padded.append(list(empty_row))

    # Render using Unicode half-block characters for double vertical resolution
    # Each output line encodes TWO rows of QR modules:
    #   top=dark,  bottom=dark  → "█" (full block)
    #   top=dark,  bottom=light → "▀" (upper half)
    #   top=light, bottom=dark  → "▄" (lower half)
    #   top=light, bottom=light → " " (space)
    # (The original also defined unused ANSI color constants here — removed.)
    nl()
    print(f" {BOLD}{CYAN}┌{'─' * (padded_cols + 4)}┐{RESET}")
    print(f" {BOLD}{CYAN}│{RESET} {BOLD}Scan with SuperBrain app{RESET} {BOLD}{CYAN}│{RESET}")
    print(f" {BOLD}{CYAN}├{'─' * (padded_cols + 4)}┤{RESET}")

    for y in range(0, padded_rows, 2):
        line_chars = []
        for x in range(padded_cols):
            # y is always < padded_rows here (loop bound); only the partner
            # row can fall off the end when padded_rows is odd.
            top = padded[y][x]
            bottom = padded[y + 1][x] if y + 1 < padded_rows else 0

            if top and bottom:
                line_chars.append("█")
            elif top and not bottom:
                line_chars.append("▀")
            elif not top and bottom:
                line_chars.append("▄")
            else:
                line_chars.append(" ")

        line = "".join(line_chars)
        print(f" {BOLD}{CYAN}│{RESET} {line} {BOLD}{CYAN}│{RESET}")

    print(f" {BOLD}{CYAN}└{'─' * (padded_cols + 4)}┘{RESET}")
    nl()
|
|
1075
|
+
|
|
1076
|
+
|
|
1077
|
+
def launch_backend():
    """Free port 5000, honor the localtunnel opt-in, print connection info,
    then replace this process with uvicorn (os.execv — never returns).
    """
    h1("Launching SuperBrain Backend")

    # Ensure upload endpoints won't crash FastAPI at import time.
    ensure_runtime_dependencies()

    # ── Port conflict check ───────────────────────────────────────────────────
    PORT = 5000
    pid = _check_port(PORT)
    if pid is not None:
        # pid > 0 → known owner; pid == -1 → busy but owner unknown.
        if pid > 0:
            warn(f"Port {PORT} is already in use by PID {BOLD}{pid}{RESET}.")
        else:
            warn(f"Port {PORT} is already in use (PID unknown).")

        nl()
        print(f" This is usually a previous SuperBrain server that wasn't stopped.")
        print(f" Options:")
        print(f" {BOLD}1{RESET} Kill the existing process and start fresh {DIM}(recommended){RESET}")
        print(f" {BOLD}2{RESET} Exit — I'll stop it manually then re-run start.py")
        nl()
        choice = input(f" {BOLD}Choose [1/2]{RESET}: ").strip()

        if choice != "1":
            nl()
            if pid and pid > 0:
                info(f"Stop it with: kill {pid}")
            else:
                info(f"Find what's on port {PORT}: lsof -i :{PORT} (Linux/macOS)")
                info(f" netstat -ano | findstr :{PORT} (Windows)")
            info("Then re-run: python start.py")
            sys.exit(0)

        # Kill it
        try:
            import signal as _sig
            if pid and pid > 0:
                if IS_WINDOWS:
                    killed = _kill_pid_windows(pid)
                    if not killed:
                        warn(f"PID {pid} is no longer active. Trying current listeners on port {PORT} …")
                else:
                    # Polite SIGTERM first, then escalate to SIGKILL if the
                    # process is still alive after a grace period.
                    os.kill(pid, _sig.SIGTERM)
                    time.sleep(1)
                    # If still alive, SIGKILL
                    if not IS_WINDOWS:
                        try:
                            os.kill(pid, 0)  # signal 0 = existence check only
                            os.kill(pid, _sig.SIGKILL)
                            time.sleep(0.5)
                        except ProcessLookupError:
                            pass  # already gone

                # Verify port is free; if not, keep clearing listeners until stable.
                _clear_port_listeners(PORT)

                if _check_port(PORT) is None:
                    ok(f"Process {pid} stopped")
                else:
                    err(f"Port {PORT} is still in use after kill attempt.")
                    if IS_WINDOWS:
                        info(f"Run manually: netstat -ano | findstr :{PORT}")
                        info("Then: taskkill /PID <pid> /T /F")
                        info("Or inspect: powershell Get-NetTCPConnection -LocalPort 5000 -State Listen")
                    else:
                        info(f"Run: lsof -ti TCP:{PORT} -sTCP:LISTEN | xargs kill -9")
                    sys.exit(1)
            else:
                # Unknown PID — try to kill all listeners we can find
                extra_pids = _find_pids_on_port(PORT)
                if not extra_pids:
                    err("Cannot determine PID automatically.")
                    if IS_WINDOWS:
                        info(f"Run manually: netstat -ano | findstr :{PORT}")
                        info("Then: taskkill /PID <pid> /F")
                    else:
                        info(f"Run: lsof -ti TCP:{PORT} -sTCP:LISTEN | xargs kill -9")
                    info("Then re-run: python start.py")
                    sys.exit(1)

                _clear_port_listeners(PORT)
                if _check_port(PORT) is None:
                    ok(f"Cleared port {PORT} by terminating PID(s): {', '.join(str(x) for x in extra_pids)}")
                else:
                    err(f"Port {PORT} is still busy.")
                    if IS_WINDOWS:
                        info(f"Run manually: netstat -ano | findstr :{PORT}")
                        info("Then: taskkill /PID <pid> /T /F")
                    else:
                        info(f"Run: lsof -ti TCP:{PORT} -sTCP:LISTEN | xargs kill -9")
                    sys.exit(1)
        except Exception as e:
            err(f"Failed to kill process: {e}")
            if IS_WINDOWS:
                if pid and pid > 0:
                    info(f"Try manually: taskkill /PID {pid} /F")
                info(f"Or list listeners: netstat -ano | findstr :{PORT}")
            else:
                info(f"Try manually: kill -9 {pid}")
            sys.exit(1)

    token = TOKEN_FILE.read_text().strip() if TOKEN_FILE.exists() else "—"
    local_ip = _detect_local_ip()

    # BUGFIX: only auto-start the tunnel when npx exists AND the user opted in
    # during setup.  setup_remote_access() creates the LOCALTUNNEL_ENABLED
    # marker on opt-in and removes it on decline; previously this flag was
    # ignored and the tunnel started whenever npx was installed.
    npx_available = bool(shutil.which("npx") or shutil.which("npx.cmd"))
    tunnel_opted_in = LOCALTUNNEL_ENABLED.exists()

    localtunnel_url: str | None = None
    if npx_available and tunnel_opted_in:
        localtunnel_url = _start_localtunnel(PORT)
    else:
        # A still-running tunnel from a previous session may have logged a URL.
        localtunnel_url = _find_localtunnel_url_from_log()

    if localtunnel_url:
        tunnel_line = f" Public URL → {GREEN}{BOLD}{localtunnel_url}{RESET} {DIM}(localtunnel){RESET}"
        tunnel_hint = f" · public → {GREEN}{localtunnel_url}{RESET}"
    elif npx_available and tunnel_opted_in:
        tunnel_line = f" Public URL → {YELLOW}(starting — URL pending, check localtunnel.log){RESET}"
        tunnel_hint = f" · public → run: {DIM}npx localtunnel --port {PORT}{RESET}"
    elif npx_available:
        # npx exists but the user declined auto-start during setup.
        tunnel_line = ""
        tunnel_hint = f" · public → run: {DIM}npx localtunnel --port {PORT}{RESET}"
    else:
        tunnel_line = ""
        tunnel_hint = f" · public → install Node.js first, then run: {DIM}npx localtunnel --port {PORT}{RESET}"

    # ── Generate and display QR code ──────────────────────────────────────────
    qr_url = f"http://{local_ip}:{PORT}"
    _display_connect_qr(qr_url, token)

    print(f"""
 {GREEN}{BOLD}Backend is starting up!{RESET}

 Local URL → {CYAN}http://127.0.0.1:{PORT}{RESET}
 Network URL → {CYAN}http://{local_ip}:{PORT}{RESET}
{(tunnel_line + chr(10)) if tunnel_line else ''} API docs → {CYAN}http://127.0.0.1:{PORT}/docs{RESET}
 Access Token → {BOLD}{MAGENTA}{token}{RESET}

 {DIM}Keep this terminal open. Press Ctrl+C to stop the server.{RESET}

 {YELLOW}Mobile app setup:{RESET}
 {BOLD}Option A — Scan QR code (easiest):{RESET}
 1. Open the app → Settings → tap the {BOLD}QR icon{RESET} 📷
 2. Scan the QR code shown above
 3. Done — auto-connected!

 {BOLD}Option B — Manual setup:{RESET}
 1. Build / install the SuperBrain APK on your Android device.
 2. Open the app → tap the ⚙ settings icon.
 3. Set {BOLD}Server URL{RESET} to:
 · Same WiFi → http://{local_ip}:{PORT}
{tunnel_hint}
 · Port fwd → http://<your-public-ip>:{PORT}
 4. Set {BOLD}Access Token{RESET} to: {BOLD}{MAGENTA}{token}{RESET}
 5. Tap {BOLD}Save{RESET} → Connected!

 {YELLOW}Data Management:{RESET}
 • {BOLD}Export:{RESET} In app Settings → Data Import/Export → choose format (JSON/ZIP)
 • {BOLD}Import:{RESET} Upload backup file in app → Data Import/Export → select file
 • {BOLD}Reset:{RESET} Run {BOLD}python reset.py{RESET} for safe data cleanup options

 {DIM}Security Note: Keep token.txt private. Anyone with this token can use your API.{RESET}
 {DIM}The app securely stores your Access Token locally — it's never transmitted anywhere but your server.{RESET}
""")

    os.chdir(BASE_DIR)
    # Replace this process with uvicorn so Ctrl+C goes straight to the server.
    os.execv(str(VENV_PYTHON), [str(VENV_PYTHON), "-m", "uvicorn", "api:app",
                                "--host", "0.0.0.0", "--port", str(PORT), "--reload"])
|
|
1241
|
+
|
|
1242
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
1243
|
+
# Main
|
|
1244
|
+
# ══════════════════════════════════════════════════════════════════════════════
|
|
1245
|
+
def main():
    """Entry point: run the 7-step setup wizard on first run, then launch.

    The SETUP_DONE marker records a completed wizard; when present the
    wizard is skipped and the backend starts directly.  Passing --reset
    forces the wizard to run again.
    """
    os.chdir(BASE_DIR)
    banner()

    # --reset on the command line re-runs the wizard even after completion.
    reset_mode = "--reset" in sys.argv

    if SETUP_DONE.exists() and not reset_mode:
        # Already configured — just launch
        print(f" {GREEN}Setup already complete.{RESET} Starting backend …")
        print(f" {DIM}Run python start.py --reset to redo the setup wizard.{RESET}")
        launch_backend()
        return

    print(f"""
 Welcome to SuperBrain! This wizard will guide you through:

 1 · Create Python virtual environment
 2 · Install all required packages
 3 · Configure AI provider keys + Instagram credentials
 4 · Set up an offline AI model via Ollama (qwen3-vl:4b)
 5 · Set up offline audio transcription (Whisper + ffmpeg)
 6 · Configure remote access (localtunnel or port forwarding)
 7 · Generate Access Token & initialise database

 Press {BOLD}Enter{RESET} to accept defaults shown in [{DIM}brackets{RESET}].
 You can re-run this wizard any time with: {BOLD}python start.py --reset{RESET}
""")
    input(f" Press {BOLD}Enter{RESET} to begin … ")

    try:
        # Wizard steps in order; Ctrl+C at any point aborts without writing
        # the SETUP_DONE marker, so a re-run restarts the wizard.
        setup_venv()
        install_deps()
        setup_api_keys()
        setup_ollama()
        setup_whisper()
        setup_remote_access()
        setup_token_and_db()
    except KeyboardInterrupt:
        nl()
        warn("Setup interrupted. Re-run python start.py to continue.")
        sys.exit(1)

    # Mark setup done
    SETUP_DONE.write_text("ok")

    nl()
    print(f" {GREEN}{BOLD}{'═'*60}{RESET}")
    print(f" {GREEN}{BOLD} ✓ Setup complete!{RESET}")
    print(f" {GREEN}{BOLD}{'═'*60}{RESET}")
    nl()

    if ask_yn("Start the backend now?", default=True):
        launch_backend()
    else:
        info("Run python start.py whenever you want to start the backend.")
|
|
1300
|
+
|
|
1301
|
+
# Run the wizard/launcher only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|