superbrain-server 1.0.2-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/superbrain.js +196 -0
- package/package.json +23 -0
- package/payload/.dockerignore +45 -0
- package/payload/.env.example +58 -0
- package/payload/Dockerfile +73 -0
- package/payload/analyzers/__init__.py +0 -0
- package/payload/analyzers/audio_transcribe.py +225 -0
- package/payload/analyzers/caption.py +244 -0
- package/payload/analyzers/music_identifier.py +346 -0
- package/payload/analyzers/text_analyzer.py +117 -0
- package/payload/analyzers/visual_analyze.py +218 -0
- package/payload/analyzers/webpage_analyzer.py +789 -0
- package/payload/analyzers/youtube_analyzer.py +320 -0
- package/payload/api.py +1676 -0
- package/payload/config/.api_keys.example +22 -0
- package/payload/config/model_rankings.json +492 -0
- package/payload/config/openrouter_free_models.json +1364 -0
- package/payload/config/whisper_model.txt +1 -0
- package/payload/config_settings.py +185 -0
- package/payload/core/__init__.py +0 -0
- package/payload/core/category_manager.py +219 -0
- package/payload/core/database.py +811 -0
- package/payload/core/link_checker.py +300 -0
- package/payload/core/model_router.py +1253 -0
- package/payload/docker-compose.yml +120 -0
- package/payload/instagram/__init__.py +0 -0
- package/payload/instagram/instagram_downloader.py +253 -0
- package/payload/instagram/instagram_login.py +190 -0
- package/payload/main.py +912 -0
- package/payload/requirements.txt +39 -0
- package/payload/reset.py +311 -0
- package/payload/start-docker-prod.sh +125 -0
- package/payload/start-docker.sh +56 -0
- package/payload/start.py +1302 -0
- package/payload/static/favicon.ico +0 -0
- package/payload/stop-docker.sh +16 -0
- package/payload/utils/__init__.py +0 -0
- package/payload/utils/db_stats.py +108 -0
- package/payload/utils/manage_token.py +91 -0
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Text Content Analyzer
|
|
4
|
+
Analyzes text files (like info.txt from Instagram posts) using the ModelRouter.
|
|
5
|
+
Routes to the best available free model: Groq → Gemini → OpenRouter → Local Ollama.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import sys
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
|
|
11
|
+
# Ensure backend root is in sys.path (needed when run as a subprocess)
|
|
12
|
+
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
|
|
13
|
+
|
|
14
|
+
from core.model_router import get_router
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def analyze_text(file_path: str) -> dict:
    """
    Run AI analysis over the contents of a plain-text file.

    Routes the prompt through the shared ModelRouter, which tries each
    free provider in turn (Groq → Gemini → OpenRouter → local Ollama).

    Returns:
        dict with keys: success (bool), file (str), analysis (str), content (str)
        or error (str) on failure.
    """
    def _failure(message: str) -> dict:
        # Uniform error envelope used by every failure branch.
        return {"success": False, "error": message}

    path = Path(file_path)
    if not path.exists():
        return _failure(f"File not found: {file_path}")
    if not path.is_file():
        return _failure(f"Not a file: {file_path}")

    try:
        content = path.read_text(encoding="utf-8").strip()
    except Exception as e:
        return _failure(f"Could not read file: {e}")

    if not content:
        return _failure("File is empty")

    prompt = f"""You are analyzing an Instagram post's metadata. Read the following information and provide insights.

POST INFORMATION:
{content}

TASK: Analyze this post and provide:
1. Main topic / theme
2. Content type (travel guide, vlog, educational, product review, etc.)
3. Key highlights and takeaways
4. Notable details (locations, products, people, links, tips mentioned)
5. Target audience

Be specific and concise. Max 300 words."""

    print("🔄 Generating text analysis...")

    try:
        analysis = get_router().generate_text(prompt)
    except RuntimeError as e:
        return _failure(f"All models failed: {e}")

    if not analysis:
        return _failure("Model returned empty response")

    return {
        "success": True,
        "file": path.name,
        "analysis": analysis,
        "content": content,
    }
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def main():
    """CLI entry point: resolve the input path, run analysis, print a report."""
    if len(sys.argv) > 1:
        file_path = sys.argv[1]
    else:
        # Interactive mode: show a banner, then ask for the path.
        print("=" * 70)
        print("📄 TEXT ANALYZER")
        print("=" * 70)
        print()
        file_path = input("Enter text file path: ").strip()

    # Drop surrounding quotes that shells / drag-and-drop often add.
    file_path = file_path.strip('"').strip("'").strip()

    if not file_path:
        print("❌ No path provided!")
        return

    print()
    result = analyze_text(file_path)

    if not result["success"]:
        print(f"❌ Error: {result['error']}")
        return

    banner = "=" * 70
    rule = "-" * 70
    print(banner)
    print("📊 TEXT ANALYSIS RESULTS")
    print(banner)
    print()
    print(f"📄 File: {result['file']}")
    print()
    print("📝 ORIGINAL CONTENT:")
    print(rule)
    print(result["content"])
    print(rule)
    print()
    print("🔍 ANALYSIS:")
    print(rule)
    print(result["analysis"])
    print(rule)
    print()


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,218 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Video & Image Visual Analyzer
|
|
4
|
+
Uses the ModelRouter to pick the best available free vision model
|
|
5
|
+
(Gemini → Groq Vision → OpenRouter Vision → Local Ollama as last resort)
|
|
6
|
+
|
|
7
|
+
For videos: extracts key frames using OpenCV (or ffmpeg fallback),
|
|
8
|
+
then sends them to the vision model.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import sys
|
|
12
|
+
import base64
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
from typing import List
|
|
15
|
+
|
|
16
|
+
# Ensure backend root is in sys.path (needed when run as a subprocess)
|
|
17
|
+
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
|
|
18
|
+
|
|
19
|
+
from core.model_router import get_router
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
23
|
+
# Frame extraction helpers
|
|
24
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
25
|
+
|
|
26
|
+
def _frames_cv2(video_path: str, max_frames: int = 4) -> List[str]:
    """Extract evenly-spaced frames using OpenCV → base64 JPEG list.

    Args:
        video_path: Path to the video file.
        max_frames: Maximum number of frames to sample.

    Returns:
        List of base64-encoded JPEG strings — possibly shorter than
        max_frames if some reads fail, or empty for an unreadable file.
    """
    import cv2
    cap = cv2.VideoCapture(video_path)
    try:
        total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if total <= 0:
            return []
        # Sample at evenly spaced positions across the whole clip.
        indices = [int(i * total / max_frames) for i in range(max_frames)]
        out = []
        for idx in indices:
            cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
            ret, frame = cap.read()
            if not ret:
                continue  # skip unreadable positions rather than abort
            ok, buf = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, 75])
            if ok:
                out.append(base64.b64encode(buf.tobytes()).decode())
        return out
    finally:
        # Always release the capture handle, even if a read/encode raises
        # (the original leaked it on any exception between open and release).
        cap.release()
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def _get_duration_s(video_path: str) -> int:
|
|
49
|
+
import subprocess
|
|
50
|
+
r = subprocess.run(
|
|
51
|
+
["ffprobe", "-v", "error", "-select_streams", "v:0",
|
|
52
|
+
"-show_entries", "format=duration", "-of", "csv=p=0", video_path],
|
|
53
|
+
capture_output=True, text=True,
|
|
54
|
+
)
|
|
55
|
+
try:
|
|
56
|
+
return max(1, int(float(r.stdout.strip())))
|
|
57
|
+
except Exception:
|
|
58
|
+
return 60
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def _frames_ffmpeg(video_path: str, max_frames: int = 4) -> List[str]:
    """Fallback frame extraction via ffmpeg."""
    import subprocess
    import tempfile

    encoded: List[str] = []
    with tempfile.TemporaryDirectory() as tmpdir:
        # Sampling one frame every `interval` seconds spreads the grabs
        # roughly evenly across the clip.
        seconds = _get_duration_s(video_path)
        interval = max(1, seconds // max_frames)
        pattern = str(Path(tmpdir) / "frame_%02d.jpg")
        command = [
            "ffmpeg", "-i", video_path,
            "-vf", f"fps=1/{interval}",
            "-vframes", str(max_frames),
            "-q:v", "3",
            pattern,
        ]
        subprocess.run(command, capture_output=True)
        for frame_file in sorted(Path(tmpdir).iterdir()):
            encoded.append(base64.b64encode(frame_file.read_bytes()).decode())
    return encoded
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def extract_frames(video_path: str, max_frames: int = 4) -> List[str]:
    """Extract frames from a video, trying OpenCV first then ffmpeg.

    Returns a list of base64-encoded JPEGs; empty when both backends fail.
    """
    # Preferred backend: OpenCV can seek straight to the sampled positions.
    try:
        import cv2  # noqa: F401
        result = _frames_cv2(video_path, max_frames)
        if result:
            print(f" 📸 Extracted {len(result)} frame(s) via OpenCV")
            return result
    except ImportError:
        # OpenCV simply isn't installed — fall through to ffmpeg quietly.
        pass
    except Exception as e:
        print(f" ⚠️ OpenCV failed: {e}")

    # Fallback backend: shell out to ffmpeg.
    try:
        result = _frames_ffmpeg(video_path, max_frames)
        if result:
            print(f" 📸 Extracted {len(result)} frame(s) via ffmpeg")
            return result
    except Exception as e:
        print(f" ⚠️ ffmpeg failed: {e}")

    return []
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def image_to_b64(image_path: str) -> str:
    """Read an image file and return a base64 JPEG string.

    With Pillow available the image is re-encoded as an RGB JPEG
    (quality 80); otherwise the raw file bytes are base64-encoded as-is.
    """
    try:
        import io
        from PIL import Image
    except ImportError:
        # No Pillow: ship the original bytes unchanged.
        with open(image_path, "rb") as f:
            return base64.b64encode(f.read()).decode()

    with io.BytesIO() as buffer:
        Image.open(image_path).convert("RGB").save(buffer, format="JPEG", quality=80)
        return base64.b64encode(buffer.getvalue()).decode()
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
119
|
+
# Analyze
|
|
120
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
121
|
+
|
|
122
|
+
VISION_PROMPT = """Analyze this image/video frame from an Instagram post. Provide:
|
|
123
|
+
1. Main subject and what is happening
|
|
124
|
+
2. Any visible text, captions, or overlays
|
|
125
|
+
3. Setting / location clues
|
|
126
|
+
4. Products, brands, or notable items visible
|
|
127
|
+
5. Overall content theme
|
|
128
|
+
|
|
129
|
+
Be concise and factual."""
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
def analyze(file_path: str) -> None:
    """Analyze a video or image file and print structured results."""
    video_exts = (".mp4", ".avi", ".mov", ".mkv")
    image_exts = (".jpg", ".jpeg", ".png", ".gif")
    bar = "=" * 70
    rule = "-" * 70

    print(bar)
    print("🎬 VIDEO & IMAGE ANALYZER")
    print(bar)
    print()

    target = Path(file_path)
    if not target.exists():
        print(f"❌ File not found: {file_path}")
        return
    if target.is_dir():
        print("❌ Provide a file, not a folder")
        return

    suffix = target.suffix.lower()
    if suffix not in video_exts + image_exts:
        print(f"❌ Unsupported file type: {suffix}")
        return

    print(f"📹 File: {target.name}")
    print()

    # Collect one or more base64 JPEG payloads for the vision model.
    if suffix in video_exts:
        print("🎥 Extracting video frames...")
        payload: List[str] = extract_frames(str(target), max_frames=4)
        if not payload:
            print("❌ Could not extract any frames from video")
            return
    else:
        print("🖼️ Loading image...")
        try:
            payload = [image_to_b64(str(target))]
        except Exception as e:
            print(f"❌ Could not load image: {e}")
            return

    # Hand everything to whichever vision model the router picks.
    print()
    print("🤖 Analyzing with vision AI...")
    print()

    try:
        result = get_router().analyze_images(VISION_PROMPT, payload)
    except RuntimeError as e:
        print(f"❌ Vision analysis failed: {e}")
        return

    print(bar)
    print("📊 VISUAL ANALYSIS RESULTS")
    print(bar)
    print()
    print("📝 ANALYSIS:")
    print(rule)
    print(result)
    print(rule)
    print()
    print(bar)
    print("✅ Done!")
    print(bar)
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
def main():
    """CLI entry point: take the path from argv, or prompt for it."""
    if len(sys.argv) > 1:
        target = sys.argv[1]
    else:
        print("=" * 70)
        print("🎬 VIDEO & IMAGE ANALYZER")
        print("=" * 70)
        print()
        target = input("📂 File path: ").strip()

    # Remove quotes that shells / drag-and-drop tend to wrap around paths.
    target = target.strip('"').strip("'").strip()

    if not target:
        print("❌ No path provided!")
        return
    analyze(target)


if __name__ == "__main__":
    main()
|