cmo-dog 2.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cmo_dog-2.0.0/.env.example +20 -0
- cmo_dog-2.0.0/.gitignore +20 -0
- cmo_dog-2.0.0/PKG-INFO +16 -0
- cmo_dog-2.0.0/README.md +220 -0
- cmo_dog-2.0.0/app/__init__.py +1 -0
- cmo_dog-2.0.0/app/config.py +29 -0
- cmo_dog-2.0.0/app/main.py +257 -0
- cmo_dog-2.0.0/app/orchestrator.py +679 -0
- cmo_dog-2.0.0/app/routes/__init__.py +0 -0
- cmo_dog-2.0.0/app/routes/admin.py +79 -0
- cmo_dog-2.0.0/app/routes/auth.py +173 -0
- cmo_dog-2.0.0/app/routes/billing.py +138 -0
- cmo_dog-2.0.0/app/routes/monitors.py +70 -0
- cmo_dog-2.0.0/app/schemas.py +88 -0
- cmo_dog-2.0.0/app/services/__init__.py +0 -0
- cmo_dog-2.0.0/app/services/gmail_sender.py +160 -0
- cmo_dog-2.0.0/app/services/monitor_scheduler.py +327 -0
- cmo_dog-2.0.0/app/services/monitor_service.py +135 -0
- cmo_dog-2.0.0/app/services/run_history_service.py +122 -0
- cmo_dog-2.0.0/app/services/user_service.py +249 -0
- cmo_dog-2.0.0/assets/onni.png +0 -0
- cmo_dog-2.0.0/build.sh +107 -0
- cmo_dog-2.0.0/package-lock.json +6 -0
- cmo_dog-2.0.0/pyproject.toml +38 -0
- cmo_dog-2.0.0/scripts/create_assistants.py +58 -0
- cmo_dog-2.0.0/scripts/model_list.py +29 -0
- cmo_dog-2.0.0/scripts/smoke_stream.sh +181 -0
- cmo_dog-2.0.0/scripts/smoke_test.sh +325 -0
- cmo_dog-2.0.0/start-docker.sh +23 -0
- cmo_dog-2.0.0/terraform/backend.tf +9 -0
- cmo_dog-2.0.0/terraform/bootstrap/state-bucket.tf +52 -0
- cmo_dog-2.0.0/terraform/main.tf +125 -0
- cmo_dog-2.0.0/terraform/modules/apprunner/main.tf +182 -0
- cmo_dog-2.0.0/terraform/modules/apprunner/outputs.tf +19 -0
- cmo_dog-2.0.0/terraform/modules/apprunner/variables.tf +124 -0
- cmo_dog-2.0.0/terraform/modules/ecr/main.tf +39 -0
- cmo_dog-2.0.0/terraform/modules/ecr/outputs.tf +14 -0
- cmo_dog-2.0.0/terraform/modules/ecr/variables.tf +21 -0
- cmo_dog-2.0.0/terraform/modules/ssm-secrets/main.tf +18 -0
- cmo_dog-2.0.0/terraform/modules/ssm-secrets/outputs.tf +9 -0
- cmo_dog-2.0.0/terraform/modules/ssm-secrets/variables.tf +15 -0
- cmo_dog-2.0.0/terraform/outputs.tf +24 -0
- cmo_dog-2.0.0/terraform/variables.tf +295 -0
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
BACKBOARD_API_KEY=
|
|
2
|
+
BACKBOARD_ASSISTANT_CONTENT=
|
|
3
|
+
BACKBOARD_ASSISTANT_COMPETITOR=
|
|
4
|
+
BACKBOARD_ASSISTANT_BRAND=
|
|
5
|
+
BACKBOARD_ASSISTANT_AUDIT=
|
|
6
|
+
BACKBOARD_ASSISTANT_STORAGE=
|
|
7
|
+
|
|
8
|
+
STRIPE_SECRET_KEY=
|
|
9
|
+
STRIPE_WEBHOOK_SECRET=
|
|
10
|
+
STRIPE_PRICE_ID=
|
|
11
|
+
|
|
12
|
+
# Both point to port 8000 — Next.js handles /api/* proxying to FastAPI internally.
|
|
13
|
+
# For App Runner, set both to your App Runner URL (https://xxx.us-east-1.awsapprunner.com).
|
|
14
|
+
DOMAIN_CLIENT=http://localhost:8000
|
|
15
|
+
DOMAIN_SERVER=http://localhost:8000
|
|
16
|
+
|
|
17
|
+
GOOGLE_CLIENT_ID=
|
|
18
|
+
GOOGLE_CLIENT_SECRET=
|
|
19
|
+
|
|
20
|
+
ADMIN_EMAILS=you@example.com,other@example.com
|
cmo_dog-2.0.0/.gitignore
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
.env
|
|
2
|
+
.venv
|
|
3
|
+
uv.lock
|
|
4
|
+
__pycache__/
|
|
5
|
+
*.pyc
|
|
6
|
+
*.pyo
|
|
7
|
+
terraform/terraform.tfvars
|
|
8
|
+
terraform/terraform.tfstate
|
|
9
|
+
terraform/terraform.tfstate.backup
|
|
10
|
+
.DS_Store
|
|
11
|
+
.cursor
|
|
12
|
+
.cursorignore
|
|
13
|
+
terraform/.terraform.lock.hcl
|
|
14
|
+
terraform/.terraform/
|
|
15
|
+
terraform/bootstrap/.terraform.lock.hcl
|
|
16
|
+
terraform/bootstrap/.terraform/
|
|
17
|
+
terraform/bootstrap/terraform.tfstate
|
|
18
|
+
terraform/bootstrap/terraform.tfstate.backup
|
|
19
|
+
model_list.csv
|
|
20
|
+
# (duplicate entry removed: terraform/.terraform.lock.hcl is already ignored above)
|
cmo_dog-2.0.0/PKG-INFO
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: cmo-dog
|
|
3
|
+
Version: 2.0.0
|
|
4
|
+
Summary: AI CMO Terminal — Backboard agents for traffic and users
|
|
5
|
+
Requires-Python: >=3.11
|
|
6
|
+
Requires-Dist: apscheduler>=3.10
|
|
7
|
+
Requires-Dist: backboard-sdk>=1.5.0
|
|
8
|
+
Requires-Dist: bcrypt>=4.0
|
|
9
|
+
Requires-Dist: fastapi>=0.115.0
|
|
10
|
+
Requires-Dist: httpx>=0.27.0
|
|
11
|
+
Requires-Dist: pydantic-settings>=2.0
|
|
12
|
+
Requires-Dist: pydantic>=2.0
|
|
13
|
+
Requires-Dist: python-dotenv>=1.0
|
|
14
|
+
Requires-Dist: sse-starlette>=2.0
|
|
15
|
+
Requires-Dist: stripe>=11.0
|
|
16
|
+
Requires-Dist: uvicorn[standard]>=0.32.0
|
cmo_dog-2.0.0/README.md
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
1
|
+
<p align="center">
|
|
2
|
+
<img src="assets/onni.png" alt="Onni — CMO.dog" width="200" style="border-radius:50%;" />
|
|
3
|
+
</p>
|
|
4
|
+
|
|
5
|
+
<h1 align="center">CMO.dog</h1>
|
|
6
|
+
|
|
7
|
+
<p align="center">
|
|
8
|
+
<strong>Drop in a URL. Onni fetches your Chief Marketing Officer.</strong>
|
|
9
|
+
<br />
|
|
10
|
+
A good Finnish dog who audits your site, maps your competitors, and finds your brand voice — in seconds.
|
|
11
|
+
</p>
|
|
12
|
+
|
|
13
|
+
<p align="center">
|
|
14
|
+
<a href="https://backboard.io"><img src="https://img.shields.io/badge/Powered%20by-Backboard.io-6366f1?style=flat-square" alt="Powered by Backboard" /></a>
|
|
15
|
+
<img src="https://img.shields.io/badge/Python-3.11%2B-3776AB?style=flat-square&logo=python&logoColor=white" alt="Python 3.11+" />
|
|
16
|
+
<img src="https://img.shields.io/badge/Next.js-15-black?style=flat-square&logo=next.js" alt="Next.js 15" />
|
|
17
|
+
<img src="https://img.shields.io/badge/FastAPI-0.115-009688?style=flat-square&logo=fastapi&logoColor=white" alt="FastAPI" />
|
|
18
|
+
<img src="https://img.shields.io/badge/license-MIT-brightgreen?style=flat-square" alt="MIT License" />
|
|
19
|
+
</p>
|
|
20
|
+
|
|
21
|
+
---
|
|
22
|
+
|
|
23
|
+
## Who is Onni?
|
|
24
|
+
|
|
25
|
+
Onni is a Finnish dog. In Finnish, *onni* means **luck** — and that's what he brings your marketing.
|
|
26
|
+
|
|
27
|
+
Hiring a CMO costs $200k/year. Good ones take months to ramp up.
|
|
28
|
+
|
|
29
|
+
**Onni does it in 60 seconds.**
|
|
30
|
+
|
|
31
|
+
Type in any URL. Four AI agents fire up in parallel — they read your site, search the web, and hand back:
|
|
32
|
+
|
|
33
|
+
- A **site audit** with real scores (performance, SEO, accessibility, best practices)
|
|
34
|
+
- A **competitor landscape** with categories and pricing
|
|
35
|
+
- A **brand voice profile** so your copy sounds like *you*
|
|
36
|
+
- A ranked list of **SEO fixes** with step-by-step instructions
|
|
37
|
+
- An **AI chat** interface so you can keep drilling into next steps
|
|
38
|
+
|
|
39
|
+
No prompt engineering. No setup. Just a URL.
|
|
40
|
+
|
|
41
|
+
---
|
|
42
|
+
|
|
43
|
+
## Demo
|
|
44
|
+
|
|
45
|
+
> Type a URL → watch Onni run → read your marketing brief
|
|
46
|
+
|
|
47
|
+
```
|
|
48
|
+
> Checking what content and documents you have...
|
|
49
|
+
> Content and documents summarized.
|
|
50
|
+
> Now let me check out your competition...
|
|
51
|
+
> Searching: yoursite.com competitors alternative
|
|
52
|
+
> Evaluating competitor positioning strategy...
|
|
53
|
+
> Competitor analysis complete.
|
|
54
|
+
> Now let me figure out your brand voice...
|
|
55
|
+
> Brand voice guide ready
|
|
56
|
+
> Running website audit...
|
|
57
|
+
> Scanning page structure and metadata...
|
|
58
|
+
> Page speed and core web vitals measured
|
|
59
|
+
> Found 7 SEO optimization opportunities (score: 64/100)
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
---
|
|
63
|
+
|
|
64
|
+
## Features
|
|
65
|
+
|
|
66
|
+
| | |
|
|
67
|
+
|---|---|
|
|
68
|
+
| **Website Audit** | Performance, SEO, Accessibility, and Best Practices scores with specific failing checks and how-to-fix guides |
|
|
69
|
+
| **Competitor Intel** | Direct vs. secondary competitors with pricing — sourced live from the web |
|
|
70
|
+
| **Brand Voice** | 2–3 sentence brand profile your team can actually use |
|
|
71
|
+
| **SEO Fix Queue** | Ranked issues (Critical → High → Medium) with numbered remediation steps |
|
|
72
|
+
| **AI CMO Chat** | Ask follow-up questions about your audit — Onni remembers context |
|
|
73
|
+
| **Live Terminal** | Real-time SSE stream so you see agents working, not a spinner |
|
|
74
|
+
|
|
75
|
+
---
|
|
76
|
+
|
|
77
|
+
## Stack
|
|
78
|
+
|
|
79
|
+
| Layer | Tech |
|
|
80
|
+
|---|---|
|
|
81
|
+
| AI Agents | [Backboard.io](https://backboard.io) — threads, assistants, web search |
|
|
82
|
+
| API | FastAPI + Uvicorn, Server-Sent Events |
|
|
83
|
+
| Frontend | Next.js 15, Tailwind CSS, shadcn/ui |
|
|
84
|
+
| Runtime | Python 3.11+ via `uv`, Node 18+ |
|
|
85
|
+
| Schemas | Pydantic v2 |
|
|
86
|
+
|
|
87
|
+
---
|
|
88
|
+
|
|
89
|
+
## Quick Start
|
|
90
|
+
|
|
91
|
+
### 1. Clone & install
|
|
92
|
+
|
|
93
|
+
```bash
|
|
94
|
+
git clone https://github.com/your-org/cmo.dog.git
|
|
95
|
+
cd cmo.dog
|
|
96
|
+
|
|
97
|
+
# Python deps
|
|
98
|
+
uv sync
|
|
99
|
+
|
|
100
|
+
# Frontend deps
|
|
101
|
+
cd web && npm install && cd ..
|
|
102
|
+
```
|
|
103
|
+
|
|
104
|
+
### 2. Configure `.env`
|
|
105
|
+
|
|
106
|
+
```bash
|
|
107
|
+
cp .env.example .env
|
|
108
|
+
```
|
|
109
|
+
|
|
110
|
+
```env
|
|
111
|
+
BACKBOARD_API_KEY=your_key_here
|
|
112
|
+
|
|
113
|
+
# Assistant IDs — created once, reused forever (never delete these)
|
|
114
|
+
BACKBOARD_ASSISTANT_CONTENT=asst_...
|
|
115
|
+
BACKBOARD_ASSISTANT_COMPETITOR=asst_...
|
|
116
|
+
BACKBOARD_ASSISTANT_BRAND=asst_...
|
|
117
|
+
BACKBOARD_ASSISTANT_AUDIT=asst_...
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
> Get a free Backboard API key at [backboard.io](https://backboard.io). Create your four assistants once and paste the IDs here.
|
|
121
|
+
|
|
122
|
+
### 3. Run
|
|
123
|
+
|
|
124
|
+
```bash
|
|
125
|
+
./start.sh
|
|
126
|
+
```
|
|
127
|
+
|
|
128
|
+
API → `http://localhost:8000`
|
|
129
|
+
App → `http://localhost:3000`
|
|
130
|
+
|
|
131
|
+
---
|
|
132
|
+
|
|
133
|
+
## How It Works
|
|
134
|
+
|
|
135
|
+
```
|
|
136
|
+
User enters URL
|
|
137
|
+
│
|
|
138
|
+
▼
|
|
139
|
+
FastAPI /runs ──────────────────────────────────────────────┐
|
|
140
|
+
│ │
|
|
141
|
+
│ SSE stream → browser terminal │
|
|
142
|
+
▼ │
|
|
143
|
+
┌─────────────────────────────────────────────────────────┐ │
|
|
144
|
+
│ Orchestrator Pipeline │ │
|
|
145
|
+
│ │ │
|
|
146
|
+
│ 1. Content Agent → summarize site + docs │ │
|
|
147
|
+
│ 2. Competitor Agent → find rivals, pricing │ │
|
|
148
|
+
│ 3. Brand Agent → extract voice & tone │ │
|
|
149
|
+
│ 4. Audit Agent → scores + SEO checks + fixes │ │
|
|
150
|
+
└─────────────────────────────────────────────────────────┘ │
|
|
151
|
+
│ │
|
|
152
|
+
└──────────────── RunStatus (Pydantic) ─────────────────┘
|
|
153
|
+
│
|
|
154
|
+
GET /runs/{id}
|
|
155
|
+
│
|
|
156
|
+
Next.js dashboard
|
|
157
|
+
(audit · competitors · chat)
|
|
158
|
+
```
|
|
159
|
+
|
|
160
|
+
Each agent is a [Backboard](https://backboard.io) assistant with live web search enabled — no stale training data, no hallucinated competitors.
|
|
161
|
+
|
|
162
|
+
---
|
|
163
|
+
|
|
164
|
+
## Project Structure
|
|
165
|
+
|
|
166
|
+
```
|
|
167
|
+
cmo.dog/
|
|
168
|
+
├── app/
|
|
169
|
+
│ ├── main.py # FastAPI routes + SSE
|
|
170
|
+
│ ├── orchestrator.py # Agent pipeline
|
|
171
|
+
│ └── schemas.py # Pydantic models
|
|
172
|
+
├── web/
|
|
173
|
+
│ ├── src/app/ # Next.js pages
|
|
174
|
+
│ └── src/components/ # UI components
|
|
175
|
+
├── assets/
|
|
176
|
+
│ └── onni.png # The dog himself
|
|
177
|
+
├── scripts/ # Smoke tests
|
|
178
|
+
├── pyproject.toml # uv/hatch config
|
|
179
|
+
└── start.sh # Clean start script
|
|
180
|
+
```
|
|
181
|
+
|
|
182
|
+
---
|
|
183
|
+
|
|
184
|
+
## API
|
|
185
|
+
|
|
186
|
+
```http
|
|
187
|
+
POST /runs body: { website_url } → { run_id }
|
|
188
|
+
GET /runs/{id} → RunStatus JSON
|
|
189
|
+
GET /runs/{id}/stream → SSE terminal lines
|
|
190
|
+
POST /runs/{id}/chat body: { message } → { reply }
|
|
191
|
+
```
|
|
192
|
+
|
|
193
|
+
---
|
|
194
|
+
|
|
195
|
+
## Contributing
|
|
196
|
+
|
|
197
|
+
Pull requests welcome. Keep it surgical — one concern per PR.
|
|
198
|
+
|
|
199
|
+
- Backend logic lives in `app/` only. No logic in the frontend.
|
|
200
|
+
- Pydantic models for all data shapes.
|
|
201
|
+
- `uv` for Python deps, `npm` for frontend.
|
|
202
|
+
- Never delete Backboard assistants — they carry persistent data.
|
|
203
|
+
|
|
204
|
+
---
|
|
205
|
+
|
|
206
|
+
## Built with
|
|
207
|
+
|
|
208
|
+
- [Backboard.io](https://backboard.io) — AI agent infrastructure
|
|
209
|
+
- [FastAPI](https://fastapi.tiangolo.com)
|
|
210
|
+
- [Next.js](https://nextjs.org)
|
|
211
|
+
- [Tailwind CSS](https://tailwindcss.com)
|
|
212
|
+
- [shadcn/ui](https://ui.shadcn.com)
|
|
213
|
+
|
|
214
|
+
---
|
|
215
|
+
|
|
216
|
+
<p align="center">
|
|
217
|
+
Made with 🐾 and too many competitor analyses.
|
|
218
|
+
<br />
|
|
219
|
+
<a href="https://cmo.dog">cmo.dog</a> · <a href="https://backboard.io">backboard.io</a>
|
|
220
|
+
</p>
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# AI CMO Terminal API
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
"""App configuration via pydantic-settings."""
|
|
2
|
+
|
|
3
|
+
from pydantic_settings import BaseSettings, SettingsConfigDict
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class Settings(BaseSettings):
    """Application configuration loaded from the environment (and `.env` if present).

    All fields default to empty strings so the app can boot without a full
    configuration; features that need a key check for it at use time.
    Unknown environment variables are ignored (``extra="ignore"``), which is
    why `.env.example` may list keys not modelled here.
    """

    model_config = SettingsConfigDict(env_file=".env", extra="ignore")

    # Backboard AI platform credentials / storage-assistant id
    backboard_api_key: str = ""
    backboard_assistant_storage: str = ""

    # Stripe billing credentials
    stripe_secret_key: str = ""
    stripe_webhook_secret: str = ""
    stripe_price_id: str = ""

    # Google OAuth client credentials
    google_client_id: str = ""
    google_client_secret: str = ""

    # Public base URLs for the frontend (client) and API (server)
    domain_client: str = ""
    domain_server: str = ""
    # Number of runs a free-plan user may start before hitting the paywall
    free_prompts_limit: int = 5
    admin_emails: str = ""  # comma-separated list of admin email addresses

    @property
    def admin_email_set(self) -> set[str]:
        """Return `admin_emails` parsed into a lowercased set; blank entries are dropped."""
        return {e.strip().lower() for e in self.admin_emails.split(",") if e.strip()}


# Module-level singleton imported throughout the app.
settings = Settings()
|
|
@@ -0,0 +1,257 @@
|
|
|
1
|
+
"""FastAPI app: runs API and SSE stream. All logic in API."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import csv
|
|
5
|
+
import json
|
|
6
|
+
import os
|
|
7
|
+
import uuid
|
|
8
|
+
from contextlib import asynccontextmanager
|
|
9
|
+
from functools import lru_cache
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
try:
|
|
14
|
+
from dotenv import load_dotenv
|
|
15
|
+
load_dotenv()
|
|
16
|
+
except ImportError:
|
|
17
|
+
pass
|
|
18
|
+
|
|
19
|
+
from fastapi import FastAPI, Header, HTTPException
|
|
20
|
+
from fastapi.middleware.cors import CORSMiddleware
|
|
21
|
+
from sse_starlette.sse import EventSourceResponse
|
|
22
|
+
|
|
23
|
+
from app import orchestrator
|
|
24
|
+
from app.config import settings
|
|
25
|
+
from app.routes.auth import router as auth_router
|
|
26
|
+
from app.routes.billing import router as billing_router
|
|
27
|
+
from app.routes.admin import router as admin_router
|
|
28
|
+
from app.routes.monitors import router as monitors_router
|
|
29
|
+
from app.services.monitor_scheduler import start_scheduler, stop_scheduler
|
|
30
|
+
from app.services.user_service import find_user_by_token, increment_prompts
|
|
31
|
+
from app.services.run_history_service import list_runs, get_run_detail
|
|
32
|
+
from pydantic import BaseModel as PydanticBaseModel
|
|
33
|
+
|
|
34
|
+
from app.schemas import AnalyticsMetric, ChatMessage, FeedItem, RunCreate, RunResponse, RunStatus
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: keep the monitor scheduler running for the app's lifetime."""
    start_scheduler()
    yield
    stop_scheduler()
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
app = FastAPI(title="AI CMO Terminal API", lifespan=lifespan)

# Allow the local dev frontends (ports 3000/8000/9000) and the production
# domains to call the API with credentials (cookies / auth headers).
app.add_middleware(
    CORSMiddleware,
    allow_origins=[
        "http://localhost:8000",
        "http://127.0.0.1:8000",
        "http://localhost:9000",
        "http://127.0.0.1:9000",
        "http://localhost:3000",
        "http://127.0.0.1:3000",
        "https://cmo.dog",
        "https://www.cmo.dog",
    ],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Feature routers; each declares its own path prefix.
app.include_router(auth_router)
app.include_router(billing_router)
app.include_router(admin_router)
app.include_router(monitors_router)
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
@app.get("/health")
async def health():
    """Liveness probe: report that the API process is up."""
    payload = {"status": "ok"}
    return payload
|
|
72
|
+
|
|
73
|
+
@app.post("/api/runs", response_model=RunResponse)
async def create_run(body: RunCreate, x_user_token: str = Header(None)):
    """Start a new analysis run for the authenticated user.

    Validates the token, enforces the free-plan prompt quota, charges one
    prompt up front, seeds the in-memory run state, and launches the
    orchestrator pipeline as a background task.
    """
    if not x_user_token:
        raise HTTPException(status_code=401, detail="Sign up to use Onni")

    user = await find_user_by_token(x_user_token)
    if not user:
        raise HTTPException(status_code=401, detail="Invalid token — sign up again")

    plan = user.get("plan", "free")
    used = int(user.get("prompts_used", 0))

    # Free-plan users are capped; paid plans are not checked here.
    if plan == "free" and used >= settings.free_prompts_limit:
        raise HTTPException(
            status_code=402,
            detail={
                "error": "limit_reached",
                "prompts_used": used,
                "prompts_limit": settings.free_prompts_limit,
            },
        )

    # Deduct token before the run starts — no matter what happens next
    try:
        await increment_prompts(user["user_id"])
    except Exception as e:
        print(f"[billing] WARN: increment_prompts failed for {user['user_id']}: {e}")

    run_id = uuid.uuid4().hex[:12]

    # Placeholder scores so the dashboard renders before real audit data lands.
    seed_metrics = [
        AnalyticsMetric(key="performance", label="Performance", score=44, tone="red"),
        AnalyticsMetric(key="accessibility", label="Accessibility", score=78, tone="yellow"),
        AnalyticsMetric(key="best_practices", label="Best Practices", score=73, tone="yellow"),
        AnalyticsMetric(key="seo", label="SEO", score=92, tone="green"),
    ]
    orchestrator._runs[run_id] = RunStatus(
        run_id=run_id,
        status="pending",
        website_url=body.website_url,
        llm_provider=body.llm_provider,
        model_name=body.model_name,
        credits=2000,
        analytics_overview=seed_metrics,
        feed_items=[],
        chat_status="loading",
    )
    orchestrator._terminal_lines[run_id] = []
    orchestrator._terminal_events[run_id] = asyncio.Event()

    asyncio.create_task(
        orchestrator.run_orchestrator(
            run_id,
            body.website_url,
            plan=plan,
            user_id=user["user_id"],
            llm_provider=body.llm_provider,
            model_name=body.model_name,
        )
    )

    return RunResponse(run_id=run_id)
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
@app.get("/api/runs/{run_id}", response_model=RunStatus)
async def get_run(run_id: str, x_user_token: str = Header(None)):
    """Return the current status of a run, from memory or persisted history."""
    live = orchestrator.get_run(run_id)
    if live:
        return live

    # Fall back to persisted history if not in memory (e.g. after restart)
    if x_user_token:
        user = await find_user_by_token(x_user_token)
        if user:
            detail = await get_run_detail(run_id, user["user_id"])
            if detail:
                known = RunStatus.model_fields
                return RunStatus(**{k: v for k, v in detail.items() if k in known})

    raise HTTPException(status_code=404, detail="Run not found")
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
@app.get("/api/history")
async def get_history(x_user_token: str = Header(None)):
    """List the authenticated user's past run summaries."""
    if not x_user_token:
        raise HTTPException(status_code=401, detail="Authentication required")

    user = await find_user_by_token(x_user_token)
    if not user:
        raise HTTPException(status_code=401, detail="Invalid token")

    return {"runs": await list_runs(user["user_id"])}
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
async def stream_generator(run_id: str):
    """Yield SSE payloads of terminal lines for a run until it finishes.

    Replays any lines buffered before the client connected, then follows the
    live buffer: it wakes on the run's terminal event (or every 30s as a
    fallback) and stops once the run reports completed/failed.
    """
    run = orchestrator.get_run(run_id)
    if not run:
        yield {"data": '{"line": "> Run not found."}'}
        return

    evt = orchestrator.get_terminal_event(run_id)
    pos = 0  # index of the next buffered line to emit

    try:
        while True:
            # Replay buffered lines (catches up any client that connected late)
            lines = orchestrator.get_terminal_lines(run_id)
            while pos < len(lines):
                yield {"data": json.dumps({"line": lines[pos]})}
                pos += 1

            # Done if run has finished
            r = orchestrator.get_run(run_id)
            if r and r.status in ("completed", "failed"):
                break

            # Wait for next line (or timeout to re-check status)
            evt.clear()
            try:
                # shield keeps evt.wait() alive past the timeout cancellation;
                # the 30s cap guarantees we periodically re-check run status
                # even if no new line ever arrives.
                await asyncio.wait_for(asyncio.shield(evt.wait()), timeout=30.0)
            except asyncio.TimeoutError:
                pass
    except asyncio.CancelledError:
        # Client disconnected — end the stream quietly.
        pass
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
@app.get("/api/runs/{run_id}/stream")
async def stream_run(run_id: str):
    """SSE endpoint: stream live terminal output for an existing run."""
    existing = orchestrator.get_run(run_id)
    if not existing:
        raise HTTPException(status_code=404, detail="Run not found")
    return EventSourceResponse(stream_generator(run_id))
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
class ChatRequest(PydanticBaseModel):
    """Request body for the per-run chat endpoint."""

    # The user's chat message text
    message: str
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
@app.post("/api/runs/{run_id}/chat")
async def chat(run_id: str, body: ChatRequest):
    """Append the user's message, fetch the assistant reply, return full history."""
    run = orchestrator.get_run(run_id)
    if not run:
        raise HTTPException(status_code=404, detail="Run not found")

    user_message = ChatMessage(role="user", content=body.message)
    run.chat_messages.append(user_message)

    reply = await orchestrator.chat_reply(run_id, body.message)
    assistant_message = ChatMessage(role="assistant", content=reply)
    run.chat_messages.append(assistant_message)

    return {"messages": run.chat_messages}
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
_EXCLUDED_PROVIDERS: set[str] = set()
|
|
212
|
+
|
|
213
|
+
@lru_cache(maxsize=1)
|
|
214
|
+
def _load_model_list() -> dict[str, Any]:
|
|
215
|
+
"""Parse model_list.csv once and return provider-grouped data."""
|
|
216
|
+
csv_path = Path(__file__).parent.parent / "model_list.csv"
|
|
217
|
+
providers: dict[str, list[dict]] = {}
|
|
218
|
+
|
|
219
|
+
if not csv_path.exists():
|
|
220
|
+
return {"providers": [], "models": {}}
|
|
221
|
+
|
|
222
|
+
with open(csv_path, newline="") as f:
|
|
223
|
+
for row in csv.DictReader(f):
|
|
224
|
+
if row.get("model_type") != "llm":
|
|
225
|
+
continue
|
|
226
|
+
if row.get("supports_tools") != "True":
|
|
227
|
+
continue
|
|
228
|
+
provider = row.get("provider", "")
|
|
229
|
+
if provider in _EXCLUDED_PROVIDERS:
|
|
230
|
+
continue
|
|
231
|
+
try:
|
|
232
|
+
input_cost = float(row.get("input_cost_per_1m_tokens") or 0)
|
|
233
|
+
output_cost = float(row.get("output_cost_per_1m_tokens") or 0)
|
|
234
|
+
context = int(row.get("context_limit") or 0)
|
|
235
|
+
except (ValueError, TypeError):
|
|
236
|
+
continue
|
|
237
|
+
providers.setdefault(provider, []).append({
|
|
238
|
+
"id": row["name"],
|
|
239
|
+
"context": context,
|
|
240
|
+
"input_cost": input_cost,
|
|
241
|
+
"output_cost": output_cost,
|
|
242
|
+
})
|
|
243
|
+
|
|
244
|
+
return {
|
|
245
|
+
"providers": sorted(providers.keys()),
|
|
246
|
+
"models": providers,
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
@app.get("/api/models")
async def get_models():
    """Expose the cached, provider-grouped model catalogue to the frontend."""
    catalogue = _load_model_list()
    return catalogue
|
|
253
|
+
|
|
254
|
+
|
|
255
|
+
def run():
    """Console-script entry point: serve the API with uvicorn (auto-reload, port 9000)."""
    import uvicorn

    target = "app.main:app"
    uvicorn.run(target, host="0.0.0.0", port=9000, reload=True)
|