@nordsym/apiclaw 2.1.0 → 2.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +15 -2
- package/dist/bin-http.js +0 -0
- package/dist/bin.bundled.js +79288 -0
- package/dist/funnel-client.d.ts +24 -0
- package/dist/funnel-client.d.ts.map +1 -0
- package/dist/funnel-client.js +131 -0
- package/dist/funnel-client.js.map +1 -0
- package/dist/funnel.test.d.ts +2 -0
- package/dist/funnel.test.d.ts.map +1 -0
- package/dist/funnel.test.js +145 -0
- package/dist/funnel.test.js.map +1 -0
- package/dist/gateway-client.d.ts.map +1 -1
- package/dist/gateway-client.js +24 -2
- package/dist/gateway-client.js.map +1 -1
- package/dist/index.bundled.js +61263 -0
- package/dist/index.js +161 -74
- package/dist/index.js.map +1 -1
- package/dist/postinstall.d.ts +0 -5
- package/dist/postinstall.d.ts.map +1 -1
- package/dist/postinstall.js +24 -3
- package/dist/postinstall.js.map +1 -1
- package/dist/registration-guard.d.ts +29 -0
- package/dist/registration-guard.d.ts.map +1 -0
- package/dist/registration-guard.js +87 -0
- package/dist/registration-guard.js.map +1 -0
- package/package.json +7 -2
- package/.claude/settings.local.json +0 -9
- package/.env.prod +0 -1
- package/apiclaw-README.md +0 -494
- package/convex/_generated/api.d.ts +0 -137
- package/convex/_generated/api.js +0 -23
- package/convex/_generated/dataModel.d.ts +0 -60
- package/convex/_generated/server.d.ts +0 -143
- package/convex/_generated/server.js +0 -93
- package/convex/adminActivate.ts +0 -53
- package/convex/adminStats.ts +0 -306
- package/convex/agents.ts +0 -939
- package/convex/analytics.ts +0 -187
- package/convex/apiKeys.ts +0 -220
- package/convex/backfillAnalytics.ts +0 -272
- package/convex/backfillSearchLogs.ts +0 -35
- package/convex/billing.ts +0 -834
- package/convex/capabilities.ts +0 -157
- package/convex/chains.ts +0 -1318
- package/convex/credits.ts +0 -211
- package/convex/crons.ts +0 -50
- package/convex/debugFilestackLogs.ts +0 -16
- package/convex/debugGetToken.ts +0 -18
- package/convex/directCall.ts +0 -713
- package/convex/earnProgress.ts +0 -753
- package/convex/email.ts +0 -329
- package/convex/feedback.ts +0 -265
- package/convex/http.ts +0 -3430
- package/convex/inbound.ts +0 -32
- package/convex/logs.ts +0 -701
- package/convex/migrateFilestack.ts +0 -81
- package/convex/migratePartnersProd.ts +0 -174
- package/convex/migratePratham.ts +0 -126
- package/convex/migrateProviderWorkspaces.ts +0 -175
- package/convex/mou.ts +0 -91
- package/convex/providerKeys.ts +0 -289
- package/convex/providers.ts +0 -1135
- package/convex/purchases.ts +0 -183
- package/convex/ratelimit.ts +0 -104
- package/convex/schema.ts +0 -869
- package/convex/searchLogs.ts +0 -265
- package/convex/seedAPILayerAPIs.ts +0 -191
- package/convex/seedDirectCallConfigs.ts +0 -336
- package/convex/seedPratham.ts +0 -149
- package/convex/spendAlerts.ts +0 -442
- package/convex/stripeActions.ts +0 -607
- package/convex/teams.ts +0 -243
- package/convex/telemetry.ts +0 -81
- package/convex/tsconfig.json +0 -25
- package/convex/updateAPIStatus.ts +0 -44
- package/convex/usage.ts +0 -260
- package/convex/usageReports.ts +0 -357
- package/convex/waitlist.ts +0 -55
- package/convex/webhooks.ts +0 -494
- package/convex/workspaceSettings.ts +0 -143
- package/convex/workspaces.ts +0 -1331
- package/convex.json +0 -3
- package/direct-test.mjs +0 -51
- package/email-templates/filestack-provider-outreach.html +0 -162
- package/email-templates/partnership-template.html +0 -116
- package/email-templates/pratham-draft-preview.txt +0 -57
- package/email-templates/pratham-partnership-draft.html +0 -141
- package/reports/APIClaw-Session-Report-2026-04-05.pdf +0 -0
- package/reports/pipeline/PIPELINE-REPORT.json +0 -153
- package/reports/pipeline/acquire_apisguru.json +0 -17
- package/reports/pipeline/capabilities.json +0 -38
- package/reports/pipeline/discover_azure_recursive.json +0 -1551
- package/reports/pipeline/discover_github.json +0 -25
- package/reports/pipeline/discover_github_repos.json +0 -49
- package/reports/pipeline/discover_swaggerhub.json +0 -24
- package/reports/pipeline/discover_well_known.json +0 -23
- package/reports/pipeline/fetch_specs.json +0 -19
- package/reports/pipeline/generate_providers.json +0 -14
- package/reports/pipeline/match_registry.json +0 -11
- package/reports/pipeline/parse_specs.json +0 -17
- package/reports/pipeline/promote_candidates.json +0 -34
- package/reports/pipeline/validate.json +0 -30
- package/reports/pipeline/validate_smoke_details.json +0 -3835
- package/reports/session-report-2026-04-05.html +0 -433
- package/seed-apis-direct.mjs +0 -106
- package/src/access-control.ts +0 -174
- package/src/adapters/base.ts +0 -364
- package/src/adapters/claude-desktop.ts +0 -41
- package/src/adapters/cline.ts +0 -88
- package/src/adapters/continue.ts +0 -91
- package/src/adapters/cursor.ts +0 -43
- package/src/adapters/custom.ts +0 -188
- package/src/adapters/detect.ts +0 -202
- package/src/adapters/index.ts +0 -47
- package/src/adapters/windsurf.ts +0 -44
- package/src/bin-http.ts +0 -45
- package/src/bin.ts +0 -34
- package/src/capability-router.ts +0 -331
- package/src/chainExecutor.ts +0 -730
- package/src/chainResolver.test.ts +0 -246
- package/src/chainResolver.ts +0 -658
- package/src/cli/commands/demo.ts +0 -109
- package/src/cli/commands/doctor.ts +0 -435
- package/src/cli/commands/index.ts +0 -9
- package/src/cli/commands/login.ts +0 -203
- package/src/cli/commands/mcp-install.ts +0 -373
- package/src/cli/commands/restore.ts +0 -333
- package/src/cli/commands/setup.ts +0 -297
- package/src/cli/commands/uninstall.ts +0 -240
- package/src/cli/index.ts +0 -148
- package/src/cli.ts +0 -370
- package/src/confirmation.ts +0 -296
- package/src/credentials.ts +0 -455
- package/src/credits.ts +0 -329
- package/src/crypto.ts +0 -75
- package/src/discovery.ts +0 -568
- package/src/enterprise/env.ts +0 -156
- package/src/enterprise/index.ts +0 -7
- package/src/enterprise/script-generator.ts +0 -481
- package/src/execute-dynamic.ts +0 -617
- package/src/execute.ts +0 -2386
- package/src/gateway-client.ts +0 -192
- package/src/hivr-whitelist.ts +0 -110
- package/src/http-api.ts +0 -286
- package/src/http-server-minimal.ts +0 -154
- package/src/index.ts +0 -2611
- package/src/intelligent-gateway.ts +0 -339
- package/src/mcp-analytics.ts +0 -156
- package/src/metered.ts +0 -149
- package/src/open-apis-generated.ts +0 -157
- package/src/open-apis.ts +0 -558
- package/src/postinstall.ts +0 -18
- package/src/product-whitelist.ts +0 -246
- package/src/proxy.ts +0 -36
- package/src/session.ts +0 -129
- package/src/stripe.ts +0 -497
- package/src/telemetry.ts +0 -71
- package/src/test.ts +0 -135
- package/src/types/convex-api.d.ts +0 -20
- package/src/types/convex-api.ts +0 -21
- package/src/types.ts +0 -109
- package/src/ui/colors.ts +0 -219
- package/src/ui/errors.ts +0 -394
- package/src/ui/index.ts +0 -17
- package/src/ui/prompts.ts +0 -390
- package/src/ui/spinner.ts +0 -325
- package/src/utils/backup.ts +0 -224
- package/src/utils/config.ts +0 -318
- package/src/utils/os.ts +0 -124
- package/src/utils/paths.ts +0 -203
- package/src/webhook.ts +0 -107
- package/test-10-working.cjs +0 -97
- package/test-14-final.cjs +0 -96
- package/test-actual-handlers.ts +0 -92
- package/test-apilayer-all-14.ts +0 -249
- package/test-apilayer-fixed.ts +0 -248
- package/test-direct-endpoints.ts +0 -174
- package/test-exact-endpoints.ts +0 -144
- package/test-final.ts +0 -83
- package/test-full-routing.ts +0 -100
- package/test-handlers-correct.ts +0 -217
- package/test-numverify-key.ts +0 -41
- package/test-via-handlers.ts +0 -92
- package/test-worldnews.mjs +0 -26
- package/tsconfig.json +0 -20
package/convex/http.ts
DELETED
|
@@ -1,3430 +0,0 @@
|
|
|
1
|
-
import { httpRouter } from "convex/server";
|
|
2
|
-
import { httpAction } from "./_generated/server";
|
|
3
|
-
import { api, internal } from "./_generated/api";
|
|
4
|
-
import {
|
|
5
|
-
createCheckoutSession,
|
|
6
|
-
createPortalSession,
|
|
7
|
-
handleStripeWebhook,
|
|
8
|
-
checkoutOptions,
|
|
9
|
-
portalOptions,
|
|
10
|
-
webhookOptions,
|
|
11
|
-
} from "./stripeActions";
|
|
12
|
-
|
|
13
|
-
const http = httpRouter();
|
|
14
|
-
|
|
15
|
-
// Provider catalog — all 20 Direct Call providers
|
|
16
|
-
interface ProviderMeta {
|
|
17
|
-
name: string;
|
|
18
|
-
description: string;
|
|
19
|
-
category: string;
|
|
20
|
-
pricing: string;
|
|
21
|
-
regions: string[];
|
|
22
|
-
tags: string[];
|
|
23
|
-
isLLM: boolean; // can serve /v1/chat/completions
|
|
24
|
-
envKey?: string; // env var name for API key
|
|
25
|
-
baseUrl?: string; // chat completions base URL (LLM providers only)
|
|
26
|
-
speed: "fast" | "medium" | "slow"; // latency tier
|
|
27
|
-
costTier: "free" | "cheap" | "medium" | "expensive"; // relative cost
|
|
28
|
-
}
|
|
29
|
-
|
|
30
|
-
const PROVIDERS: Record<string, ProviderMeta> = {
|
|
31
|
-
openrouter: {
|
|
32
|
-
name: "OpenRouter",
|
|
33
|
-
description: "Multi-model LLM API. Access GPT, Claude, Llama, Gemini, and 800+ models.",
|
|
34
|
-
category: "llm",
|
|
35
|
-
pricing: "Varies by model",
|
|
36
|
-
regions: ["Global"],
|
|
37
|
-
tags: ["llm", "ai", "gpt", "claude", "gemini", "llama"],
|
|
38
|
-
isLLM: true,
|
|
39
|
-
envKey: "OPENROUTER_API_KEY",
|
|
40
|
-
baseUrl: "https://openrouter.ai/api/v1/chat/completions",
|
|
41
|
-
speed: "medium",
|
|
42
|
-
costTier: "medium",
|
|
43
|
-
},
|
|
44
|
-
groq: {
|
|
45
|
-
name: "Groq",
|
|
46
|
-
description: "Ultra-fast LLM inference. Llama, Mixtral, Gemma at lightning speed.",
|
|
47
|
-
category: "llm",
|
|
48
|
-
pricing: "~$0.05-0.27/M tokens",
|
|
49
|
-
regions: ["Global"],
|
|
50
|
-
tags: ["llm", "fast", "llama", "mixtral", "gemma"],
|
|
51
|
-
isLLM: true,
|
|
52
|
-
envKey: "GROQ_API_KEY",
|
|
53
|
-
baseUrl: "https://api.groq.com/openai/v1/chat/completions",
|
|
54
|
-
speed: "fast",
|
|
55
|
-
costTier: "cheap",
|
|
56
|
-
},
|
|
57
|
-
mistral: {
|
|
58
|
-
name: "Mistral",
|
|
59
|
-
description: "Mistral AI models. Efficient European LLMs with strong coding.",
|
|
60
|
-
category: "llm",
|
|
61
|
-
pricing: "~$0.10-2.00/M tokens",
|
|
62
|
-
regions: ["EU", "Global"],
|
|
63
|
-
tags: ["llm", "mistral", "eu", "coding", "embeddings"],
|
|
64
|
-
isLLM: true,
|
|
65
|
-
envKey: "MISTRAL_API_KEY",
|
|
66
|
-
baseUrl: "https://api.mistral.ai/v1/chat/completions",
|
|
67
|
-
speed: "fast",
|
|
68
|
-
costTier: "cheap",
|
|
69
|
-
},
|
|
70
|
-
together: {
|
|
71
|
-
name: "Together AI",
|
|
72
|
-
description: "Open-source model inference. Llama, Qwen, DeepSeek at scale.",
|
|
73
|
-
category: "llm",
|
|
74
|
-
pricing: "~$0.10-0.90/M tokens",
|
|
75
|
-
regions: ["Global"],
|
|
76
|
-
tags: ["llm", "open-source", "llama", "qwen", "deepseek"],
|
|
77
|
-
isLLM: true,
|
|
78
|
-
envKey: "TOGETHER_API_KEY",
|
|
79
|
-
baseUrl: "https://api.together.xyz/v1/chat/completions",
|
|
80
|
-
speed: "fast",
|
|
81
|
-
costTier: "cheap",
|
|
82
|
-
},
|
|
83
|
-
openai: {
|
|
84
|
-
name: "OpenAI",
|
|
85
|
-
description: "GPT-5.4, GPT-4o, o3, o4-mini. Direct access, no middleman markup.",
|
|
86
|
-
category: "llm",
|
|
87
|
-
pricing: "~$2.50-15.00/M tokens",
|
|
88
|
-
regions: ["Global"],
|
|
89
|
-
tags: ["llm", "gpt", "openai", "gpt-5", "o3", "o4", "coding"],
|
|
90
|
-
isLLM: true,
|
|
91
|
-
envKey: "OPENAI_API_KEY",
|
|
92
|
-
baseUrl: "https://api.openai.com/v1/chat/completions",
|
|
93
|
-
speed: "medium",
|
|
94
|
-
costTier: "expensive",
|
|
95
|
-
},
|
|
96
|
-
xai: {
|
|
97
|
-
name: "xAI",
|
|
98
|
-
description: "Grok models by xAI. Reasoning, coding, and real-time knowledge via X/Twitter data.",
|
|
99
|
-
category: "llm",
|
|
100
|
-
pricing: "~$0.30-3.00/M tokens",
|
|
101
|
-
regions: ["Global"],
|
|
102
|
-
tags: ["llm", "grok", "reasoning", "xai", "x", "twitter"],
|
|
103
|
-
isLLM: true,
|
|
104
|
-
envKey: "XAI_API_KEY",
|
|
105
|
-
baseUrl: "https://api.x.ai/v1/chat/completions",
|
|
106
|
-
speed: "medium",
|
|
107
|
-
costTier: "medium",
|
|
108
|
-
},
|
|
109
|
-
anthropic: {
|
|
110
|
-
name: "Anthropic",
|
|
111
|
-
description: "Claude models by Anthropic. Best-in-class reasoning, coding, and analysis.",
|
|
112
|
-
category: "llm",
|
|
113
|
-
pricing: "~$0.80-15.00/M tokens",
|
|
114
|
-
regions: ["Global"],
|
|
115
|
-
tags: ["llm", "claude", "anthropic", "reasoning", "coding", "analysis"],
|
|
116
|
-
isLLM: true,
|
|
117
|
-
envKey: "ANTHROPIC_API_KEY",
|
|
118
|
-
baseUrl: "https://api.anthropic.com/v1/messages",
|
|
119
|
-
speed: "medium",
|
|
120
|
-
costTier: "expensive",
|
|
121
|
-
},
|
|
122
|
-
cohere: {
|
|
123
|
-
name: "Cohere",
|
|
124
|
-
description: "Enterprise LLM with strong RAG and reranking capabilities.",
|
|
125
|
-
category: "llm",
|
|
126
|
-
pricing: "~$0.15-2.50/M tokens",
|
|
127
|
-
regions: ["Global"],
|
|
128
|
-
tags: ["llm", "rag", "rerank", "enterprise", "embeddings"],
|
|
129
|
-
isLLM: false, // Cohere uses non-OpenAI-compatible API format
|
|
130
|
-
envKey: "COHERE_API_KEY",
|
|
131
|
-
speed: "medium",
|
|
132
|
-
costTier: "medium",
|
|
133
|
-
},
|
|
134
|
-
"46elks": {
|
|
135
|
-
name: "46elks",
|
|
136
|
-
description: "SMS API for EU/Nordics. GDPR compliant.",
|
|
137
|
-
category: "sms",
|
|
138
|
-
pricing: "~$0.035/SMS",
|
|
139
|
-
regions: ["EU", "Nordic"],
|
|
140
|
-
tags: ["sms", "eu", "gdpr", "nordic"],
|
|
141
|
-
isLLM: false,
|
|
142
|
-
envKey: "ELKS_API_KEY",
|
|
143
|
-
speed: "fast",
|
|
144
|
-
costTier: "cheap",
|
|
145
|
-
},
|
|
146
|
-
twilio: {
|
|
147
|
-
name: "Twilio",
|
|
148
|
-
description: "SMS and Voice API. Global coverage.",
|
|
149
|
-
category: "sms",
|
|
150
|
-
pricing: "~$0.04/SMS, ~$0.01/min voice",
|
|
151
|
-
regions: ["Global"],
|
|
152
|
-
tags: ["sms", "voice", "global"],
|
|
153
|
-
isLLM: false,
|
|
154
|
-
envKey: "TWILIO_AUTH_TOKEN",
|
|
155
|
-
speed: "fast",
|
|
156
|
-
costTier: "cheap",
|
|
157
|
-
},
|
|
158
|
-
resend: {
|
|
159
|
-
name: "Resend",
|
|
160
|
-
description: "Modern email API. Developer-friendly.",
|
|
161
|
-
category: "email",
|
|
162
|
-
pricing: "~$0.001/email",
|
|
163
|
-
regions: ["Global"],
|
|
164
|
-
tags: ["email", "transactional"],
|
|
165
|
-
isLLM: false,
|
|
166
|
-
envKey: "RESEND_API_KEY",
|
|
167
|
-
speed: "fast",
|
|
168
|
-
costTier: "free",
|
|
169
|
-
},
|
|
170
|
-
brave_search: {
|
|
171
|
-
name: "Brave Search",
|
|
172
|
-
description: "Privacy-focused web search API.",
|
|
173
|
-
category: "search",
|
|
174
|
-
pricing: "~$0.005/search",
|
|
175
|
-
regions: ["Global"],
|
|
176
|
-
tags: ["search", "web", "privacy"],
|
|
177
|
-
isLLM: false,
|
|
178
|
-
envKey: "BRAVE_API_KEY",
|
|
179
|
-
speed: "fast",
|
|
180
|
-
costTier: "cheap",
|
|
181
|
-
},
|
|
182
|
-
serper: {
|
|
183
|
-
name: "Serper",
|
|
184
|
-
description: "Google Search API. Fast SERP results for AI agents.",
|
|
185
|
-
category: "search",
|
|
186
|
-
pricing: "~$0.001/search",
|
|
187
|
-
regions: ["Global"],
|
|
188
|
-
tags: ["search", "google", "serp"],
|
|
189
|
-
isLLM: false,
|
|
190
|
-
envKey: "SERPER_API_KEY",
|
|
191
|
-
speed: "fast",
|
|
192
|
-
costTier: "cheap",
|
|
193
|
-
},
|
|
194
|
-
elevenlabs: {
|
|
195
|
-
name: "ElevenLabs",
|
|
196
|
-
description: "Text-to-speech API. High quality AI voices.",
|
|
197
|
-
category: "tts",
|
|
198
|
-
pricing: "~$0.0003/char",
|
|
199
|
-
regions: ["Global"],
|
|
200
|
-
tags: ["tts", "voice", "audio", "speech"],
|
|
201
|
-
isLLM: false,
|
|
202
|
-
envKey: "ELEVENLABS_API_KEY",
|
|
203
|
-
speed: "medium",
|
|
204
|
-
costTier: "medium",
|
|
205
|
-
},
|
|
206
|
-
deepgram: {
|
|
207
|
-
name: "Deepgram",
|
|
208
|
-
description: "Speech-to-text API. Fast, accurate transcription with Nova-3.",
|
|
209
|
-
category: "stt",
|
|
210
|
-
pricing: "~$0.0043/min",
|
|
211
|
-
regions: ["Global"],
|
|
212
|
-
tags: ["stt", "transcription", "voice", "audio"],
|
|
213
|
-
isLLM: false,
|
|
214
|
-
envKey: "DEEPGRAM_API_KEY",
|
|
215
|
-
speed: "fast",
|
|
216
|
-
costTier: "cheap",
|
|
217
|
-
},
|
|
218
|
-
assemblyai: {
|
|
219
|
-
name: "AssemblyAI",
|
|
220
|
-
description: "Speech-to-text with speaker diarization, summarization, and sentiment.",
|
|
221
|
-
category: "stt",
|
|
222
|
-
pricing: "~$0.01/min",
|
|
223
|
-
regions: ["Global"],
|
|
224
|
-
tags: ["stt", "transcription", "diarization", "sentiment"],
|
|
225
|
-
isLLM: false,
|
|
226
|
-
envKey: "ASSEMBLYAI_API_KEY",
|
|
227
|
-
speed: "medium",
|
|
228
|
-
costTier: "cheap",
|
|
229
|
-
},
|
|
230
|
-
replicate: {
|
|
231
|
-
name: "Replicate",
|
|
232
|
-
description: "Run AI models (Whisper, SDXL, Llama, Flux, etc). Pay per prediction.",
|
|
233
|
-
category: "ai",
|
|
234
|
-
pricing: "Varies by model",
|
|
235
|
-
regions: ["Global"],
|
|
236
|
-
tags: ["ai", "ml", "whisper", "image", "audio", "transcription"],
|
|
237
|
-
isLLM: false,
|
|
238
|
-
envKey: "REPLICATE_API_TOKEN",
|
|
239
|
-
speed: "slow",
|
|
240
|
-
costTier: "medium",
|
|
241
|
-
},
|
|
242
|
-
stability: {
|
|
243
|
-
name: "Stability AI",
|
|
244
|
-
description: "Image generation API. Stable Diffusion 3, SDXL.",
|
|
245
|
-
category: "image",
|
|
246
|
-
pricing: "~$0.03/image",
|
|
247
|
-
regions: ["Global"],
|
|
248
|
-
tags: ["image", "generation", "stable-diffusion", "sdxl"],
|
|
249
|
-
isLLM: false,
|
|
250
|
-
envKey: "STABILITY_API_KEY",
|
|
251
|
-
speed: "slow",
|
|
252
|
-
costTier: "medium",
|
|
253
|
-
},
|
|
254
|
-
firecrawl: {
|
|
255
|
-
name: "Firecrawl",
|
|
256
|
-
description: "Web scraping and crawling API. Extract clean data from any URL.",
|
|
257
|
-
category: "scraping",
|
|
258
|
-
pricing: "~$0.001/page",
|
|
259
|
-
regions: ["Global"],
|
|
260
|
-
tags: ["scraping", "web", "crawl", "extract"],
|
|
261
|
-
isLLM: false,
|
|
262
|
-
envKey: "FIRECRAWL_API_KEY",
|
|
263
|
-
speed: "medium",
|
|
264
|
-
costTier: "cheap",
|
|
265
|
-
},
|
|
266
|
-
github: {
|
|
267
|
-
name: "GitHub",
|
|
268
|
-
description: "GitHub API. Search repos, manage code, access developer data.",
|
|
269
|
-
category: "code",
|
|
270
|
-
pricing: "Free tier available",
|
|
271
|
-
regions: ["Global"],
|
|
272
|
-
tags: ["github", "code", "repos", "developer"],
|
|
273
|
-
isLLM: false,
|
|
274
|
-
envKey: "GITHUB_TOKEN",
|
|
275
|
-
speed: "fast",
|
|
276
|
-
costTier: "free",
|
|
277
|
-
},
|
|
278
|
-
e2b: {
|
|
279
|
-
name: "E2B",
|
|
280
|
-
description: "Secure code sandbox for AI agents. Run Python, shell in isolated environments.",
|
|
281
|
-
category: "sandbox",
|
|
282
|
-
pricing: "$0.000028/s (2 vCPU)",
|
|
283
|
-
regions: ["Global"],
|
|
284
|
-
tags: ["sandbox", "code", "python", "execution", "ai", "agents"],
|
|
285
|
-
isLLM: false,
|
|
286
|
-
envKey: "E2B_API_KEY",
|
|
287
|
-
speed: "medium",
|
|
288
|
-
costTier: "cheap",
|
|
289
|
-
},
|
|
290
|
-
apilayer: {
|
|
291
|
-
name: "APILayer",
|
|
292
|
-
description: "14 APIs: exchange rates, market data, aviation, PDF, screenshots, email/phone verification, VAT, news, scraping, and more.",
|
|
293
|
-
category: "multi",
|
|
294
|
-
pricing: "Free tier available, paid plans per API",
|
|
295
|
-
regions: ["Global"],
|
|
296
|
-
tags: ["exchange", "stocks", "aviation", "pdf", "screenshot", "verification", "vat", "news", "scraping"],
|
|
297
|
-
isLLM: false,
|
|
298
|
-
envKey: "APILAYER_API_KEY",
|
|
299
|
-
speed: "medium",
|
|
300
|
-
costTier: "cheap",
|
|
301
|
-
},
|
|
302
|
-
voyage: {
|
|
303
|
-
name: "Voyage AI",
|
|
304
|
-
description: "State-of-the-art embeddings for RAG and agent memory. Best-in-class retrieval quality.",
|
|
305
|
-
category: "embeddings",
|
|
306
|
-
pricing: "~$0.02-0.18/M tokens",
|
|
307
|
-
regions: ["Global"],
|
|
308
|
-
tags: ["embeddings", "rag", "agent-memory", "retrieval", "voyage-3", "code-embeddings"],
|
|
309
|
-
isLLM: false,
|
|
310
|
-
envKey: "VOYAGE_API_KEY",
|
|
311
|
-
speed: "fast",
|
|
312
|
-
costTier: "cheap",
|
|
313
|
-
},
|
|
314
|
-
};
|
|
315
|
-
|
|
316
|
-
// ==============================================
|
|
317
|
-
// PROVIDER COST TABLE (per million tokens, USD)
|
|
318
|
-
// ==============================================
|
|
319
|
-
const MODEL_COSTS: Record<string, { input: number; output: number }> = {
|
|
320
|
-
// OpenAI
|
|
321
|
-
"gpt-5.4": { input: 12.50, output: 50.00 },
|
|
322
|
-
"gpt-5": { input: 10.00, output: 40.00 },
|
|
323
|
-
"gpt-4o": { input: 2.50, output: 10.00 },
|
|
324
|
-
"gpt-4o-mini": { input: 0.15, output: 0.60 },
|
|
325
|
-
"gpt-4.1": { input: 2.00, output: 8.00 },
|
|
326
|
-
"o3": { input: 10.00, output: 40.00 },
|
|
327
|
-
"o4-mini": { input: 1.10, output: 4.40 },
|
|
328
|
-
// Groq (heavily discounted)
|
|
329
|
-
"llama-3.3-70b-versatile": { input: 0.059, output: 0.079 },
|
|
330
|
-
"llama-3.1-8b-instant": { input: 0.05, output: 0.08 },
|
|
331
|
-
"llama-3.1-70b-versatile": { input: 0.059, output: 0.079 },
|
|
332
|
-
"gemma2-9b-it": { input: 0.02, output: 0.02 },
|
|
333
|
-
"mixtral-8x7b-32768": { input: 0.024, output: 0.024 },
|
|
334
|
-
// Mistral
|
|
335
|
-
"mistral-small-latest": { input: 0.10, output: 0.30 },
|
|
336
|
-
"mistral-large-latest": { input: 2.00, output: 6.00 },
|
|
337
|
-
"mistral-medium-latest": { input: 0.40, output: 1.20 },
|
|
338
|
-
"codestral-latest": { input: 0.30, output: 0.90 },
|
|
339
|
-
"pixtral-large-latest": { input: 2.00, output: 6.00 },
|
|
340
|
-
"open-mistral-nemo": { input: 0.15, output: 0.15 },
|
|
341
|
-
// Together
|
|
342
|
-
"deepseek-ai/DeepSeek-R1": { input: 0.55, output: 2.19 },
|
|
343
|
-
"deepseek-ai/DeepSeek-V3": { input: 0.30, output: 0.88 },
|
|
344
|
-
"meta-llama/Llama-3.3-70B-Instruct-Turbo": { input: 0.18, output: 0.18 },
|
|
345
|
-
"Qwen/Qwen2.5-72B-Instruct-Turbo": { input: 0.18, output: 0.18 },
|
|
346
|
-
// xAI
|
|
347
|
-
"grok-4.20-reasoning": { input: 3.00, output: 15.00 },
|
|
348
|
-
"grok-3": { input: 3.00, output: 15.00 },
|
|
349
|
-
"grok-3-mini": { input: 0.30, output: 0.50 },
|
|
350
|
-
"grok-2-latest": { input: 2.00, output: 10.00 },
|
|
351
|
-
// Anthropic (direct or via OpenRouter)
|
|
352
|
-
"claude-sonnet-4-6": { input: 3.00, output: 15.00 },
|
|
353
|
-
"claude-opus-4-6": { input: 15.00, output: 75.00 },
|
|
354
|
-
"claude-opus-4": { input: 15.00, output: 75.00 },
|
|
355
|
-
"claude-4-sonnet": { input: 3.00, output: 15.00 },
|
|
356
|
-
"claude-4-opus": { input: 15.00, output: 75.00 },
|
|
357
|
-
"claude-3.5-sonnet": { input: 3.00, output: 15.00 },
|
|
358
|
-
"claude-3-5-sonnet-20241022": { input: 3.00, output: 15.00 },
|
|
359
|
-
"claude-haiku-4-5": { input: 0.80, output: 4.00 },
|
|
360
|
-
"claude-3-5-haiku-20241022": { input: 0.80, output: 4.00 },
|
|
361
|
-
"anthropic/claude-sonnet-4-6": { input: 3.00, output: 15.00 },
|
|
362
|
-
"anthropic/claude-haiku-3.5": { input: 0.80, output: 4.00 },
|
|
363
|
-
};
|
|
364
|
-
|
|
365
|
-
// APIClaw margin: 15% on top of provider cost (market standard)
|
|
366
|
-
const APICLAW_MARGIN = 0.15;
|
|
367
|
-
|
|
368
|
-
function calculateCallCost(model: string, usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number }): { providerCost: number; apiclawCost: number } {
|
|
369
|
-
if (!usage) return { providerCost: 0, apiclawCost: 0 };
|
|
370
|
-
|
|
371
|
-
// Find cost entry (try exact match, then partial)
|
|
372
|
-
let costs = MODEL_COSTS[model];
|
|
373
|
-
if (!costs) {
|
|
374
|
-
const modelLower = model.toLowerCase();
|
|
375
|
-
const key = Object.keys(MODEL_COSTS).find(k => modelLower.includes(k.toLowerCase()));
|
|
376
|
-
if (key) costs = MODEL_COSTS[key];
|
|
377
|
-
}
|
|
378
|
-
if (!costs) {
|
|
379
|
-
// Unknown model -- estimate at medium tier
|
|
380
|
-
costs = { input: 1.00, output: 3.00 };
|
|
381
|
-
}
|
|
382
|
-
|
|
383
|
-
const inputTokens = usage.prompt_tokens || 0;
|
|
384
|
-
const outputTokens = usage.completion_tokens || 0;
|
|
385
|
-
|
|
386
|
-
const providerCost = (inputTokens * costs.input + outputTokens * costs.output) / 1_000_000;
|
|
387
|
-
const apiclawCost = providerCost * (1 + APICLAW_MARGIN);
|
|
388
|
-
|
|
389
|
-
return { providerCost, apiclawCost };
|
|
390
|
-
}
|
|
391
|
-
|
|
392
|
-
// ==============================================
|
|
393
|
-
// INTELLIGENT LLM ROUTER
|
|
394
|
-
// ==============================================
|
|
395
|
-
|
|
396
|
-
// Model-to-provider mapping: which direct providers can serve which model patterns
|
|
397
|
-
const MODEL_PROVIDER_MAP: { pattern: RegExp; provider: string; nativeModel: string }[] = [
|
|
398
|
-
// Groq-native models (ultra-fast inference)
|
|
399
|
-
{ pattern: /^(groq\/)?llama-3\.3-70b/i, provider: "groq", nativeModel: "llama-3.3-70b-versatile" },
|
|
400
|
-
{ pattern: /^(groq\/)?llama-3\.1-8b/i, provider: "groq", nativeModel: "llama-3.1-8b-instant" },
|
|
401
|
-
{ pattern: /^(groq\/)?llama-3\.1-70b/i, provider: "groq", nativeModel: "llama-3.1-70b-versatile" },
|
|
402
|
-
{ pattern: /^(groq\/)?gemma2?-9b/i, provider: "groq", nativeModel: "gemma2-9b-it" },
|
|
403
|
-
{ pattern: /^(groq\/)?mixtral-8x7b/i, provider: "groq", nativeModel: "mixtral-8x7b-32768" },
|
|
404
|
-
// Mistral-native models
|
|
405
|
-
{ pattern: /^(mistralai\/)?mistral-small/i, provider: "mistral", nativeModel: "mistral-small-latest" },
|
|
406
|
-
{ pattern: /^(mistralai\/)?mistral-large/i, provider: "mistral", nativeModel: "mistral-large-latest" },
|
|
407
|
-
{ pattern: /^(mistralai\/)?mistral-medium/i, provider: "mistral", nativeModel: "mistral-medium-latest" },
|
|
408
|
-
{ pattern: /^(mistralai\/)?codestral/i, provider: "mistral", nativeModel: "codestral-latest" },
|
|
409
|
-
{ pattern: /^(mistralai\/)?pixtral/i, provider: "mistral", nativeModel: "pixtral-large-latest" },
|
|
410
|
-
{ pattern: /^(mistralai\/)?mistral-nemo/i, provider: "mistral", nativeModel: "open-mistral-nemo" },
|
|
411
|
-
// Together-native models (open-source at scale)
|
|
412
|
-
{ pattern: /^(together\/)?meta-llama\/Llama-3\.3-70B/i, provider: "together", nativeModel: "meta-llama/Llama-3.3-70B-Instruct-Turbo" },
|
|
413
|
-
{ pattern: /^(together\/)?Qwen\/Qwen2\.5-72B/i, provider: "together", nativeModel: "Qwen/Qwen2.5-72B-Instruct-Turbo" },
|
|
414
|
-
{ pattern: /^(together\/)?deepseek-ai\/DeepSeek-R1/i, provider: "together", nativeModel: "deepseek-ai/DeepSeek-R1" },
|
|
415
|
-
{ pattern: /^(together\/)?deepseek-ai\/DeepSeek-V3/i, provider: "together", nativeModel: "deepseek-ai/DeepSeek-V3" },
|
|
416
|
-
// OpenAI direct models
|
|
417
|
-
{ pattern: /^(openai\/)?gpt-5\.4/i, provider: "openai", nativeModel: "gpt-5.4" },
|
|
418
|
-
{ pattern: /^(openai\/)?gpt-5/i, provider: "openai", nativeModel: "gpt-5" },
|
|
419
|
-
{ pattern: /^(openai\/)?gpt-4o/i, provider: "openai", nativeModel: "gpt-4o" },
|
|
420
|
-
{ pattern: /^(openai\/)?gpt-4\.1/i, provider: "openai", nativeModel: "gpt-4.1" },
|
|
421
|
-
{ pattern: /^(openai\/)?o3/i, provider: "openai", nativeModel: "o3" },
|
|
422
|
-
{ pattern: /^(openai\/)?o4-mini/i, provider: "openai", nativeModel: "o4-mini" },
|
|
423
|
-
// xAI/Grok models
|
|
424
|
-
{ pattern: /^(xai\/)?grok-4/i, provider: "xai", nativeModel: "grok-4.20-reasoning" },
|
|
425
|
-
{ pattern: /^(xai\/)?grok-3-mini/i, provider: "xai", nativeModel: "grok-3-mini" },
|
|
426
|
-
{ pattern: /^(xai\/)?grok-3/i, provider: "xai", nativeModel: "grok-3" },
|
|
427
|
-
{ pattern: /^(xai\/)?grok-2/i, provider: "xai", nativeModel: "grok-2-latest" },
|
|
428
|
-
// Anthropic direct models
|
|
429
|
-
{ pattern: /^(anthropic\/)?claude-sonnet-4-6/i, provider: "anthropic", nativeModel: "claude-sonnet-4-6-20250514" },
|
|
430
|
-
{ pattern: /^(anthropic\/)?claude-4-sonnet/i, provider: "anthropic", nativeModel: "claude-sonnet-4-6-20250514" },
|
|
431
|
-
{ pattern: /^(anthropic\/)?claude-opus-4/i, provider: "anthropic", nativeModel: "claude-opus-4-6-20250514" },
|
|
432
|
-
{ pattern: /^(anthropic\/)?claude-4-opus/i, provider: "anthropic", nativeModel: "claude-opus-4-6-20250514" },
|
|
433
|
-
{ pattern: /^(anthropic\/)?claude-3[\.\-]5-sonnet/i, provider: "anthropic", nativeModel: "claude-3-5-sonnet-20241022" },
|
|
434
|
-
{ pattern: /^(anthropic\/)?claude-haiku-4/i, provider: "anthropic", nativeModel: "claude-haiku-4-5-20251001" },
|
|
435
|
-
{ pattern: /^(anthropic\/)?claude-3[\.\-]5-haiku/i, provider: "anthropic", nativeModel: "claude-3-5-haiku-20241022" },
|
|
436
|
-
// Shorthand aliases -- route common names to cheapest/fastest direct provider
|
|
437
|
-
{ pattern: /^deepseek-r1$/i, provider: "together", nativeModel: "deepseek-ai/DeepSeek-R1" },
|
|
438
|
-
{ pattern: /^deepseek-v3$/i, provider: "together", nativeModel: "deepseek-ai/DeepSeek-V3" },
|
|
439
|
-
{ pattern: /^llama-?3\.?3/i, provider: "groq", nativeModel: "llama-3.3-70b-versatile" },
|
|
440
|
-
{ pattern: /^llama-?3\.?1-?8b/i, provider: "groq", nativeModel: "llama-3.1-8b-instant" },
|
|
441
|
-
{ pattern: /^qwen-?2\.?5/i, provider: "together", nativeModel: "Qwen/Qwen2.5-72B-Instruct-Turbo" },
|
|
442
|
-
];
|
|
443
|
-
|
|
444
|
-
interface RoutingDecision {
|
|
445
|
-
provider: string;
|
|
446
|
-
model: string;
|
|
447
|
-
baseUrl: string;
|
|
448
|
-
apiKey: string;
|
|
449
|
-
reason: string;
|
|
450
|
-
extraHeaders?: Record<string, string>;
|
|
451
|
-
}
|
|
452
|
-
|
|
453
|
-
// ==============================================
|
|
454
|
-
// ADVISOR: Analyzes prompts to pick optimal model+provider
|
|
455
|
-
// Runs only when model is "auto" or unspecified and routing mode is "balanced"
|
|
456
|
-
// Uses Mistral Small (~$0.00001/decision) for near-zero cost intelligence
|
|
457
|
-
// ==============================================
|
|
458
|
-
|
|
459
|
-
const ADVISOR_SYSTEM_PROMPT = `You are an LLM routing advisor. Given a user prompt, pick the optimal provider and model.
|
|
460
|
-
|
|
461
|
-
PROVIDERS (use exact provider key and model name):
|
|
462
|
-
|
|
463
|
-
provider: "mistral", model: "mistral-small-latest" -- Fast, cheap. Simple Q&A, translation, summarization.
|
|
464
|
-
provider: "mistral", model: "mistral-large-latest" -- Strong reasoning, coding, complex analysis.
|
|
465
|
-
provider: "mistral", model: "codestral-latest" -- Code generation, debugging, technical.
|
|
466
|
-
provider: "together", model: "meta-llama/Llama-3.3-70B-Instruct-Turbo" -- Strong open-source all-rounder.
|
|
467
|
-
provider: "together", model: "deepseek-ai/DeepSeek-R1" -- Deep reasoning, math, chain-of-thought.
|
|
468
|
-
provider: "together", model: "Qwen/Qwen2.5-72B-Instruct-Turbo" -- Multilingual, strong CJK.
|
|
469
|
-
provider: "openrouter", model: "anthropic/claude-sonnet-4-6" -- Best quality. Complex multi-step, nuanced writing.
|
|
470
|
-
provider: "openrouter", model: "openai/gpt-4o" -- Vision, function calling, broad knowledge.
|
|
471
|
-
provider: "openrouter", model: "google/gemini-2.0-flash-001" -- Fast multimodal, long context.
|
|
472
|
-
|
|
473
|
-
Respond with ONLY JSON:
|
|
474
|
-
{"provider":"mistral","model":"mistral-small-latest","reason":"simple factual query"}`;
|
|
475
|
-
|
|
476
|
-
interface AdvisorDecision {
|
|
477
|
-
provider: string;
|
|
478
|
-
model: string;
|
|
479
|
-
reason: string;
|
|
480
|
-
}
|
|
481
|
-
|
|
482
|
-
async function advisorPickModel(
|
|
483
|
-
messages: Array<{ role: string; content: string }>,
|
|
484
|
-
settings: { blockedProviders: string[] }
|
|
485
|
-
): Promise<AdvisorDecision | null> {
|
|
486
|
-
// Extract first user message for analysis (keep it short)
|
|
487
|
-
const userMsg = messages.find(m => m.role === "user");
|
|
488
|
-
if (!userMsg) return null;
|
|
489
|
-
|
|
490
|
-
const promptPreview = typeof userMsg.content === "string"
|
|
491
|
-
? userMsg.content.slice(0, 500)
|
|
492
|
-
: JSON.stringify(userMsg.content).slice(0, 500);
|
|
493
|
-
|
|
494
|
-
// Use Mistral Small as the advisor (fast + cheap)
|
|
495
|
-
const mistralKey = process.env.MISTRAL_API_KEY;
|
|
496
|
-
if (!mistralKey) return null;
|
|
497
|
-
|
|
498
|
-
try {
|
|
499
|
-
const response = await fetch("https://api.mistral.ai/v1/chat/completions", {
|
|
500
|
-
method: "POST",
|
|
501
|
-
headers: {
|
|
502
|
-
"Authorization": `Bearer ${mistralKey}`,
|
|
503
|
-
"Content-Type": "application/json",
|
|
504
|
-
},
|
|
505
|
-
body: JSON.stringify({
|
|
506
|
-
model: "mistral-small-latest",
|
|
507
|
-
messages: [
|
|
508
|
-
{ role: "system", content: ADVISOR_SYSTEM_PROMPT },
|
|
509
|
-
{ role: "user", content: `Route this prompt:\n\n${promptPreview}` },
|
|
510
|
-
],
|
|
511
|
-
max_tokens: 100,
|
|
512
|
-
temperature: 0,
|
|
513
|
-
}),
|
|
514
|
-
});
|
|
515
|
-
|
|
516
|
-
if (!response.ok) return null;
|
|
517
|
-
|
|
518
|
-
const data: any = await response.json();
|
|
519
|
-
const content = data?.choices?.[0]?.message?.content?.trim();
|
|
520
|
-
if (!content) return null;
|
|
521
|
-
|
|
522
|
-
// Parse JSON response (handle markdown code blocks)
|
|
523
|
-
const jsonStr = content.replace(/```json?\n?/g, "").replace(/```/g, "").trim();
|
|
524
|
-
const decision = JSON.parse(jsonStr) as AdvisorDecision;
|
|
525
|
-
|
|
526
|
-
// Validate the decision
|
|
527
|
-
if (!decision.provider || !decision.model) return null;
|
|
528
|
-
|
|
529
|
-
// Check if the suggested provider is blocked
|
|
530
|
-
if (settings.blockedProviders.includes(decision.provider)) return null;
|
|
531
|
-
|
|
532
|
-
return decision;
|
|
533
|
-
} catch {
|
|
534
|
-
// Advisor failed silently -- fall through to rule-based routing
|
|
535
|
-
return null;
|
|
536
|
-
}
|
|
537
|
-
}
|
|
538
|
-
|
|
539
|
-
/**
 * Pick a concrete upstream (provider, model, baseUrl, apiKey) for an
 * OpenAI-style chat request. Resolution order:
 *   1. direct pattern match against MODEL_PROVIDER_MAP (always wins)
 *   2. LLM advisor -- only for "auto"/empty model in "balanced" mode
 *   3. "fastest" routing-mode static preference list
 *   4. caller-configured preferred providers
 *   5. OpenRouter catch-all fallback
 * Returns null when no configured provider can serve the request.
 */
async function routeLLMRequest(
  requestedModel: string,
  settings: {
    routingMode: string;
    preferredProviders: string[];
    blockedProviders: string[];
    allowOpenRouterFallback: boolean;
  },
  messages?: Array<{ role: string; content: string }>
): Promise<RoutingDecision | null> {
  // 1. Direct provider match -- always wins, no advisor needed
  for (const mapping of MODEL_PROVIDER_MAP) {
    if (!mapping.pattern.test(requestedModel)) continue;
    if (settings.blockedProviders.includes(mapping.provider)) continue;

    // Provider must be an LLM provider with both an env key name and a URL.
    const providerMeta = PROVIDERS[mapping.provider];
    if (!providerMeta?.isLLM || !providerMeta.envKey || !providerMeta.baseUrl) continue;

    const apiKey = process.env[providerMeta.envKey];
    if (!apiKey) continue;

    // For "highest_quality" mode, prefer OpenRouter (more model options)
    if (settings.routingMode === "highest_quality" && !settings.preferredProviders.includes(mapping.provider)) {
      continue;
    }

    return {
      provider: mapping.provider,
      model: mapping.nativeModel,
      baseUrl: providerMeta.baseUrl,
      apiKey,
      reason: `direct_${mapping.provider}`,
    };
  }

  // 2. ADVISOR -- intelligent model selection for ambiguous routing
  // Triggers when: model is generic ("auto", empty, or provider-prefixed like "openai/gpt-4o")
  // AND routing mode is "balanced" (default)
  // AND we have messages to analyze
  const isAutoModel = !requestedModel || requestedModel === "auto";
  const useAdvisor = isAutoModel && settings.routingMode === "balanced" && messages && messages.length > 0;

  if (useAdvisor) {
    const advisorDecision = await advisorPickModel(messages, settings);
    if (advisorDecision) {
      // Map advisor decision to a routing decision
      const providerKey = advisorDecision.provider;
      const providerMeta = PROVIDERS[providerKey];

      if (providerMeta?.isLLM && providerMeta.envKey && providerMeta.baseUrl) {
        const apiKey = process.env[providerMeta.envKey];
        if (apiKey) {
          return {
            provider: providerKey,
            model: advisorDecision.model,
            baseUrl: providerMeta.baseUrl,
            apiKey,
            reason: `advisor_${providerKey}: ${advisorDecision.reason}`,
            // OpenRouter attribution headers are only needed for openrouter.
            ...(providerKey === "openrouter" ? {
              extraHeaders: { "HTTP-Referer": "https://apiclaw.cloud", "X-Title": "APIClaw Gateway" },
            } : {}),
          };
        }
      }

      // Advisor picked a provider we don't have direct keys for -- route via OpenRouter
      // NOTE(review): advisorDecision.model may be a provider-native name
      // (e.g. "mistral-small-latest") while OpenRouter expects vendor-prefixed
      // slugs -- confirm OpenRouter accepts these names.
      if (!settings.blockedProviders.includes("openrouter") && settings.allowOpenRouterFallback !== false) {
        const orKey = process.env.OPENROUTER_API_KEY;
        if (orKey) {
          return {
            provider: "openrouter",
            model: advisorDecision.model,
            baseUrl: "https://openrouter.ai/api/v1/chat/completions",
            apiKey: orKey,
            reason: `advisor_via_openrouter: ${advisorDecision.reason}`,
            extraHeaders: { "HTTP-Referer": "https://apiclaw.cloud", "X-Title": "APIClaw Gateway" },
          };
        }
      }
    }
    // Advisor failed -- fall through to rule-based routing
  }

  // 3. Static routing mode preferences (fallback)
  if (settings.routingMode === "fastest") {
    for (const fastProvider of ["groq", "together", "mistral"]) {
      if (settings.blockedProviders.includes(fastProvider)) continue;
      const meta = PROVIDERS[fastProvider];
      if (!meta?.isLLM || !meta.envKey || !meta.baseUrl) continue;
      const key = process.env[meta.envKey];
      if (!key) continue;
      // Vendor-prefixed models can't be served by the fast providers; bail
      // out of fastest mode entirely and let later steps handle them.
      if (requestedModel.includes("anthropic/") || requestedModel.includes("openai/") || requestedModel.includes("google/")) break;
      // NOTE(review): requestedModel is forwarded verbatim here; if it is
      // "auto"/empty or another vendor's model name the upstream call will
      // likely 404 -- confirm the fast providers accept it.
      return {
        provider: fastProvider,
        model: requestedModel,
        baseUrl: meta.baseUrl,
        apiKey: key,
        reason: `fastest_mode_${fastProvider}`,
      };
    }
  }

  // 4. Preferred providers check
  for (const preferred of settings.preferredProviders) {
    if (settings.blockedProviders.includes(preferred)) continue;
    const meta = PROVIDERS[preferred];
    if (!meta?.isLLM || !meta.envKey || !meta.baseUrl) continue;
    const key = process.env[meta.envKey];
    if (!key) continue;
    return {
      provider: preferred,
      model: requestedModel,
      baseUrl: meta.baseUrl,
      apiKey: key,
      reason: `preferred_${preferred}`,
    };
  }

  // 5. Fallback to OpenRouter
  if (!settings.blockedProviders.includes("openrouter") && settings.allowOpenRouterFallback !== false) {
    const orKey = process.env.OPENROUTER_API_KEY;
    if (orKey) {
      return {
        provider: "openrouter",
        model: requestedModel,
        baseUrl: "https://openrouter.ai/api/v1/chat/completions",
        apiKey: orKey,
        reason: "openrouter_fallback",
        extraHeaders: {
          "HTTP-Referer": "https://apiclaw.cloud",
          "X-Title": "APIClaw Gateway",
        },
      };
    }
  }

  return null; // No provider available
}
|
|
677
|
-
|
|
678
|
-
// ==============================================
|
|
679
|
-
// ANTHROPIC MESSAGES API TRANSLATION
|
|
680
|
-
// Translates OpenAI chat format to/from Anthropic Messages API
|
|
681
|
-
// ==============================================
|
|
682
|
-
|
|
683
|
-
function openaiToAnthropicRequest(
|
|
684
|
-
model: string,
|
|
685
|
-
messages: Array<{ role: string; content: any }>,
|
|
686
|
-
rest: Record<string, any>
|
|
687
|
-
): { body: any; headers: Record<string, string> } {
|
|
688
|
-
// Extract system message
|
|
689
|
-
const systemMessages = messages.filter(m => m.role === "system");
|
|
690
|
-
const nonSystemMessages = messages.filter(m => m.role !== "system");
|
|
691
|
-
const systemText = systemMessages.map(m => typeof m.content === "string" ? m.content : JSON.stringify(m.content)).join("\n\n");
|
|
692
|
-
|
|
693
|
-
const body: any = {
|
|
694
|
-
model,
|
|
695
|
-
messages: nonSystemMessages.map(m => ({
|
|
696
|
-
role: m.role === "assistant" ? "assistant" : "user",
|
|
697
|
-
content: m.content,
|
|
698
|
-
})),
|
|
699
|
-
max_tokens: rest.max_tokens || rest.max_completion_tokens || 4096,
|
|
700
|
-
};
|
|
701
|
-
if (systemText) body.system = systemText;
|
|
702
|
-
if (rest.temperature !== undefined) body.temperature = rest.temperature;
|
|
703
|
-
if (rest.top_p !== undefined) body.top_p = rest.top_p;
|
|
704
|
-
if (rest.stop) body.stop_sequences = Array.isArray(rest.stop) ? rest.stop : [rest.stop];
|
|
705
|
-
|
|
706
|
-
return { body, headers: {} };
|
|
707
|
-
}
|
|
708
|
-
|
|
709
|
-
function anthropicToOpenaiResponse(anthropicData: any, model: string): any {
|
|
710
|
-
const content = anthropicData.content?.[0]?.text || "";
|
|
711
|
-
const inputTokens = anthropicData.usage?.input_tokens || 0;
|
|
712
|
-
const outputTokens = anthropicData.usage?.output_tokens || 0;
|
|
713
|
-
|
|
714
|
-
return {
|
|
715
|
-
id: anthropicData.id || `chatcmpl-${Date.now()}`,
|
|
716
|
-
object: "chat.completion",
|
|
717
|
-
created: Math.floor(Date.now() / 1000),
|
|
718
|
-
model,
|
|
719
|
-
choices: [{
|
|
720
|
-
index: 0,
|
|
721
|
-
message: { role: "assistant", content },
|
|
722
|
-
finish_reason: anthropicData.stop_reason === "end_turn" ? "stop" : (anthropicData.stop_reason || "stop"),
|
|
723
|
-
}],
|
|
724
|
-
usage: {
|
|
725
|
-
prompt_tokens: inputTokens,
|
|
726
|
-
completion_tokens: outputTokens,
|
|
727
|
-
total_tokens: inputTokens + outputTokens,
|
|
728
|
-
},
|
|
729
|
-
};
|
|
730
|
-
}
|
|
731
|
-
|
|
732
|
-
// CORS headers
|
|
733
|
-
const corsHeaders = {
|
|
734
|
-
"Access-Control-Allow-Origin": "*",
|
|
735
|
-
"Access-Control-Allow-Methods": "GET, POST, OPTIONS",
|
|
736
|
-
"Access-Control-Allow-Headers": "Content-Type, Authorization, X-APIClaw-Internal, X-APIClaw-Subagent",
|
|
737
|
-
};
|
|
738
|
-
|
|
739
|
-
// Helper for JSON responses
|
|
740
|
-
function jsonResponse(data: unknown, status = 200) {
|
|
741
|
-
return new Response(JSON.stringify(data), {
|
|
742
|
-
status,
|
|
743
|
-
headers: { "Content-Type": "application/json", ...corsHeaders },
|
|
744
|
-
});
|
|
745
|
-
}
|
|
746
|
-
|
|
747
|
-
// ============================================
// UNIFIED AUTH: resolves workspace from any auth method
// Priority: 1) Authorization: Bearer sk-claw-... (API key)
//           2) X-APIClaw-Identifier (legacy MCP workspace ID)
//           3) Anonymous (still allowed, just untracked)
// ============================================

/**
 * Resolve the calling workspace from an incoming HTTP request.
 *
 * @param ctx     Convex action context (runQuery/runMutation).
 * @param request Incoming HTTP request whose headers carry the credentials.
 * @returns workspaceId/keyId when resolvable, plus which auth path matched.
 *          Never throws -- all failures degrade to "anonymous".
 */
async function resolveWorkspaceFromRequest(
  ctx: any,
  request: Request
): Promise<{ workspaceId?: string; keyId?: string; authMethod: "api-key" | "identifier" | "anonymous" }> {
  // 1. Check for API key auth (Bearer sk-claw-...)
  const authHeader = request.headers.get("Authorization");
  if (authHeader?.startsWith("Bearer sk-claw-")) {
    const rawKey = authHeader.slice(7); // Remove "Bearer "
    try {
      const resolved = await ctx.runQuery(internal.apiKeys.resolveKey, { rawKey });
      if (resolved) {
        // Touch lastUsedAt (fire and forget)
        ctx.runMutation(api.apiKeys.touchKey, { keyId: resolved.keyId }).catch(() => {});
        return { workspaceId: resolved.workspaceId, keyId: resolved.keyId, authMethod: "api-key" };
      }
    } catch (e: any) {
      console.error("[Auth] API key resolution failed:", e.message);
    }
    // Invalid key - don't fall through to anonymous
    // NOTE(review): "don't fall through" here means the legacy-identifier
    // path is skipped -- an invalid sk-claw- key is still reported as
    // "anonymous" rather than rejected. Confirm a hard 401 wasn't intended.
    return { authMethod: "anonymous" };
  }

  // 2. Check for legacy identifier
  const identifier = request.headers.get("X-APIClaw-Identifier");
  // length > 20 appears to be a heuristic to filter out placeholder values;
  // presumably real workspace IDs are longer -- confirm against ID format.
  if (identifier && !identifier.startsWith("anon:") && identifier !== "unknown" && identifier.length > 20) {
    return { workspaceId: identifier, authMethod: "identifier" };
  }

  // 3. Anonymous
  return { authMethod: "anonymous" };
}
|
|
785
|
-
|
|
786
|
-
// Helper to validate session and log API usage

/**
 * Resolve the caller's workspace, then record analytics / proxy-log /
 * usage-counter entries for a proxied provider call.
 *
 * Deliberately best-effort: every logging step is wrapped so that a logging
 * failure never blocks the actual API call -- the function always returns
 * { valid: true }. (The `error` field in the return type is currently never
 * populated.)
 *
 * @param ctx      Convex action context.
 * @param request  Incoming HTTP request (headers carry auth + subagent id).
 * @param provider Provider key being proxied (e.g. "openrouter").
 * @param action   Logical action name for logging (e.g. "chat", "search").
 */
async function validateAndLogProxyCall(
  ctx: any,
  request: Request,
  provider: string,
  action: string
): Promise<{ valid: boolean; workspaceId?: string; subagentId?: string; error?: string; authMethod?: string }> {
  const subagentId = request.headers.get("X-APIClaw-Subagent") || "main";

  // Resolve workspace from any auth method
  const auth = await resolveWorkspaceFromRequest(ctx, request);
  const resolvedWorkspaceId = auth.workspaceId;
  // Prefer the raw header for analytics attribution; fall back to the
  // resolved workspace id, then "unknown".
  const identifier = request.headers.get("X-APIClaw-Identifier") || auth.workspaceId || "unknown";

  console.log("[Proxy] Call received", { provider, action, authMethod: auth.authMethod, workspaceId: resolvedWorkspaceId, subagentId });

  // ALWAYS log to analytics (even if identifier is missing)
  try {
    const result = await ctx.runMutation(api.analytics.log, {
      event: "api_call",
      provider,
      identifier: identifier,
      workspaceId: resolvedWorkspaceId as any,
      metadata: { action, subagentId, authMethod: auth.authMethod },
    });
    console.log("[Proxy] Analytics logged:", result);
  } catch (e: any) {
    console.error("[Proxy] Analytics logging failed:", e.message, e.stack);
  }

  // If we have a workspace, log and increment usage
  if (resolvedWorkspaceId) {
    try {
      await ctx.runMutation(api.logs.createProxyLog, {
        workspaceId: resolvedWorkspaceId as any,
        provider,
        action,
        subagentId,
      });

      await ctx.runMutation(api.workspaces.incrementUsage, {
        workspaceId: resolvedWorkspaceId as any,
      });

      console.log("[Proxy] Workspace logged for:", resolvedWorkspaceId);
      return { valid: true, workspaceId: resolvedWorkspaceId, subagentId, authMethod: auth.authMethod };
    } catch (e: any) {
      // Swallowed on purpose: fall through to the unconditional success
      // return below so a logging failure can't break the proxy call.
      console.error("[Proxy] Workspace logging failed:", e.message);
    }
  }

  // Return success regardless (don't block API calls)
  return { valid: true, subagentId, authMethod: auth.authMethod };
}
|
|
840
|
-
|
|
841
|
-
// OPTIONS handler for CORS
|
|
842
|
-
http.route({
|
|
843
|
-
path: "/api/discover",
|
|
844
|
-
method: "OPTIONS",
|
|
845
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
846
|
-
});
|
|
847
|
-
|
|
848
|
-
http.route({
|
|
849
|
-
path: "/api/details",
|
|
850
|
-
method: "OPTIONS",
|
|
851
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
852
|
-
});
|
|
853
|
-
|
|
854
|
-
http.route({
|
|
855
|
-
path: "/api/balance",
|
|
856
|
-
method: "OPTIONS",
|
|
857
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
858
|
-
});
|
|
859
|
-
|
|
860
|
-
http.route({
|
|
861
|
-
path: "/api/purchase",
|
|
862
|
-
method: "OPTIONS",
|
|
863
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
864
|
-
});
|
|
865
|
-
|
|
866
|
-
http.route({
|
|
867
|
-
path: "/admin/grant-credits",
|
|
868
|
-
method: "OPTIONS",
|
|
869
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
870
|
-
});
|
|
871
|
-
|
|
872
|
-
// Full registry discovery — proxies to Vercel catalog (26,704 APIs)
|
|
873
|
-
http.route({
|
|
874
|
-
path: "/v1/discover",
|
|
875
|
-
method: "OPTIONS",
|
|
876
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
877
|
-
});
|
|
878
|
-
|
|
879
|
-
http.route({
|
|
880
|
-
path: "/v1/discover",
|
|
881
|
-
method: "POST",
|
|
882
|
-
handler: httpAction(async (ctx, request) => {
|
|
883
|
-
try {
|
|
884
|
-
const body = await request.json();
|
|
885
|
-
const query = body.query || "";
|
|
886
|
-
const category = body.category || "";
|
|
887
|
-
const callableOnly = body.callable_only ?? false;
|
|
888
|
-
const page = body.page || 1;
|
|
889
|
-
const limit = Math.min(body.limit || 20, 100);
|
|
890
|
-
|
|
891
|
-
// Build query params for the Vercel catalog endpoint
|
|
892
|
-
const params = new URLSearchParams();
|
|
893
|
-
if (query) params.set("q", query);
|
|
894
|
-
if (category) params.set("category", category);
|
|
895
|
-
if (callableOnly) params.set("callable", "true");
|
|
896
|
-
params.set("page", String(page));
|
|
897
|
-
params.set("limit", String(limit));
|
|
898
|
-
|
|
899
|
-
const catalogUrl = `https://apiclaw.cloud/api/catalog?${params.toString()}`;
|
|
900
|
-
const catalogRes = await fetch(catalogUrl);
|
|
901
|
-
|
|
902
|
-
if (!catalogRes.ok) {
|
|
903
|
-
return jsonResponse({ error: "Registry unavailable" }, 502);
|
|
904
|
-
}
|
|
905
|
-
|
|
906
|
-
const catalogData = await catalogRes.json() as {
|
|
907
|
-
items: Array<{ name: string; description: string; category: string; baseUrl: string; docsUrl: string; auth: string; pricing: string; callable?: boolean }>;
|
|
908
|
-
total: number;
|
|
909
|
-
page: number;
|
|
910
|
-
limit: number;
|
|
911
|
-
hasMore: boolean;
|
|
912
|
-
categories: Record<string, { total: number; callable: number }>;
|
|
913
|
-
totalCallable: number;
|
|
914
|
-
};
|
|
915
|
-
|
|
916
|
-
// Also include managed providers from PROVIDERS catalog
|
|
917
|
-
const managedProviders = Object.entries(PROVIDERS).map(([id, p]) => ({
|
|
918
|
-
providerId: id,
|
|
919
|
-
name: p.name,
|
|
920
|
-
description: p.description,
|
|
921
|
-
category: p.category,
|
|
922
|
-
managed: true,
|
|
923
|
-
}));
|
|
924
|
-
|
|
925
|
-
return jsonResponse({
|
|
926
|
-
apis: catalogData.items,
|
|
927
|
-
total: catalogData.total,
|
|
928
|
-
page: catalogData.page,
|
|
929
|
-
limit: catalogData.limit,
|
|
930
|
-
hasMore: catalogData.hasMore,
|
|
931
|
-
categories: catalogData.categories,
|
|
932
|
-
totalCallable: catalogData.totalCallable,
|
|
933
|
-
managedProviders: managedProviders,
|
|
934
|
-
_meta: {
|
|
935
|
-
registry: "26,704 discoverable APIs",
|
|
936
|
-
managed: `${managedProviders.length} managed providers`,
|
|
937
|
-
docs: "https://apiclaw.cloud/docs",
|
|
938
|
-
},
|
|
939
|
-
});
|
|
940
|
-
} catch (e: any) {
|
|
941
|
-
return jsonResponse({ error: "Discovery failed", details: e.message }, 500);
|
|
942
|
-
}
|
|
943
|
-
}),
|
|
944
|
-
});
|
|
945
|
-
|
|
946
|
-
// Discover managed providers only (legacy endpoint)
|
|
947
|
-
http.route({
|
|
948
|
-
path: "/api/discover",
|
|
949
|
-
method: "POST",
|
|
950
|
-
handler: httpAction(async (ctx, request) => {
|
|
951
|
-
try {
|
|
952
|
-
const startTime = Date.now();
|
|
953
|
-
const body = await request.json();
|
|
954
|
-
const query = (body.query || "").toLowerCase();
|
|
955
|
-
|
|
956
|
-
// Get optional auth context
|
|
957
|
-
const sessionToken = request.headers.get("X-APIClaw-Session");
|
|
958
|
-
const userAgent = request.headers.get("User-Agent");
|
|
959
|
-
|
|
960
|
-
const results = Object.entries(PROVIDERS)
|
|
961
|
-
.filter(([id, provider]) => {
|
|
962
|
-
if (!query) return true;
|
|
963
|
-
return (
|
|
964
|
-
provider.name.toLowerCase().includes(query) ||
|
|
965
|
-
provider.description.toLowerCase().includes(query) ||
|
|
966
|
-
provider.category.toLowerCase().includes(query) ||
|
|
967
|
-
provider.tags.some((tag) => tag.includes(query))
|
|
968
|
-
);
|
|
969
|
-
})
|
|
970
|
-
.map(([id, provider]) => ({
|
|
971
|
-
providerId: id,
|
|
972
|
-
...provider,
|
|
973
|
-
}));
|
|
974
|
-
|
|
975
|
-
const responseTimeMs = Date.now() - startTime;
|
|
976
|
-
|
|
977
|
-
// Log the search (fire and forget)
|
|
978
|
-
if (query) {
|
|
979
|
-
ctx.runMutation(internal.searchLogs.logSearch, {
|
|
980
|
-
query: body.query || "", // Original query (not lowercased)
|
|
981
|
-
resultsCount: results.length,
|
|
982
|
-
matchedProviders: results.map(r => r.providerId),
|
|
983
|
-
sessionToken: sessionToken || undefined,
|
|
984
|
-
userAgent: userAgent || undefined,
|
|
985
|
-
responseTimeMs,
|
|
986
|
-
}).catch(() => {}); // Ignore errors, don't block response
|
|
987
|
-
}
|
|
988
|
-
|
|
989
|
-
return jsonResponse({ providers: results, total: results.length });
|
|
990
|
-
} catch (e) {
|
|
991
|
-
return jsonResponse({ error: "Invalid request" }, 400);
|
|
992
|
-
}
|
|
993
|
-
}),
|
|
994
|
-
});
|
|
995
|
-
|
|
996
|
-
// Get provider details
|
|
997
|
-
http.route({
|
|
998
|
-
path: "/api/details",
|
|
999
|
-
method: "POST",
|
|
1000
|
-
handler: httpAction(async (ctx, request) => {
|
|
1001
|
-
try {
|
|
1002
|
-
const body = await request.json();
|
|
1003
|
-
const { providerId } = body;
|
|
1004
|
-
|
|
1005
|
-
if (!providerId) {
|
|
1006
|
-
return jsonResponse({ error: "providerId required" }, 400);
|
|
1007
|
-
}
|
|
1008
|
-
|
|
1009
|
-
const provider = PROVIDERS[providerId as keyof typeof PROVIDERS];
|
|
1010
|
-
if (!provider) {
|
|
1011
|
-
return jsonResponse({ error: "Provider not found" }, 404);
|
|
1012
|
-
}
|
|
1013
|
-
|
|
1014
|
-
return jsonResponse({
|
|
1015
|
-
providerId,
|
|
1016
|
-
...provider,
|
|
1017
|
-
creditsPerDollar: getCreditsPerDollar(providerId),
|
|
1018
|
-
documentation: `https://apiclaw.com/docs/${providerId}`,
|
|
1019
|
-
});
|
|
1020
|
-
} catch (e) {
|
|
1021
|
-
return jsonResponse({ error: "Invalid request" }, 400);
|
|
1022
|
-
}
|
|
1023
|
-
}),
|
|
1024
|
-
});
|
|
1025
|
-
|
|
1026
|
-
// Check balance
|
|
1027
|
-
http.route({
|
|
1028
|
-
path: "/api/balance",
|
|
1029
|
-
method: "GET",
|
|
1030
|
-
handler: httpAction(async (ctx, request) => {
|
|
1031
|
-
const url = new URL(request.url);
|
|
1032
|
-
const agentId = url.searchParams.get("agentId");
|
|
1033
|
-
|
|
1034
|
-
if (!agentId) {
|
|
1035
|
-
return jsonResponse({ error: "agentId required" }, 400);
|
|
1036
|
-
}
|
|
1037
|
-
|
|
1038
|
-
const credits = await ctx.runQuery(api.credits.getAgentCredits, { agentId });
|
|
1039
|
-
|
|
1040
|
-
if (!credits) {
|
|
1041
|
-
return jsonResponse({
|
|
1042
|
-
agentId,
|
|
1043
|
-
balanceUsd: 0,
|
|
1044
|
-
currency: "USD",
|
|
1045
|
-
message: "No account found. Top up to get started!",
|
|
1046
|
-
});
|
|
1047
|
-
}
|
|
1048
|
-
|
|
1049
|
-
return jsonResponse({
|
|
1050
|
-
agentId: credits.agentId,
|
|
1051
|
-
balanceUsd: credits.balanceUsd,
|
|
1052
|
-
currency: credits.currency,
|
|
1053
|
-
});
|
|
1054
|
-
}),
|
|
1055
|
-
});
|
|
1056
|
-
|
|
1057
|
-
// Purchase API access
|
|
1058
|
-
http.route({
|
|
1059
|
-
path: "/api/purchase",
|
|
1060
|
-
method: "POST",
|
|
1061
|
-
handler: httpAction(async (ctx, request) => {
|
|
1062
|
-
try {
|
|
1063
|
-
const body = await request.json();
|
|
1064
|
-
const { agentId, providerId, amountUsd } = body;
|
|
1065
|
-
|
|
1066
|
-
if (!agentId || !providerId || !amountUsd) {
|
|
1067
|
-
return jsonResponse(
|
|
1068
|
-
{ error: "agentId, providerId, and amountUsd required" },
|
|
1069
|
-
400
|
|
1070
|
-
);
|
|
1071
|
-
}
|
|
1072
|
-
|
|
1073
|
-
if (amountUsd < 1 || amountUsd > 1000) {
|
|
1074
|
-
return jsonResponse(
|
|
1075
|
-
{ error: "amountUsd must be between 1 and 1000" },
|
|
1076
|
-
400
|
|
1077
|
-
);
|
|
1078
|
-
}
|
|
1079
|
-
|
|
1080
|
-
const provider = PROVIDERS[providerId as keyof typeof PROVIDERS];
|
|
1081
|
-
if (!provider) {
|
|
1082
|
-
return jsonResponse({ error: "Provider not found" }, 404);
|
|
1083
|
-
}
|
|
1084
|
-
|
|
1085
|
-
// Check balance first
|
|
1086
|
-
const credits = await ctx.runQuery(api.credits.getAgentCredits, { agentId });
|
|
1087
|
-
if (!credits || credits.balanceUsd < amountUsd) {
|
|
1088
|
-
return jsonResponse(
|
|
1089
|
-
{
|
|
1090
|
-
error: "Insufficient balance",
|
|
1091
|
-
currentBalance: credits?.balanceUsd || 0,
|
|
1092
|
-
required: amountUsd,
|
|
1093
|
-
},
|
|
1094
|
-
402
|
|
1095
|
-
);
|
|
1096
|
-
}
|
|
1097
|
-
|
|
1098
|
-
// Execute purchase
|
|
1099
|
-
const purchase = await ctx.runMutation(api.purchases.purchaseAccess, {
|
|
1100
|
-
agentId,
|
|
1101
|
-
providerId,
|
|
1102
|
-
amountUsd,
|
|
1103
|
-
credentials: generateCredentials(providerId),
|
|
1104
|
-
});
|
|
1105
|
-
|
|
1106
|
-
if (!purchase) {
|
|
1107
|
-
return jsonResponse({ error: "Purchase failed" }, 500);
|
|
1108
|
-
}
|
|
1109
|
-
|
|
1110
|
-
return jsonResponse({
|
|
1111
|
-
success: true,
|
|
1112
|
-
purchase: {
|
|
1113
|
-
id: purchase._id,
|
|
1114
|
-
providerId: purchase.providerId,
|
|
1115
|
-
amountUsd: purchase.amountUsd,
|
|
1116
|
-
creditsGranted: purchase.creditsGranted,
|
|
1117
|
-
status: purchase.status,
|
|
1118
|
-
},
|
|
1119
|
-
message: `Successfully purchased $${amountUsd} of ${provider.name} credits`,
|
|
1120
|
-
});
|
|
1121
|
-
} catch (e: any) {
|
|
1122
|
-
return jsonResponse({ error: e.message || "Purchase failed" }, 400);
|
|
1123
|
-
}
|
|
1124
|
-
}),
|
|
1125
|
-
});
|
|
1126
|
-
|
|
1127
|
-
// Admin: Grant credits
|
|
1128
|
-
http.route({
|
|
1129
|
-
path: "/admin/grant-credits",
|
|
1130
|
-
method: "POST",
|
|
1131
|
-
handler: httpAction(async (ctx, request) => {
|
|
1132
|
-
try {
|
|
1133
|
-
const body = await request.json();
|
|
1134
|
-
const { agentId, amount, reason } = body;
|
|
1135
|
-
|
|
1136
|
-
if (!agentId || !amount) {
|
|
1137
|
-
return jsonResponse({ error: "agentId and amount required" }, 400);
|
|
1138
|
-
}
|
|
1139
|
-
|
|
1140
|
-
// TODO: Add admin auth check here
|
|
1141
|
-
// For now, allow grants (this is for Hivr integration)
|
|
1142
|
-
|
|
1143
|
-
const result = await ctx.runMutation(api.credits.addCredits, {
|
|
1144
|
-
agentId,
|
|
1145
|
-
amountUsd: amount,
|
|
1146
|
-
source: reason || "admin_grant",
|
|
1147
|
-
});
|
|
1148
|
-
|
|
1149
|
-
return jsonResponse({
|
|
1150
|
-
success: true,
|
|
1151
|
-
agentId,
|
|
1152
|
-
credited: amount,
|
|
1153
|
-
newBalance: result?.balanceUsd,
|
|
1154
|
-
reason,
|
|
1155
|
-
});
|
|
1156
|
-
} catch (e: any) {
|
|
1157
|
-
return jsonResponse({ error: e.message || "Grant failed" }, 400);
|
|
1158
|
-
}
|
|
1159
|
-
}),
|
|
1160
|
-
});
|
|
1161
|
-
|
|
1162
|
-
// Helper functions
|
|
1163
|
-
function getCreditsPerDollar(providerId: string): number {
|
|
1164
|
-
const rates: Record<string, number> = {
|
|
1165
|
-
"46elks": 30,
|
|
1166
|
-
twilio: 25,
|
|
1167
|
-
resend: 1000,
|
|
1168
|
-
brave_search: 200,
|
|
1169
|
-
openrouter: 100,
|
|
1170
|
-
elevenlabs: 3333,
|
|
1171
|
-
};
|
|
1172
|
-
return rates[providerId] || 100;
|
|
1173
|
-
}
|
|
1174
|
-
|
|
1175
|
-
function generateCredentials(providerId: string): object {
|
|
1176
|
-
// In production, this would generate or retrieve actual API keys
|
|
1177
|
-
// For now, return placeholder indicating how to use
|
|
1178
|
-
return {
|
|
1179
|
-
type: "apiclaw_proxy",
|
|
1180
|
-
endpoint: `https://brilliant-puffin-712.convex.site/proxy/${providerId}`,
|
|
1181
|
-
note: "Use APIClaw proxy endpoint. Credentials managed automatically.",
|
|
1182
|
-
};
|
|
1183
|
-
}
|
|
1184
|
-
|
|
1185
|
-
// The assembled Convex HTTP router.
// NOTE(review): routes registered below this export still take effect since
// registration mutates `http` at module load time -- confirm this matches
// the project's conventions.
export default http;

// ==============================================
// DIRECT CALL PROXY ENDPOINTS
// ==============================================
|
|
1190
|
-
|
|
1191
|
-
// OpenRouter proxy
|
|
1192
|
-
http.route({
|
|
1193
|
-
path: "/proxy/openrouter",
|
|
1194
|
-
method: "POST",
|
|
1195
|
-
handler: httpAction(async (ctx, request) => {
|
|
1196
|
-
// Validate session and log usage
|
|
1197
|
-
await validateAndLogProxyCall(ctx, request, "openrouter", "chat");
|
|
1198
|
-
|
|
1199
|
-
const OPENROUTER_KEY = process.env.OPENROUTER_API_KEY;
|
|
1200
|
-
if (!OPENROUTER_KEY) {
|
|
1201
|
-
return jsonResponse({ error: "OpenRouter not configured" }, 500);
|
|
1202
|
-
}
|
|
1203
|
-
|
|
1204
|
-
try {
|
|
1205
|
-
const body = await request.json();
|
|
1206
|
-
|
|
1207
|
-
const response = await fetch("https://openrouter.ai/api/v1/chat/completions", {
|
|
1208
|
-
method: "POST",
|
|
1209
|
-
headers: {
|
|
1210
|
-
"Authorization": `Bearer ${OPENROUTER_KEY}`,
|
|
1211
|
-
"Content-Type": "application/json",
|
|
1212
|
-
"HTTP-Referer": "https://apiclaw.cloud",
|
|
1213
|
-
"X-Title": "APIClaw",
|
|
1214
|
-
},
|
|
1215
|
-
body: JSON.stringify(body),
|
|
1216
|
-
});
|
|
1217
|
-
|
|
1218
|
-
const data = await response.json();
|
|
1219
|
-
return jsonResponse(data, response.status);
|
|
1220
|
-
} catch (e: any) {
|
|
1221
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1222
|
-
}
|
|
1223
|
-
}),
|
|
1224
|
-
});
|
|
1225
|
-
|
|
1226
|
-
// Brave Search proxy
|
|
1227
|
-
http.route({
|
|
1228
|
-
path: "/proxy/brave_search",
|
|
1229
|
-
method: "POST",
|
|
1230
|
-
handler: httpAction(async (ctx, request) => {
|
|
1231
|
-
// Validate session and log usage
|
|
1232
|
-
await validateAndLogProxyCall(ctx, request, "brave_search", "search");
|
|
1233
|
-
|
|
1234
|
-
const BRAVE_KEY = process.env.BRAVE_API_KEY;
|
|
1235
|
-
if (!BRAVE_KEY) {
|
|
1236
|
-
return jsonResponse({ error: "Brave Search not configured" }, 500);
|
|
1237
|
-
}
|
|
1238
|
-
|
|
1239
|
-
try {
|
|
1240
|
-
const body = await request.json();
|
|
1241
|
-
const { query, count = 10 } = body;
|
|
1242
|
-
|
|
1243
|
-
const url = new URL("https://api.search.brave.com/res/v1/web/search");
|
|
1244
|
-
url.searchParams.set("q", query);
|
|
1245
|
-
url.searchParams.set("count", String(count));
|
|
1246
|
-
|
|
1247
|
-
const response = await fetch(url.toString(), {
|
|
1248
|
-
headers: { "X-Subscription-Token": BRAVE_KEY },
|
|
1249
|
-
});
|
|
1250
|
-
|
|
1251
|
-
const data = await response.json();
|
|
1252
|
-
return jsonResponse(data, response.status);
|
|
1253
|
-
} catch (e: any) {
|
|
1254
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1255
|
-
}
|
|
1256
|
-
}),
|
|
1257
|
-
});
|
|
1258
|
-
|
|
1259
|
-
// Resend email proxy
|
|
1260
|
-
http.route({
|
|
1261
|
-
path: "/proxy/resend",
|
|
1262
|
-
method: "POST",
|
|
1263
|
-
handler: httpAction(async (ctx, request) => {
|
|
1264
|
-
// Validate session and log usage
|
|
1265
|
-
await validateAndLogProxyCall(ctx, request, "resend", "send_email");
|
|
1266
|
-
|
|
1267
|
-
const RESEND_KEY = process.env.RESEND_API_KEY;
|
|
1268
|
-
if (!RESEND_KEY) {
|
|
1269
|
-
return jsonResponse({ error: "Resend not configured" }, 500);
|
|
1270
|
-
}
|
|
1271
|
-
|
|
1272
|
-
try {
|
|
1273
|
-
const body = await request.json();
|
|
1274
|
-
|
|
1275
|
-
const response = await fetch("https://api.resend.com/emails", {
|
|
1276
|
-
method: "POST",
|
|
1277
|
-
headers: {
|
|
1278
|
-
"Authorization": `Bearer ${RESEND_KEY}`,
|
|
1279
|
-
"Content-Type": "application/json",
|
|
1280
|
-
},
|
|
1281
|
-
body: JSON.stringify(body),
|
|
1282
|
-
});
|
|
1283
|
-
|
|
1284
|
-
const data = await response.json();
|
|
1285
|
-
return jsonResponse(data, response.status);
|
|
1286
|
-
} catch (e: any) {
|
|
1287
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1288
|
-
}
|
|
1289
|
-
}),
|
|
1290
|
-
});
|
|
1291
|
-
|
|
1292
|
-
// ElevenLabs TTS proxy
|
|
1293
|
-
http.route({
|
|
1294
|
-
path: "/proxy/elevenlabs",
|
|
1295
|
-
method: "POST",
|
|
1296
|
-
handler: httpAction(async (ctx, request) => {
|
|
1297
|
-
// Validate session and log usage
|
|
1298
|
-
await validateAndLogProxyCall(ctx, request, "elevenlabs", "text_to_speech");
|
|
1299
|
-
|
|
1300
|
-
const ELEVENLABS_KEY = process.env.ELEVENLABS_API_KEY;
|
|
1301
|
-
if (!ELEVENLABS_KEY) {
|
|
1302
|
-
return jsonResponse({ error: "ElevenLabs not configured" }, 500);
|
|
1303
|
-
}
|
|
1304
|
-
|
|
1305
|
-
try {
|
|
1306
|
-
const body = await request.json();
|
|
1307
|
-
const { text, voice_id = "21m00Tcm4TlvDq8ikWAM" } = body;
|
|
1308
|
-
|
|
1309
|
-
const response = await fetch(`https://api.elevenlabs.io/v1/text-to-speech/${voice_id}`, {
|
|
1310
|
-
method: "POST",
|
|
1311
|
-
headers: {
|
|
1312
|
-
"xi-api-key": ELEVENLABS_KEY,
|
|
1313
|
-
"Content-Type": "application/json",
|
|
1314
|
-
},
|
|
1315
|
-
body: JSON.stringify({
|
|
1316
|
-
text,
|
|
1317
|
-
model_id: "eleven_turbo_v2",
|
|
1318
|
-
}),
|
|
1319
|
-
});
|
|
1320
|
-
|
|
1321
|
-
if (!response.ok) {
|
|
1322
|
-
const error = await response.text();
|
|
1323
|
-
return jsonResponse({ error }, response.status);
|
|
1324
|
-
}
|
|
1325
|
-
|
|
1326
|
-
// Return audio as base64
|
|
1327
|
-
const arrayBuffer = await response.arrayBuffer();
|
|
1328
|
-
const base64 = Buffer.from(arrayBuffer).toString("base64");
|
|
1329
|
-
|
|
1330
|
-
return jsonResponse({
|
|
1331
|
-
audio_base64: base64,
|
|
1332
|
-
content_type: "audio/mpeg",
|
|
1333
|
-
});
|
|
1334
|
-
} catch (e: any) {
|
|
1335
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1336
|
-
}
|
|
1337
|
-
}),
|
|
1338
|
-
});
|
|
1339
|
-
|
|
1340
|
-
http.route({
|
|
1341
|
-
path: "/proxy/openrouter",
|
|
1342
|
-
method: "OPTIONS",
|
|
1343
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1344
|
-
});
|
|
1345
|
-
|
|
1346
|
-
http.route({
|
|
1347
|
-
path: "/proxy/brave_search",
|
|
1348
|
-
method: "OPTIONS",
|
|
1349
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1350
|
-
});
|
|
1351
|
-
|
|
1352
|
-
http.route({
|
|
1353
|
-
path: "/proxy/resend",
|
|
1354
|
-
method: "OPTIONS",
|
|
1355
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1356
|
-
});
|
|
1357
|
-
|
|
1358
|
-
http.route({
|
|
1359
|
-
path: "/proxy/elevenlabs",
|
|
1360
|
-
method: "OPTIONS",
|
|
1361
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1362
|
-
});
|
|
1363
|
-
|
|
1364
|
-
// 46elks SMS proxy
|
|
1365
|
-
http.route({
|
|
1366
|
-
path: "/proxy/46elks",
|
|
1367
|
-
method: "POST",
|
|
1368
|
-
handler: httpAction(async (ctx, request) => {
|
|
1369
|
-
// Validate session and log usage
|
|
1370
|
-
await validateAndLogProxyCall(ctx, request, "46elks", "send_sms");
|
|
1371
|
-
|
|
1372
|
-
const ELKS_USER = process.env.ELKS_API_USER;
|
|
1373
|
-
const ELKS_PASS = process.env.ELKS_API_PASSWORD;
|
|
1374
|
-
if (!ELKS_USER || !ELKS_PASS) {
|
|
1375
|
-
return jsonResponse({ error: "46elks not configured" }, 500);
|
|
1376
|
-
}
|
|
1377
|
-
|
|
1378
|
-
try {
|
|
1379
|
-
const body = await request.json();
|
|
1380
|
-
const { to, message, from = "APIClaw" } = body;
|
|
1381
|
-
|
|
1382
|
-
const auth = btoa(`${ELKS_USER}:${ELKS_PASS}`);
|
|
1383
|
-
|
|
1384
|
-
const response = await fetch("https://api.46elks.com/a1/sms", {
|
|
1385
|
-
method: "POST",
|
|
1386
|
-
headers: {
|
|
1387
|
-
"Authorization": `Basic ${auth}`,
|
|
1388
|
-
"Content-Type": "application/x-www-form-urlencoded",
|
|
1389
|
-
},
|
|
1390
|
-
body: new URLSearchParams({ from, to, message }),
|
|
1391
|
-
});
|
|
1392
|
-
|
|
1393
|
-
const data = await response.json();
|
|
1394
|
-
return jsonResponse(data, response.status);
|
|
1395
|
-
} catch (e: any) {
|
|
1396
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1397
|
-
}
|
|
1398
|
-
}),
|
|
1399
|
-
});
|
|
1400
|
-
|
|
1401
|
-
// Twilio SMS proxy
|
|
1402
|
-
http.route({
|
|
1403
|
-
path: "/proxy/twilio",
|
|
1404
|
-
method: "POST",
|
|
1405
|
-
handler: httpAction(async (ctx, request) => {
|
|
1406
|
-
// Validate session and log usage
|
|
1407
|
-
await validateAndLogProxyCall(ctx, request, "twilio", "send_sms");
|
|
1408
|
-
|
|
1409
|
-
const TWILIO_SID = process.env.TWILIO_ACCOUNT_SID;
|
|
1410
|
-
const TWILIO_TOKEN = process.env.TWILIO_AUTH_TOKEN;
|
|
1411
|
-
if (!TWILIO_SID || !TWILIO_TOKEN) {
|
|
1412
|
-
return jsonResponse({ error: "Twilio not configured" }, 500);
|
|
1413
|
-
}
|
|
1414
|
-
|
|
1415
|
-
try {
|
|
1416
|
-
const body = await request.json();
|
|
1417
|
-
const { to, message, from } = body;
|
|
1418
|
-
|
|
1419
|
-
if (!from) {
|
|
1420
|
-
return jsonResponse({ error: "Twilio requires 'from' number" }, 400);
|
|
1421
|
-
}
|
|
1422
|
-
|
|
1423
|
-
const auth = btoa(`${TWILIO_SID}:${TWILIO_TOKEN}`);
|
|
1424
|
-
|
|
1425
|
-
const response = await fetch(
|
|
1426
|
-
`https://api.twilio.com/2010-04-01/Accounts/${TWILIO_SID}/Messages.json`,
|
|
1427
|
-
{
|
|
1428
|
-
method: "POST",
|
|
1429
|
-
headers: {
|
|
1430
|
-
"Authorization": `Basic ${auth}`,
|
|
1431
|
-
"Content-Type": "application/x-www-form-urlencoded",
|
|
1432
|
-
},
|
|
1433
|
-
body: new URLSearchParams({ To: to, From: from, Body: message }),
|
|
1434
|
-
}
|
|
1435
|
-
);
|
|
1436
|
-
|
|
1437
|
-
const data = await response.json();
|
|
1438
|
-
return jsonResponse(data, response.status);
|
|
1439
|
-
} catch (e: any) {
|
|
1440
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1441
|
-
}
|
|
1442
|
-
}),
|
|
1443
|
-
});
|
|
1444
|
-
|
|
1445
|
-
// CORS for new endpoints
|
|
1446
|
-
http.route({
|
|
1447
|
-
path: "/proxy/46elks",
|
|
1448
|
-
method: "OPTIONS",
|
|
1449
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1450
|
-
});
|
|
1451
|
-
|
|
1452
|
-
http.route({
|
|
1453
|
-
path: "/proxy/twilio",
|
|
1454
|
-
method: "OPTIONS",
|
|
1455
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1456
|
-
});
|
|
1457
|
-
|
|
1458
|
-
// GitHub API proxy
|
|
1459
|
-
http.route({
|
|
1460
|
-
path: "/proxy/github",
|
|
1461
|
-
method: "POST",
|
|
1462
|
-
handler: httpAction(async (ctx, request) => {
|
|
1463
|
-
// Validate session and log usage
|
|
1464
|
-
const body = await request.json();
|
|
1465
|
-
const action = body.action || "search_repos";
|
|
1466
|
-
await validateAndLogProxyCall(ctx, request, "github", action);
|
|
1467
|
-
|
|
1468
|
-
const GITHUB_TOKEN = process.env.GITHUB_TOKEN;
|
|
1469
|
-
if (!GITHUB_TOKEN) {
|
|
1470
|
-
return jsonResponse({ error: "GitHub not configured" }, 500);
|
|
1471
|
-
}
|
|
1472
|
-
|
|
1473
|
-
try {
|
|
1474
|
-
const { action, ...params } = body;
|
|
1475
|
-
let url: string;
|
|
1476
|
-
let method = "GET";
|
|
1477
|
-
let fetchBody: string | undefined;
|
|
1478
|
-
|
|
1479
|
-
// Route based on action
|
|
1480
|
-
switch (action) {
|
|
1481
|
-
case "search_repos":
|
|
1482
|
-
const { query, sort = "stars", limit = 10 } = params;
|
|
1483
|
-
url = `https://api.github.com/search/repositories?q=${encodeURIComponent(query)}&sort=${sort}&per_page=${limit}`;
|
|
1484
|
-
break;
|
|
1485
|
-
|
|
1486
|
-
case "get_repo":
|
|
1487
|
-
const { owner, repo } = params;
|
|
1488
|
-
url = `https://api.github.com/repos/${owner}/${repo}`;
|
|
1489
|
-
break;
|
|
1490
|
-
|
|
1491
|
-
case "list_issues":
|
|
1492
|
-
const { owner: issueOwner, repo: issueRepo, state = "open", limit: issueLimit = 10 } = params;
|
|
1493
|
-
url = `https://api.github.com/repos/${issueOwner}/${issueRepo}/issues?state=${state}&per_page=${issueLimit}`;
|
|
1494
|
-
break;
|
|
1495
|
-
|
|
1496
|
-
case "create_issue":
|
|
1497
|
-
const { owner: createOwner, repo: createRepo, title, body: issueBody = "" } = params;
|
|
1498
|
-
url = `https://api.github.com/repos/${createOwner}/${createRepo}/issues`;
|
|
1499
|
-
method = "POST";
|
|
1500
|
-
fetchBody = JSON.stringify({ title, body: issueBody });
|
|
1501
|
-
break;
|
|
1502
|
-
|
|
1503
|
-
case "get_file":
|
|
1504
|
-
const { owner: fileOwner, repo: fileRepo, path } = params;
|
|
1505
|
-
url = `https://api.github.com/repos/${fileOwner}/${fileRepo}/contents/${path}`;
|
|
1506
|
-
break;
|
|
1507
|
-
|
|
1508
|
-
default:
|
|
1509
|
-
return jsonResponse({ error: `Unknown action: ${action}` }, 400);
|
|
1510
|
-
}
|
|
1511
|
-
|
|
1512
|
-
const response = await fetch(url, {
|
|
1513
|
-
method,
|
|
1514
|
-
headers: {
|
|
1515
|
-
"Authorization": `Bearer ${GITHUB_TOKEN}`,
|
|
1516
|
-
"Accept": "application/vnd.github+json",
|
|
1517
|
-
"User-Agent": "APIClaw",
|
|
1518
|
-
...(fetchBody ? { "Content-Type": "application/json" } : {}),
|
|
1519
|
-
},
|
|
1520
|
-
...(fetchBody ? { body: fetchBody } : {}),
|
|
1521
|
-
});
|
|
1522
|
-
|
|
1523
|
-
const data = await response.json();
|
|
1524
|
-
return jsonResponse(data, response.status);
|
|
1525
|
-
} catch (e: any) {
|
|
1526
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1527
|
-
}
|
|
1528
|
-
}),
|
|
1529
|
-
});
|
|
1530
|
-
|
|
1531
|
-
http.route({
|
|
1532
|
-
path: "/proxy/github",
|
|
1533
|
-
method: "OPTIONS",
|
|
1534
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1535
|
-
});
|
|
1536
|
-
|
|
1537
|
-
// ==============================================
|
|
1538
|
-
// SERPER (Google Search) PROXY
|
|
1539
|
-
// ==============================================
|
|
1540
|
-
http.route({
|
|
1541
|
-
path: "/proxy/serper",
|
|
1542
|
-
method: "POST",
|
|
1543
|
-
handler: httpAction(async (ctx, request) => {
|
|
1544
|
-
await validateAndLogProxyCall(ctx, request, "serper", "search");
|
|
1545
|
-
const SERPER_KEY = process.env.SERPER_API_KEY;
|
|
1546
|
-
if (!SERPER_KEY) {
|
|
1547
|
-
return jsonResponse({ error: "Serper not configured" }, 500);
|
|
1548
|
-
}
|
|
1549
|
-
try {
|
|
1550
|
-
const body = await request.json();
|
|
1551
|
-
const { query, q, num = 10, gl = "us", hl = "en" } = body;
|
|
1552
|
-
const searchQuery = query || q;
|
|
1553
|
-
if (!searchQuery) {
|
|
1554
|
-
return jsonResponse({ error: "query required" }, 400);
|
|
1555
|
-
}
|
|
1556
|
-
const response = await fetch("https://google.serper.dev/search", {
|
|
1557
|
-
method: "POST",
|
|
1558
|
-
headers: {
|
|
1559
|
-
"X-API-KEY": SERPER_KEY,
|
|
1560
|
-
"Content-Type": "application/json",
|
|
1561
|
-
},
|
|
1562
|
-
body: JSON.stringify({ q: searchQuery, num, gl, hl }),
|
|
1563
|
-
});
|
|
1564
|
-
const data = await response.json();
|
|
1565
|
-
return jsonResponse(data, response.status);
|
|
1566
|
-
} catch (e: any) {
|
|
1567
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1568
|
-
}
|
|
1569
|
-
}),
|
|
1570
|
-
});
|
|
1571
|
-
|
|
1572
|
-
http.route({
|
|
1573
|
-
path: "/proxy/serper",
|
|
1574
|
-
method: "OPTIONS",
|
|
1575
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1576
|
-
});
|
|
1577
|
-
|
|
1578
|
-
// ==============================================
|
|
1579
|
-
// FIRECRAWL (Web Scraping) PROXY
|
|
1580
|
-
// ==============================================
|
|
1581
|
-
http.route({
|
|
1582
|
-
path: "/proxy/firecrawl",
|
|
1583
|
-
method: "POST",
|
|
1584
|
-
handler: httpAction(async (ctx, request) => {
|
|
1585
|
-
await validateAndLogProxyCall(ctx, request, "firecrawl", "scrape");
|
|
1586
|
-
const FIRECRAWL_KEY = process.env.FIRECRAWL_API_KEY;
|
|
1587
|
-
if (!FIRECRAWL_KEY) {
|
|
1588
|
-
return jsonResponse({ error: "Firecrawl not configured" }, 500);
|
|
1589
|
-
}
|
|
1590
|
-
try {
|
|
1591
|
-
const body = await request.json();
|
|
1592
|
-
const { url, formats = ["markdown"], onlyMainContent = true } = body;
|
|
1593
|
-
if (!url) {
|
|
1594
|
-
return jsonResponse({ error: "url required" }, 400);
|
|
1595
|
-
}
|
|
1596
|
-
const response = await fetch("https://api.firecrawl.dev/v1/scrape", {
|
|
1597
|
-
method: "POST",
|
|
1598
|
-
headers: {
|
|
1599
|
-
Authorization: `Bearer ${FIRECRAWL_KEY}`,
|
|
1600
|
-
"Content-Type": "application/json",
|
|
1601
|
-
},
|
|
1602
|
-
body: JSON.stringify({ url, formats, onlyMainContent }),
|
|
1603
|
-
});
|
|
1604
|
-
const data = await response.json();
|
|
1605
|
-
return jsonResponse(data, response.status);
|
|
1606
|
-
} catch (e: any) {
|
|
1607
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1608
|
-
}
|
|
1609
|
-
}),
|
|
1610
|
-
});
|
|
1611
|
-
|
|
1612
|
-
http.route({
|
|
1613
|
-
path: "/proxy/firecrawl",
|
|
1614
|
-
method: "OPTIONS",
|
|
1615
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1616
|
-
});
|
|
1617
|
-
|
|
1618
|
-
// ==============================================
|
|
1619
|
-
// GROQ (LLM) PROXY
|
|
1620
|
-
// ==============================================
|
|
1621
|
-
http.route({
|
|
1622
|
-
path: "/proxy/groq",
|
|
1623
|
-
method: "POST",
|
|
1624
|
-
handler: httpAction(async (ctx, request) => {
|
|
1625
|
-
await validateAndLogProxyCall(ctx, request, "groq", "chat");
|
|
1626
|
-
const GROQ_KEY = process.env.GROQ_API_KEY;
|
|
1627
|
-
if (!GROQ_KEY) {
|
|
1628
|
-
return jsonResponse({ error: "Groq not configured" }, 500);
|
|
1629
|
-
}
|
|
1630
|
-
try {
|
|
1631
|
-
const body = await request.json();
|
|
1632
|
-
const { model = "llama-3.3-70b-versatile", messages, temperature = 0.7, max_tokens = 1024 } = body;
|
|
1633
|
-
if (!messages) {
|
|
1634
|
-
return jsonResponse({ error: "messages required" }, 400);
|
|
1635
|
-
}
|
|
1636
|
-
const response = await fetch("https://api.groq.com/openai/v1/chat/completions", {
|
|
1637
|
-
method: "POST",
|
|
1638
|
-
headers: {
|
|
1639
|
-
Authorization: `Bearer ${GROQ_KEY}`,
|
|
1640
|
-
"Content-Type": "application/json",
|
|
1641
|
-
},
|
|
1642
|
-
body: JSON.stringify({ model, messages, temperature, max_tokens }),
|
|
1643
|
-
});
|
|
1644
|
-
const data = await response.json();
|
|
1645
|
-
return jsonResponse(data, response.status);
|
|
1646
|
-
} catch (e: any) {
|
|
1647
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1648
|
-
}
|
|
1649
|
-
}),
|
|
1650
|
-
});
|
|
1651
|
-
|
|
1652
|
-
http.route({
|
|
1653
|
-
path: "/proxy/groq",
|
|
1654
|
-
method: "OPTIONS",
|
|
1655
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1656
|
-
});
|
|
1657
|
-
|
|
1658
|
-
// ==============================================
|
|
1659
|
-
// MISTRAL (LLM/Embeddings) PROXY
|
|
1660
|
-
// ==============================================
|
|
1661
|
-
http.route({
|
|
1662
|
-
path: "/proxy/mistral",
|
|
1663
|
-
method: "POST",
|
|
1664
|
-
handler: httpAction(async (ctx, request) => {
|
|
1665
|
-
await validateAndLogProxyCall(ctx, request, "mistral", "chat");
|
|
1666
|
-
const MISTRAL_KEY = process.env.MISTRAL_API_KEY;
|
|
1667
|
-
if (!MISTRAL_KEY) {
|
|
1668
|
-
return jsonResponse({ error: "Mistral not configured" }, 500);
|
|
1669
|
-
}
|
|
1670
|
-
try {
|
|
1671
|
-
const body = await request.json();
|
|
1672
|
-
const { model = "mistral-small-latest", messages, temperature = 0.7, max_tokens = 1024 } = body;
|
|
1673
|
-
if (!messages) {
|
|
1674
|
-
return jsonResponse({ error: "messages required" }, 400);
|
|
1675
|
-
}
|
|
1676
|
-
const response = await fetch("https://api.mistral.ai/v1/chat/completions", {
|
|
1677
|
-
method: "POST",
|
|
1678
|
-
headers: {
|
|
1679
|
-
Authorization: `Bearer ${MISTRAL_KEY}`,
|
|
1680
|
-
"Content-Type": "application/json",
|
|
1681
|
-
},
|
|
1682
|
-
body: JSON.stringify({ model, messages, temperature, max_tokens }),
|
|
1683
|
-
});
|
|
1684
|
-
const data = await response.json();
|
|
1685
|
-
return jsonResponse(data, response.status);
|
|
1686
|
-
} catch (e: any) {
|
|
1687
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1688
|
-
}
|
|
1689
|
-
}),
|
|
1690
|
-
});
|
|
1691
|
-
|
|
1692
|
-
http.route({
|
|
1693
|
-
path: "/proxy/mistral",
|
|
1694
|
-
method: "OPTIONS",
|
|
1695
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1696
|
-
});
|
|
1697
|
-
|
|
1698
|
-
// ==============================================
|
|
1699
|
-
// COHERE (LLM/Rerank) PROXY
|
|
1700
|
-
// ==============================================
|
|
1701
|
-
http.route({
|
|
1702
|
-
path: "/proxy/cohere",
|
|
1703
|
-
method: "POST",
|
|
1704
|
-
handler: httpAction(async (ctx, request) => {
|
|
1705
|
-
await validateAndLogProxyCall(ctx, request, "cohere", "chat");
|
|
1706
|
-
const COHERE_KEY = process.env.COHERE_API_KEY;
|
|
1707
|
-
if (!COHERE_KEY) {
|
|
1708
|
-
return jsonResponse({ error: "Cohere not configured" }, 500);
|
|
1709
|
-
}
|
|
1710
|
-
try {
|
|
1711
|
-
const body = await request.json();
|
|
1712
|
-
const { model = "command-a-03-2025", message, chat_history, temperature = 0.7, max_tokens = 1024 } = body;
|
|
1713
|
-
if (!message) {
|
|
1714
|
-
return jsonResponse({ error: "message required" }, 400);
|
|
1715
|
-
}
|
|
1716
|
-
const response = await fetch("https://api.cohere.com/v2/chat", {
|
|
1717
|
-
method: "POST",
|
|
1718
|
-
headers: {
|
|
1719
|
-
Authorization: `Bearer ${COHERE_KEY}`,
|
|
1720
|
-
"Content-Type": "application/json",
|
|
1721
|
-
},
|
|
1722
|
-
body: JSON.stringify({ model, message, chat_history, temperature, max_tokens }),
|
|
1723
|
-
});
|
|
1724
|
-
const data = await response.json();
|
|
1725
|
-
return jsonResponse(data, response.status);
|
|
1726
|
-
} catch (e: any) {
|
|
1727
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1728
|
-
}
|
|
1729
|
-
}),
|
|
1730
|
-
});
|
|
1731
|
-
|
|
1732
|
-
http.route({
|
|
1733
|
-
path: "/proxy/cohere",
|
|
1734
|
-
method: "OPTIONS",
|
|
1735
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1736
|
-
});
|
|
1737
|
-
|
|
1738
|
-
// ==============================================
|
|
1739
|
-
// REPLICATE (ML Models) PROXY
|
|
1740
|
-
// ==============================================
|
|
1741
|
-
http.route({
|
|
1742
|
-
path: "/proxy/replicate",
|
|
1743
|
-
method: "POST",
|
|
1744
|
-
handler: httpAction(async (ctx, request) => {
|
|
1745
|
-
await validateAndLogProxyCall(ctx, request, "replicate", "prediction");
|
|
1746
|
-
const REPLICATE_KEY = process.env.REPLICATE_API_TOKEN;
|
|
1747
|
-
if (!REPLICATE_KEY) {
|
|
1748
|
-
return jsonResponse({ error: "Replicate not configured" }, 500);
|
|
1749
|
-
}
|
|
1750
|
-
try {
|
|
1751
|
-
const body = await request.json();
|
|
1752
|
-
const { model, input, version } = body;
|
|
1753
|
-
if (!model && !version) {
|
|
1754
|
-
return jsonResponse({ error: "model or version required" }, 400);
|
|
1755
|
-
}
|
|
1756
|
-
const endpoint = version
|
|
1757
|
-
? "https://api.replicate.com/v1/predictions"
|
|
1758
|
-
: `https://api.replicate.com/v1/models/${model}/predictions`;
|
|
1759
|
-
const payload = version ? { version, input } : { input };
|
|
1760
|
-
const response = await fetch(endpoint, {
|
|
1761
|
-
method: "POST",
|
|
1762
|
-
headers: {
|
|
1763
|
-
Authorization: `Bearer ${REPLICATE_KEY}`,
|
|
1764
|
-
"Content-Type": "application/json",
|
|
1765
|
-
Prefer: "wait",
|
|
1766
|
-
},
|
|
1767
|
-
body: JSON.stringify(payload),
|
|
1768
|
-
});
|
|
1769
|
-
const data = await response.json();
|
|
1770
|
-
return jsonResponse(data, response.status);
|
|
1771
|
-
} catch (e: any) {
|
|
1772
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1773
|
-
}
|
|
1774
|
-
}),
|
|
1775
|
-
});
|
|
1776
|
-
|
|
1777
|
-
http.route({
|
|
1778
|
-
path: "/proxy/replicate",
|
|
1779
|
-
method: "OPTIONS",
|
|
1780
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1781
|
-
});
|
|
1782
|
-
|
|
1783
|
-
// ==============================================
|
|
1784
|
-
// DEEPGRAM (Speech-to-Text) PROXY
|
|
1785
|
-
// ==============================================
|
|
1786
|
-
http.route({
|
|
1787
|
-
path: "/proxy/deepgram",
|
|
1788
|
-
method: "POST",
|
|
1789
|
-
handler: httpAction(async (ctx, request) => {
|
|
1790
|
-
await validateAndLogProxyCall(ctx, request, "deepgram", "transcribe");
|
|
1791
|
-
const DEEPGRAM_KEY = process.env.DEEPGRAM_API_KEY;
|
|
1792
|
-
if (!DEEPGRAM_KEY) {
|
|
1793
|
-
return jsonResponse({ error: "Deepgram not configured" }, 500);
|
|
1794
|
-
}
|
|
1795
|
-
try {
|
|
1796
|
-
const body = await request.json();
|
|
1797
|
-
const { url, model = "nova-3", language = "en", smart_format = true } = body;
|
|
1798
|
-
if (!url) {
|
|
1799
|
-
return jsonResponse({ error: "url required (audio file URL)" }, 400);
|
|
1800
|
-
}
|
|
1801
|
-
const params = new URLSearchParams({
|
|
1802
|
-
model,
|
|
1803
|
-
language,
|
|
1804
|
-
smart_format: String(smart_format),
|
|
1805
|
-
});
|
|
1806
|
-
const response = await fetch(
|
|
1807
|
-
`https://api.deepgram.com/v1/listen?${params}`,
|
|
1808
|
-
{
|
|
1809
|
-
method: "POST",
|
|
1810
|
-
headers: {
|
|
1811
|
-
Authorization: `Token ${DEEPGRAM_KEY}`,
|
|
1812
|
-
"Content-Type": "application/json",
|
|
1813
|
-
},
|
|
1814
|
-
body: JSON.stringify({ url }),
|
|
1815
|
-
}
|
|
1816
|
-
);
|
|
1817
|
-
const data = await response.json();
|
|
1818
|
-
return jsonResponse(data, response.status);
|
|
1819
|
-
} catch (e: any) {
|
|
1820
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1821
|
-
}
|
|
1822
|
-
}),
|
|
1823
|
-
});
|
|
1824
|
-
|
|
1825
|
-
http.route({
|
|
1826
|
-
path: "/proxy/deepgram",
|
|
1827
|
-
method: "OPTIONS",
|
|
1828
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1829
|
-
});
|
|
1830
|
-
|
|
1831
|
-
// ==============================================
|
|
1832
|
-
// E2B (Code Sandbox) PROXY
|
|
1833
|
-
// ==============================================
|
|
1834
|
-
http.route({
|
|
1835
|
-
path: "/proxy/e2b",
|
|
1836
|
-
method: "POST",
|
|
1837
|
-
handler: httpAction(async (ctx, request) => {
|
|
1838
|
-
await validateAndLogProxyCall(ctx, request, "e2b", "execute");
|
|
1839
|
-
const E2B_KEY = process.env.E2B_API_KEY;
|
|
1840
|
-
if (!E2B_KEY) {
|
|
1841
|
-
return jsonResponse({ error: "E2B not configured" }, 500);
|
|
1842
|
-
}
|
|
1843
|
-
try {
|
|
1844
|
-
const body = await request.json();
|
|
1845
|
-
const { code, language = "python", template = "base" } = body;
|
|
1846
|
-
if (!code) {
|
|
1847
|
-
return jsonResponse({ error: "code required" }, 400);
|
|
1848
|
-
}
|
|
1849
|
-
const response = await fetch("https://api.e2b.dev/sandboxes", {
|
|
1850
|
-
method: "POST",
|
|
1851
|
-
headers: {
|
|
1852
|
-
"X-API-Key": E2B_KEY,
|
|
1853
|
-
"Content-Type": "application/json",
|
|
1854
|
-
},
|
|
1855
|
-
body: JSON.stringify({ templateID: template, metadata: { language } }),
|
|
1856
|
-
});
|
|
1857
|
-
const sandbox = await response.json();
|
|
1858
|
-
if (!response.ok) {
|
|
1859
|
-
return jsonResponse(sandbox, response.status);
|
|
1860
|
-
}
|
|
1861
|
-
const execResponse = await fetch(
|
|
1862
|
-
`https://api.e2b.dev/sandboxes/${sandbox.sandboxID}/code/execution`,
|
|
1863
|
-
{
|
|
1864
|
-
method: "POST",
|
|
1865
|
-
headers: {
|
|
1866
|
-
"X-API-Key": E2B_KEY,
|
|
1867
|
-
"Content-Type": "application/json",
|
|
1868
|
-
},
|
|
1869
|
-
body: JSON.stringify({ code, language }),
|
|
1870
|
-
}
|
|
1871
|
-
);
|
|
1872
|
-
const result = await execResponse.json();
|
|
1873
|
-
return jsonResponse(result, execResponse.status);
|
|
1874
|
-
} catch (e: any) {
|
|
1875
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1876
|
-
}
|
|
1877
|
-
}),
|
|
1878
|
-
});
|
|
1879
|
-
|
|
1880
|
-
http.route({
|
|
1881
|
-
path: "/proxy/e2b",
|
|
1882
|
-
method: "OPTIONS",
|
|
1883
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1884
|
-
});
|
|
1885
|
-
|
|
1886
|
-
// ==============================================
|
|
1887
|
-
// TOGETHER AI (Open-source LLM Inference) PROXY
|
|
1888
|
-
// ==============================================
|
|
1889
|
-
http.route({
|
|
1890
|
-
path: "/proxy/together",
|
|
1891
|
-
method: "POST",
|
|
1892
|
-
handler: httpAction(async (ctx, request) => {
|
|
1893
|
-
await validateAndLogProxyCall(ctx, request, "together", "chat");
|
|
1894
|
-
const TOGETHER_KEY = process.env.TOGETHER_API_KEY;
|
|
1895
|
-
if (!TOGETHER_KEY) {
|
|
1896
|
-
return jsonResponse({ error: "Together AI not configured" }, 500);
|
|
1897
|
-
}
|
|
1898
|
-
try {
|
|
1899
|
-
const body = await request.json();
|
|
1900
|
-
const { model = "meta-llama/Llama-3.3-70B-Instruct-Turbo", messages, temperature = 0.7, max_tokens = 1024 } = body;
|
|
1901
|
-
if (!messages || !Array.isArray(messages)) {
|
|
1902
|
-
return jsonResponse({ error: "messages array required" }, 400);
|
|
1903
|
-
}
|
|
1904
|
-
const response = await fetch("https://api.together.xyz/v1/chat/completions", {
|
|
1905
|
-
method: "POST",
|
|
1906
|
-
headers: {
|
|
1907
|
-
Authorization: `Bearer ${TOGETHER_KEY}`,
|
|
1908
|
-
"Content-Type": "application/json",
|
|
1909
|
-
},
|
|
1910
|
-
body: JSON.stringify({ model, messages, temperature, max_tokens }),
|
|
1911
|
-
});
|
|
1912
|
-
const data = await response.json();
|
|
1913
|
-
return jsonResponse(data, response.status);
|
|
1914
|
-
} catch (e: any) {
|
|
1915
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1916
|
-
}
|
|
1917
|
-
}),
|
|
1918
|
-
});
|
|
1919
|
-
|
|
1920
|
-
http.route({
|
|
1921
|
-
path: "/proxy/together",
|
|
1922
|
-
method: "OPTIONS",
|
|
1923
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1924
|
-
});
|
|
1925
|
-
|
|
1926
|
-
// ==============================================
|
|
1927
|
-
// STABILITY AI (Image Generation) PROXY
|
|
1928
|
-
// ==============================================
|
|
1929
|
-
http.route({
|
|
1930
|
-
path: "/proxy/stability",
|
|
1931
|
-
method: "POST",
|
|
1932
|
-
handler: httpAction(async (ctx, request) => {
|
|
1933
|
-
await validateAndLogProxyCall(ctx, request, "stability", "generate");
|
|
1934
|
-
const STABILITY_KEY = process.env.STABILITY_API_KEY;
|
|
1935
|
-
if (!STABILITY_KEY) {
|
|
1936
|
-
return jsonResponse({ error: "Stability AI not configured" }, 500);
|
|
1937
|
-
}
|
|
1938
|
-
try {
|
|
1939
|
-
const body = await request.json();
|
|
1940
|
-
const { prompt, model = "sd3.5-large", output_format = "png", aspect_ratio = "1:1" } = body;
|
|
1941
|
-
if (!prompt) {
|
|
1942
|
-
return jsonResponse({ error: "prompt required" }, 400);
|
|
1943
|
-
}
|
|
1944
|
-
const formData = new FormData();
|
|
1945
|
-
formData.append("prompt", prompt);
|
|
1946
|
-
formData.append("output_format", output_format);
|
|
1947
|
-
formData.append("aspect_ratio", aspect_ratio);
|
|
1948
|
-
const response = await fetch(
|
|
1949
|
-
`https://api.stability.ai/v2beta/stable-image/generate/${model}`,
|
|
1950
|
-
{
|
|
1951
|
-
method: "POST",
|
|
1952
|
-
headers: {
|
|
1953
|
-
Authorization: `Bearer ${STABILITY_KEY}`,
|
|
1954
|
-
Accept: "application/json",
|
|
1955
|
-
},
|
|
1956
|
-
body: formData,
|
|
1957
|
-
}
|
|
1958
|
-
);
|
|
1959
|
-
const data = await response.json();
|
|
1960
|
-
return jsonResponse(data, response.status);
|
|
1961
|
-
} catch (e: any) {
|
|
1962
|
-
return jsonResponse({ error: e.message }, 500);
|
|
1963
|
-
}
|
|
1964
|
-
}),
|
|
1965
|
-
});
|
|
1966
|
-
|
|
1967
|
-
http.route({
|
|
1968
|
-
path: "/proxy/stability",
|
|
1969
|
-
method: "OPTIONS",
|
|
1970
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
1971
|
-
});
|
|
1972
|
-
|
|
1973
|
-
// ==============================================
|
|
1974
|
-
// ASSEMBLYAI (Audio Intelligence) PROXY
|
|
1975
|
-
// ==============================================
|
|
1976
|
-
http.route({
|
|
1977
|
-
path: "/proxy/assemblyai",
|
|
1978
|
-
method: "POST",
|
|
1979
|
-
handler: httpAction(async (ctx, request) => {
|
|
1980
|
-
await validateAndLogProxyCall(ctx, request, "assemblyai", "transcribe");
|
|
1981
|
-
const ASSEMBLYAI_KEY = process.env.ASSEMBLYAI_API_KEY;
|
|
1982
|
-
if (!ASSEMBLYAI_KEY) {
|
|
1983
|
-
return jsonResponse({ error: "AssemblyAI not configured" }, 500);
|
|
1984
|
-
}
|
|
1985
|
-
try {
|
|
1986
|
-
const body = await request.json();
|
|
1987
|
-
const { audio_url, language_detection = true, speaker_labels = true } = body;
|
|
1988
|
-
if (!audio_url) {
|
|
1989
|
-
return jsonResponse({ error: "audio_url required" }, 400);
|
|
1990
|
-
}
|
|
1991
|
-
const response = await fetch("https://api.assemblyai.com/v2/transcript", {
|
|
1992
|
-
method: "POST",
|
|
1993
|
-
headers: {
|
|
1994
|
-
Authorization: ASSEMBLYAI_KEY,
|
|
1995
|
-
"Content-Type": "application/json",
|
|
1996
|
-
},
|
|
1997
|
-
body: JSON.stringify({ audio_url, language_detection, speaker_labels }),
|
|
1998
|
-
});
|
|
1999
|
-
const data = await response.json();
|
|
2000
|
-
return jsonResponse(data, response.status);
|
|
2001
|
-
} catch (e: any) {
|
|
2002
|
-
return jsonResponse({ error: e.message }, 500);
|
|
2003
|
-
}
|
|
2004
|
-
}),
|
|
2005
|
-
});
|
|
2006
|
-
|
|
2007
|
-
http.route({
|
|
2008
|
-
path: "/proxy/assemblyai",
|
|
2009
|
-
method: "OPTIONS",
|
|
2010
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
2011
|
-
});
|
|
2012
|
-
|
|
2013
|
-
// ==============================================
|
|
2014
|
-
// APILAYER (Multi-API: Exchange, Stocks, Aviation, etc.) PROXY
|
|
2015
|
-
// ==============================================
|
|
2016
|
-
http.route({
|
|
2017
|
-
path: "/proxy/apilayer",
|
|
2018
|
-
method: "POST",
|
|
2019
|
-
handler: httpAction(async (ctx, request) => {
|
|
2020
|
-
await validateAndLogProxyCall(ctx, request, "apilayer", "call");
|
|
2021
|
-
const APILAYER_KEY = process.env.APILAYER_API_KEY;
|
|
2022
|
-
if (!APILAYER_KEY) {
|
|
2023
|
-
return jsonResponse({ error: "APILayer not configured" }, 500);
|
|
2024
|
-
}
|
|
2025
|
-
try {
|
|
2026
|
-
const body = await request.json();
|
|
2027
|
-
const { service, endpoint, params = {} } = body;
|
|
2028
|
-
if (!service || !endpoint) {
|
|
2029
|
-
return jsonResponse({ error: "service and endpoint required (e.g. service:'exchangerates', endpoint:'/latest')" }, 400);
|
|
2030
|
-
}
|
|
2031
|
-
const queryString = new URLSearchParams(params).toString();
|
|
2032
|
-
const url = `https://api.apilayer.com/${service}${endpoint}${queryString ? '?' + queryString : ''}`;
|
|
2033
|
-
const response = await fetch(url, {
|
|
2034
|
-
method: "GET",
|
|
2035
|
-
headers: {
|
|
2036
|
-
apikey: APILAYER_KEY,
|
|
2037
|
-
},
|
|
2038
|
-
});
|
|
2039
|
-
const data = await response.json();
|
|
2040
|
-
return jsonResponse(data, response.status);
|
|
2041
|
-
} catch (e: any) {
|
|
2042
|
-
return jsonResponse({ error: e.message }, 500);
|
|
2043
|
-
}
|
|
2044
|
-
}),
|
|
2045
|
-
});
|
|
2046
|
-
|
|
2047
|
-
http.route({
|
|
2048
|
-
path: "/proxy/apilayer",
|
|
2049
|
-
method: "OPTIONS",
|
|
2050
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
2051
|
-
});
|
|
2052
|
-
|
|
2053
|
-
// ==============================================
|
|
2054
|
-
// WORKSPACE / MAGIC LINK ENDPOINTS
|
|
2055
|
-
// ==============================================
|
|
2056
|
-
|
|
2057
|
-
// Create magic link and send email
|
|
2058
|
-
http.route({
|
|
2059
|
-
path: "/workspace/magic-link",
|
|
2060
|
-
method: "POST",
|
|
2061
|
-
handler: httpAction(async (ctx, request) => {
|
|
2062
|
-
try {
|
|
2063
|
-
const body = await request.json();
|
|
2064
|
-
const { email, fingerprint } = body;
|
|
2065
|
-
|
|
2066
|
-
if (!email || !email.includes("@")) {
|
|
2067
|
-
return jsonResponse({ error: "Valid email required" }, 400);
|
|
2068
|
-
}
|
|
2069
|
-
|
|
2070
|
-
// Create magic link
|
|
2071
|
-
const result = await ctx.runMutation(api.workspaces.createMagicLink, {
|
|
2072
|
-
email: email.toLowerCase(),
|
|
2073
|
-
fingerprint,
|
|
2074
|
-
});
|
|
2075
|
-
|
|
2076
|
-
// Send email directly - SIMPLE HTML (complex tables get stripped by Gmail)
|
|
2077
|
-
const verifyUrl = `https://apiclaw.cloud/auth/verify?token=${result.token}`;
|
|
2078
|
-
const html = `<div style="font-family:Arial,sans-serif;max-width:500px;margin:0 auto;padding:20px;">
|
|
2079
|
-
<h1>🦞 APIClaw</h1>
|
|
2080
|
-
<h2>An AI Agent Wants to Connect</h2>
|
|
2081
|
-
<p>Click below to verify your email and activate your workspace.</p>
|
|
2082
|
-
<p><a href="${verifyUrl}" style="background:#ef4444;color:white;padding:14px 32px;border-radius:8px;text-decoration:none;display:inline-block;">Verify Email</a></p>
|
|
2083
|
-
<p style="color:#666;font-size:13px;">Free tier: 50 API calls. This link expires in 1 hour.</p>
|
|
2084
|
-
<p style="color:#999;font-size:11px;">Or copy this link: ${verifyUrl}</p>
|
|
2085
|
-
</div>`;
|
|
2086
|
-
|
|
2087
|
-
const RESEND_KEY = process.env.RESEND_API_KEY;
|
|
2088
|
-
if (!RESEND_KEY) {
|
|
2089
|
-
console.error("RESEND_API_KEY not configured");
|
|
2090
|
-
return jsonResponse({ error: "Email service not configured" }, 500);
|
|
2091
|
-
}
|
|
2092
|
-
|
|
2093
|
-
const emailResponse = await fetch("https://api.resend.com/emails", {
|
|
2094
|
-
method: "POST",
|
|
2095
|
-
headers: {
|
|
2096
|
-
"Authorization": `Bearer ${RESEND_KEY}`,
|
|
2097
|
-
"Content-Type": "application/json",
|
|
2098
|
-
},
|
|
2099
|
-
body: JSON.stringify({
|
|
2100
|
-
from: "APIClaw <noreply@apiclaw.cloud>",
|
|
2101
|
-
to: email.toLowerCase(),
|
|
2102
|
-
subject: "🦞 Verify Your Email — APIClaw",
|
|
2103
|
-
html: html,
|
|
2104
|
-
}),
|
|
2105
|
-
});
|
|
2106
|
-
|
|
2107
|
-
if (!emailResponse.ok) {
|
|
2108
|
-
const errorText = await emailResponse.text();
|
|
2109
|
-
console.error("Resend error:", emailResponse.status, errorText);
|
|
2110
|
-
return jsonResponse({ error: "Failed to send email", details: errorText }, 500);
|
|
2111
|
-
}
|
|
2112
|
-
|
|
2113
|
-
const emailResult = await emailResponse.json();
|
|
2114
|
-
console.log("Email sent successfully:", emailResult.id);
|
|
2115
|
-
|
|
2116
|
-
return jsonResponse({
|
|
2117
|
-
success: true,
|
|
2118
|
-
token: result.token,
|
|
2119
|
-
expiresAt: result.expiresAt,
|
|
2120
|
-
message: "Magic link sent! Check your email.",
|
|
2121
|
-
emailId: emailResult.id,
|
|
2122
|
-
});
|
|
2123
|
-
} catch (e: any) {
|
|
2124
|
-
console.error("Magic link error:", e);
|
|
2125
|
-
return jsonResponse({ error: e.message || "Failed to create magic link" }, 500);
|
|
2126
|
-
}
|
|
2127
|
-
}),
|
|
2128
|
-
});
|
|
2129
|
-
|
|
2130
|
-
http.route({
|
|
2131
|
-
path: "/workspace/magic-link",
|
|
2132
|
-
method: "OPTIONS",
|
|
2133
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
2134
|
-
});
|
|
2135
|
-
|
|
2136
|
-
// Poll magic link status (for agents to check if user clicked)
|
|
2137
|
-
http.route({
|
|
2138
|
-
path: "/workspace/poll",
|
|
2139
|
-
method: "GET",
|
|
2140
|
-
handler: httpAction(async (ctx, request) => {
|
|
2141
|
-
const url = new URL(request.url);
|
|
2142
|
-
const token = url.searchParams.get("token");
|
|
2143
|
-
|
|
2144
|
-
if (!token) {
|
|
2145
|
-
return jsonResponse({ error: "token required" }, 400);
|
|
2146
|
-
}
|
|
2147
|
-
|
|
2148
|
-
const result = await ctx.runQuery(api.workspaces.pollMagicLink, { token });
|
|
2149
|
-
return jsonResponse(result);
|
|
2150
|
-
}),
|
|
2151
|
-
});
|
|
2152
|
-
|
|
2153
|
-
http.route({
|
|
2154
|
-
path: "/workspace/poll",
|
|
2155
|
-
method: "OPTIONS",
|
|
2156
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
2157
|
-
});
|
|
2158
|
-
|
|
2159
|
-
// Verify session token
|
|
2160
|
-
http.route({
|
|
2161
|
-
path: "/workspace/verify-session",
|
|
2162
|
-
method: "GET",
|
|
2163
|
-
handler: httpAction(async (ctx, request) => {
|
|
2164
|
-
const url = new URL(request.url);
|
|
2165
|
-
const sessionToken = url.searchParams.get("sessionToken");
|
|
2166
|
-
|
|
2167
|
-
if (!sessionToken) {
|
|
2168
|
-
return jsonResponse({ error: "sessionToken required" }, 400);
|
|
2169
|
-
}
|
|
2170
|
-
|
|
2171
|
-
const result = await ctx.runQuery(api.workspaces.verifySession, { sessionToken });
|
|
2172
|
-
|
|
2173
|
-
if (!result) {
|
|
2174
|
-
return jsonResponse({ error: "Invalid or expired session" }, 401);
|
|
2175
|
-
}
|
|
2176
|
-
|
|
2177
|
-
return jsonResponse(result);
|
|
2178
|
-
}),
|
|
2179
|
-
});
|
|
2180
|
-
|
|
2181
|
-
http.route({
|
|
2182
|
-
path: "/workspace/verify-session",
|
|
2183
|
-
method: "OPTIONS",
|
|
2184
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
2185
|
-
});
|
|
2186
|
-
|
|
2187
|
-
// Get workspace by email
|
|
2188
|
-
http.route({
|
|
2189
|
-
path: "/workspace/by-email",
|
|
2190
|
-
method: "GET",
|
|
2191
|
-
handler: httpAction(async (ctx, request) => {
|
|
2192
|
-
const url = new URL(request.url);
|
|
2193
|
-
const email = url.searchParams.get("email");
|
|
2194
|
-
|
|
2195
|
-
if (!email) {
|
|
2196
|
-
return jsonResponse({ error: "email required" }, 400);
|
|
2197
|
-
}
|
|
2198
|
-
|
|
2199
|
-
const result = await ctx.runQuery(api.workspaces.getByEmail, { email });
|
|
2200
|
-
|
|
2201
|
-
if (!result) {
|
|
2202
|
-
return jsonResponse({ exists: false });
|
|
2203
|
-
}
|
|
2204
|
-
|
|
2205
|
-
return jsonResponse({ exists: true, workspace: result });
|
|
2206
|
-
}),
|
|
2207
|
-
});
|
|
2208
|
-
|
|
2209
|
-
http.route({
|
|
2210
|
-
path: "/workspace/by-email",
|
|
2211
|
-
method: "OPTIONS",
|
|
2212
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
2213
|
-
});
|
|
2214
|
-
|
|
2215
|
-
// Send reminder email
|
|
2216
|
-
http.route({
|
|
2217
|
-
path: "/workspace/send-reminder",
|
|
2218
|
-
method: "POST",
|
|
2219
|
-
handler: httpAction(async (ctx, request) => {
|
|
2220
|
-
try {
|
|
2221
|
-
const body = await request.json();
|
|
2222
|
-
const { email, token } = body;
|
|
2223
|
-
|
|
2224
|
-
if (!email || !token) {
|
|
2225
|
-
return jsonResponse({ error: "email and token required" }, 400);
|
|
2226
|
-
}
|
|
2227
|
-
|
|
2228
|
-
await ctx.runAction(api.email.sendReminderEmail, { email, token });
|
|
2229
|
-
return jsonResponse({ success: true });
|
|
2230
|
-
} catch (e: any) {
|
|
2231
|
-
return jsonResponse({ error: e.message }, 500);
|
|
2232
|
-
}
|
|
2233
|
-
}),
|
|
2234
|
-
});
|
|
2235
|
-
|
|
2236
|
-
http.route({
|
|
2237
|
-
path: "/workspace/send-reminder",
|
|
2238
|
-
method: "OPTIONS",
|
|
2239
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
2240
|
-
});
|
|
2241
|
-
|
|
2242
|
-
// ==============================================
// STRIPE BILLING ENDPOINTS
// ==============================================
// Pure route registrations; the handlers (createCheckoutSession,
// createPortalSession, handleStripeWebhook and their *Options twins) are
// defined elsewhere in the project — not visible in this chunk.

// Create checkout session
http.route({
  path: "/api/billing/checkout",
  method: "POST",
  handler: createCheckoutSession,
});

http.route({
  path: "/api/billing/checkout",
  method: "OPTIONS",
  handler: checkoutOptions,
});

// Create billing portal session
http.route({
  path: "/api/billing/portal",
  method: "POST",
  handler: createPortalSession,
});

http.route({
  path: "/api/billing/portal",
  method: "OPTIONS",
  handler: portalOptions,
});

// Stripe webhook handler
http.route({
  path: "/api/webhooks/stripe",
  method: "POST",
  handler: handleStripeWebhook,
});

http.route({
  path: "/api/webhooks/stripe",
  method: "OPTIONS",
  handler: webhookOptions,
});
|
|
2284
|
-
|
|
2285
|
-
// Test endpoint to debug logging
|
|
2286
|
-
http.route({
|
|
2287
|
-
path: "/proxy/test-logging",
|
|
2288
|
-
method: "POST",
|
|
2289
|
-
handler: httpAction(async (ctx, request) => {
|
|
2290
|
-
const identifier = request.headers.get("X-APIClaw-Identifier");
|
|
2291
|
-
|
|
2292
|
-
try {
|
|
2293
|
-
const logId = await ctx.runMutation(api.analytics.log, {
|
|
2294
|
-
event: "test_endpoint",
|
|
2295
|
-
provider: "test",
|
|
2296
|
-
identifier: identifier || "test",
|
|
2297
|
-
metadata: { test: true },
|
|
2298
|
-
});
|
|
2299
|
-
|
|
2300
|
-
return jsonResponse({
|
|
2301
|
-
success: true,
|
|
2302
|
-
identifier,
|
|
2303
|
-
logId,
|
|
2304
|
-
message: "Logged successfully"
|
|
2305
|
-
});
|
|
2306
|
-
} catch (e: any) {
|
|
2307
|
-
return jsonResponse({
|
|
2308
|
-
success: false,
|
|
2309
|
-
error: e.message,
|
|
2310
|
-
stack: e.stack
|
|
2311
|
-
}, 500);
|
|
2312
|
-
}
|
|
2313
|
-
}),
|
|
2314
|
-
});
|
|
2315
|
-
|
|
2316
|
-
// ==============================================
|
|
2317
|
-
// GATEWAY v1 — Unified API Layer for AI Agents
|
|
2318
|
-
// ==============================================
|
|
2319
|
-
// OpenAI-compatible /v1/chat/completions endpoint.
|
|
2320
|
-
// Accepts: Authorization: Bearer sk-claw-...
|
|
2321
|
-
// Routes to the best available LLM provider (OpenRouter by default).
|
|
2322
|
-
// This is what OpenClaw and any agent configures as their API endpoint.
|
|
2323
|
-
// ==============================================
|
|
2324
|
-
|
|
2325
|
-
// Helper: extract Bearer token from Authorization header
|
|
2326
|
-
function extractBearerToken(request: Request): string | null {
|
|
2327
|
-
const auth = request.headers.get("Authorization");
|
|
2328
|
-
if (!auth?.startsWith("Bearer ")) return null;
|
|
2329
|
-
return auth.slice(7);
|
|
2330
|
-
}
|
|
2331
|
-
|
|
2332
|
-
// Helper: require API key auth, return 401 if missing
|
|
2333
|
-
async function requireApiKeyAuth(
|
|
2334
|
-
ctx: any,
|
|
2335
|
-
request: Request
|
|
2336
|
-
): Promise<{ workspaceId: string; keyId: string } | Response> {
|
|
2337
|
-
const auth = await resolveWorkspaceFromRequest(ctx, request);
|
|
2338
|
-
if (auth.authMethod !== "api-key" || !auth.workspaceId || !auth.keyId) {
|
|
2339
|
-
return jsonResponse(
|
|
2340
|
-
{
|
|
2341
|
-
error: {
|
|
2342
|
-
message: "Invalid API key. Generate one at https://apiclaw.cloud/workspace?tab=api-keys",
|
|
2343
|
-
type: "invalid_api_key",
|
|
2344
|
-
code: "invalid_api_key",
|
|
2345
|
-
},
|
|
2346
|
-
},
|
|
2347
|
-
401
|
|
2348
|
-
);
|
|
2349
|
-
}
|
|
2350
|
-
return { workspaceId: auth.workspaceId, keyId: auth.keyId };
|
|
2351
|
-
}
|
|
2352
|
-
|
|
2353
|
-
// /v1/chat/completions — OpenAI-compatible LLM gateway with intelligent routing
|
|
2354
|
-
// /v1/chat/completions — OpenAI-compatible LLM gateway with intelligent routing.
// Auth: Bearer sk-claw-... (enforced by requireApiKeyAuth).
// Flow: auth → parse body → load workspace routing settings → pick a route via
// routeLLMRequest → log usage → forward to the chosen provider (with Anthropic
// request/response translation and optional OAuth passthrough) → annotate the
// response with _apiclaw metadata and per-call cost.
http.route({
  path: "/v1/chat/completions",
  method: "POST",
  handler: httpAction(async (ctx, request) => {
    const startTime = Date.now();

    // Require API key auth; a Response here is a ready-made 401.
    const authResult = await requireApiKeyAuth(ctx, request);
    if (authResult instanceof Response) return authResult;
    const { workspaceId } = authResult;

    // Parse body (OpenAI chat-completions shape).
    let body: any;
    try {
      body = await request.json();
    } catch {
      return jsonResponse({ error: { message: "Invalid JSON body", type: "invalid_request_error" } }, 400);
    }

    // `rest` carries all remaining OpenAI params (temperature, tools, ...)
    // through to the provider untouched.
    const { model, messages, stream, ...rest } = body;
    if (!messages || !Array.isArray(messages)) {
      return jsonResponse({ error: { message: "messages array is required", type: "invalid_request_error" } }, 400);
    }

    // Request-level overrides (X-APIClaw-Route header).
    // Value may be a routing mode ("fastest") or a provider name ("groq").
    const routeOverride = request.headers.get("X-APIClaw-Route"); // e.g. "fastest" or "groq"

    // Load workspace settings; fall back to safe defaults if the query throws.
    let settings: {
      routingMode: string;
      defaultModel: string | null;
      preferredProviders: string[];
      blockedProviders: string[];
      allowOpenRouterFallback: boolean;
      tier: string;
    };
    try {
      settings = await ctx.runQuery(internal.workspaceSettings.getForRouting, { workspaceId });
    } catch {
      settings = {
        routingMode: "balanced",
        defaultModel: null,
        preferredProviders: [],
        blockedProviders: [],
        allowOpenRouterFallback: true,
        tier: "free",
      };
    }

    // Apply request-level overrides: only recognized mode names replace the
    // workspace routing mode.
    const effectiveRoutingMode = routeOverride && ["best_price", "highest_quality", "fastest", "balanced"].includes(routeOverride)
      ? routeOverride
      : settings.routingMode;

    // If routeOverride is a known LLM provider name, prepend it as preferred.
    const effectivePreferred = routeOverride && PROVIDERS[routeOverride]?.isLLM
      ? [routeOverride, ...settings.preferredProviders]
      : settings.preferredProviders;

    // Model precedence: request body > workspace default > hard-coded default.
    const effectiveModel = model || settings.defaultModel || "anthropic/claude-sonnet-4-6";

    // Route the request (async -- may invoke advisor for intelligent model selection).
    const route = await routeLLMRequest(effectiveModel, {
      routingMode: effectiveRoutingMode,
      preferredProviders: effectivePreferred,
      blockedProviders: settings.blockedProviders,
      allowOpenRouterFallback: settings.allowOpenRouterFallback,
    }, messages);

    if (!route) {
      return jsonResponse({ error: { message: "No LLM provider available. Check workspace settings.", type: "server_error" } }, 503);
    }

    // Log usage (analytics event + proxy log + usage counter). Logging is
    // best-effort: failures are printed but never block the request.
    try {
      await ctx.runMutation(api.analytics.log, {
        event: "api_call",
        provider: "gateway",
        identifier: workspaceId,
        workspaceId: workspaceId as any,
        metadata: {
          action: "chat_completions",
          model: effectiveModel,
          routedTo: route.provider,
          routeReason: route.reason,
          authMethod: "api-key",
        },
      });
      await ctx.runMutation(api.logs.createProxyLog, {
        workspaceId: workspaceId as any,
        provider: route.provider,
        action: "chat_completions",
        subagentId: request.headers.get("X-APIClaw-Subagent") || "main",
      });
      await ctx.runMutation(api.workspaces.incrementUsage, {
        workspaceId: workspaceId as any,
      });
    } catch (e: any) {
      console.error("[Gateway] Logging failed:", e.message);
    }

    // OAuth passthrough — founder tier can supply their own provider token.
    // Header: X-APIClaw-OAuth: Bearer <token>
    // Only accepted for founder/partner tiers AND when routed to OpenAI;
    // uses the caller's token instead of the managed key.
    const oauthPassthrough = request.headers.get("X-APIClaw-OAuth");
    const isPremiumTier = settings.tier === "founder" || settings.tier === "partner";
    const effectiveApiKey = (oauthPassthrough && isPremiumTier && route.provider === "openai")
      ? oauthPassthrough.replace(/^Bearer\s+/i, "")
      : route.apiKey;

    // Forward to the chosen provider.
    try {
      const isAnthropic = route.provider === "anthropic";
      let requestBody: any;
      let headers: Record<string, string>;

      if (isAnthropic) {
        // Anthropic Messages API format (translated from the OpenAI shape).
        const { body: anthropicBody } = openaiToAnthropicRequest(route.model, messages, rest);
        if (stream) anthropicBody.stream = true;
        requestBody = anthropicBody;
        headers = {
          "x-api-key": effectiveApiKey,
          "anthropic-version": "2023-06-01",
          "Content-Type": "application/json",
          ...(route.extraHeaders || {}),
        };
      } else {
        // OpenAI-compatible providers: pass the body through mostly as-is.
        requestBody = {
          model: route.model,
          messages,
          stream: stream || false,
          ...rest,
        };
        headers = {
          "Authorization": `Bearer ${effectiveApiKey}`,
          "Content-Type": "application/json",
          ...(route.extraHeaders || {}),
        };
      }

      let response = await fetch(route.baseUrl, {
        method: "POST",
        headers,
        body: JSON.stringify(requestBody),
      });

      // OAuth fallback: if the caller-supplied OAuth token fails with 401/403,
      // retry once with the managed key. usedOAuth can only be true when the
      // route is OpenAI, so mutating headers["Authorization"] matches the
      // header shape built above (never the Anthropic x-api-key branch).
      const usedOAuth = oauthPassthrough && isPremiumTier && route.provider === "openai" && effectiveApiKey !== route.apiKey;
      if (usedOAuth && (response.status === 401 || response.status === 403)) {
        console.log(`OAuth token failed (${response.status}), falling back to managed key for ${route.provider}`);
        headers["Authorization"] = `Bearer ${route.apiKey}`;
        response = await fetch(route.baseUrl, {
          method: "POST",
          headers,
          body: JSON.stringify(requestBody),
        });
      }

      // For streaming responses, proxy the stream directly.
      // NOTE(review): Anthropic SSE frames are forwarded unmodified here —
      // confirm downstream clients accept Anthropic stream events when an
      // anthropic route streams.
      if (stream && response.body) {
        return new Response(response.body, {
          status: response.status,
          headers: {
            "Content-Type": response.headers.get("Content-Type") || "text/event-stream",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            ...corsHeaders,
          },
        });
      }

      // Non-streaming: return JSON.
      let data = await response.json();

      // Translate Anthropic response to OpenAI format (success only; error
      // payloads pass through untranslated).
      if (isAnthropic && response.ok) {
        data = anthropicToOpenaiResponse(data, route.model);
      }
      const latencyMs = Date.now() - startTime;

      // Calculate cost from token usage reported by the provider.
      const usage = (data as any)?.usage;
      const { providerCost, apiclawCost } = calculateCallCost(route.model, usage);

      // Log cost to usage records (fire and forget — errors swallowed by design).
      if (apiclawCost > 0) {
        ctx.runMutation(internal.billing.logCallCost, {
          workspaceId: workspaceId as any,
          provider: route.provider,
          model: route.model,
          providerCostUsd: providerCost,
          apiclawCostUsd: apiclawCost,
          inputTokens: usage?.prompt_tokens || 0,
          outputTokens: usage?.completion_tokens || 0,
        }).catch(() => {});
      }

      // Add APIClaw metadata (latency, routing, cost rounded to micro-dollars).
      if (data && typeof data === "object") {
        (data as any)._apiclaw = {
          latencyMs,
          provider: route.provider,
          routeReason: route.reason,
          model: route.model,
          gateway: "v1",
          cost: {
            providerUsd: Math.round(providerCost * 1_000_000) / 1_000_000,
            totalUsd: Math.round(apiclawCost * 1_000_000) / 1_000_000,
            margin: "15%",
          },
        };
      }

      return jsonResponse(data, response.status);
    } catch (e: any) {
      return jsonResponse({ error: { message: e.message, type: "server_error" } }, 500);
    }
  }),
});

// CORS preflight for /v1/chat/completions.
http.route({
  path: "/v1/chat/completions",
  method: "OPTIONS",
  handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
});
|
|
2580
|
-
|
|
2581
|
-
// ==============================================
|
|
2582
|
-
// /v1/embeddings — OpenAI-compatible embedding gateway
|
|
2583
|
-
// ==============================================
|
|
2584
|
-
// Accepts: Authorization: Bearer sk-claw-...
|
|
2585
|
-
// Routes by model prefix to Direct Call embedding providers:
|
|
2586
|
-
// voyage/* → Voyage AI (default: voyage-3-large)
|
|
2587
|
-
// mistral/* → Mistral (mistral-embed)
|
|
2588
|
-
// openai/* → OpenAI (text-embedding-3-small, -large, ada-002)
|
|
2589
|
-
// cohere/* → Cohere (embed-v4.0, embed-multilingual-v3) — translated
|
|
2590
|
-
// Unprefixed model strings auto-route by known model names.
|
|
2591
|
-
// ==============================================
|
|
2592
|
-
|
|
2593
|
-
// Resolved target for an embedding request: which provider to call, where,
// with which credential and model, and which wire format the provider speaks.
type EmbeddingBackend = {
  provider: "voyage" | "mistral" | "openai" | "cohere";
  // Full endpoint URL for the provider's embeddings API.
  baseUrl: string;
  // Read from process.env; undefined when the provider is not configured.
  apiKey: string | undefined;
  // Bare model name (provider prefix already stripped).
  model: string;
  // "openai" = OpenAI-compatible passthrough; "cohere" = needs translation.
  format: "openai" | "cohere";
};
|
|
2600
|
-
|
|
2601
|
-
// Map a model string to a backend. Supports prefixed (voyage/voyage-3-large)
|
|
2602
|
-
// and bare model names (text-embedding-3-small, mistral-embed, voyage-3-large).
|
|
2603
|
-
function resolveEmbeddingBackend(requestedModel: string | undefined): EmbeddingBackend | null {
|
|
2604
|
-
const raw = (requestedModel || "voyage/voyage-3-large").trim();
|
|
2605
|
-
let provider: EmbeddingBackend["provider"] | null = null;
|
|
2606
|
-
let model = raw;
|
|
2607
|
-
|
|
2608
|
-
// Explicit prefix
|
|
2609
|
-
if (raw.startsWith("voyage/")) {
|
|
2610
|
-
provider = "voyage";
|
|
2611
|
-
model = raw.slice(7);
|
|
2612
|
-
} else if (raw.startsWith("mistral/")) {
|
|
2613
|
-
provider = "mistral";
|
|
2614
|
-
model = raw.slice(8);
|
|
2615
|
-
} else if (raw.startsWith("openai/")) {
|
|
2616
|
-
provider = "openai";
|
|
2617
|
-
model = raw.slice(7);
|
|
2618
|
-
} else if (raw.startsWith("cohere/")) {
|
|
2619
|
-
provider = "cohere";
|
|
2620
|
-
model = raw.slice(7);
|
|
2621
|
-
} else {
|
|
2622
|
-
// Auto-detect from bare model name
|
|
2623
|
-
if (raw.startsWith("voyage-")) provider = "voyage";
|
|
2624
|
-
else if (raw.startsWith("mistral-embed") || raw === "mistral-embed") provider = "mistral";
|
|
2625
|
-
else if (raw.startsWith("text-embedding-") || raw.startsWith("ada-")) provider = "openai";
|
|
2626
|
-
else if (raw.startsWith("embed-")) provider = "cohere";
|
|
2627
|
-
else return null;
|
|
2628
|
-
}
|
|
2629
|
-
|
|
2630
|
-
switch (provider) {
|
|
2631
|
-
case "voyage":
|
|
2632
|
-
return {
|
|
2633
|
-
provider,
|
|
2634
|
-
baseUrl: "https://api.voyageai.com/v1/embeddings",
|
|
2635
|
-
apiKey: process.env.VOYAGE_API_KEY,
|
|
2636
|
-
model: model || "voyage-3-large",
|
|
2637
|
-
format: "openai",
|
|
2638
|
-
};
|
|
2639
|
-
case "mistral":
|
|
2640
|
-
return {
|
|
2641
|
-
provider,
|
|
2642
|
-
baseUrl: "https://api.mistral.ai/v1/embeddings",
|
|
2643
|
-
apiKey: process.env.MISTRAL_API_KEY,
|
|
2644
|
-
model: model || "mistral-embed",
|
|
2645
|
-
format: "openai",
|
|
2646
|
-
};
|
|
2647
|
-
case "openai":
|
|
2648
|
-
return {
|
|
2649
|
-
provider,
|
|
2650
|
-
baseUrl: "https://api.openai.com/v1/embeddings",
|
|
2651
|
-
apiKey: process.env.OPENAI_API_KEY,
|
|
2652
|
-
model: model || "text-embedding-3-small",
|
|
2653
|
-
format: "openai",
|
|
2654
|
-
};
|
|
2655
|
-
case "cohere":
|
|
2656
|
-
return {
|
|
2657
|
-
provider,
|
|
2658
|
-
baseUrl: "https://api.cohere.com/v2/embed",
|
|
2659
|
-
apiKey: process.env.COHERE_API_KEY,
|
|
2660
|
-
model: model || "embed-v4.0",
|
|
2661
|
-
format: "cohere",
|
|
2662
|
-
};
|
|
2663
|
-
}
|
|
2664
|
-
}
|
|
2665
|
-
|
|
2666
|
-
// /v1/embeddings — POST
|
|
2667
|
-
// /v1/embeddings — POST.
// OpenAI-compatible embedding gateway: auth → resolve backend by model name →
// log usage → forward (translating to Cohere's v2 shape when needed) →
// normalize the reply to the OpenAI embeddings format with _apiclaw metadata.
http.route({
  path: "/v1/embeddings",
  method: "POST",
  handler: httpAction(async (ctx, request) => {
    const startTime = Date.now();

    // Require API key auth; a Response here is a ready-made 401.
    const authResult = await requireApiKeyAuth(ctx, request);
    if (authResult instanceof Response) return authResult;
    const { workspaceId } = authResult;

    let body: any;
    try {
      body = await request.json();
    } catch {
      return jsonResponse({ error: { message: "Invalid JSON body", type: "invalid_request_error" } }, 400);
    }

    // OpenAI embedding params plus input_type (Voyage/Cohere extension).
    const { model, input, encoding_format, dimensions, user, input_type } = body;
    if (input === undefined || input === null) {
      return jsonResponse({ error: { message: "input is required", type: "invalid_request_error" } }, 400);
    }

    // Pick the provider/endpoint from the model string (see resolveEmbeddingBackend).
    const backend = resolveEmbeddingBackend(model);
    if (!backend) {
      return jsonResponse(
        { error: { message: `Unknown embedding model: ${model}. Use voyage/*, mistral/*, openai/*, or cohere/* prefix.`, type: "invalid_request_error" } },
        400
      );
    }
    if (!backend.apiKey) {
      return jsonResponse(
        { error: { message: `Provider ${backend.provider} is not configured (missing ${backend.provider.toUpperCase()}_API_KEY).`, type: "server_error" } },
        503
      );
    }

    // Log usage (best-effort; a failure never blocks the request).
    try {
      await ctx.runMutation(api.analytics.log, {
        event: "api_call",
        provider: "gateway",
        identifier: workspaceId,
        workspaceId: workspaceId as any,
        metadata: {
          action: "embeddings",
          model: `${backend.provider}/${backend.model}`,
          routedTo: backend.provider,
          authMethod: "api-key",
        },
      });
      await ctx.runMutation(api.logs.createProxyLog, {
        workspaceId: workspaceId as any,
        provider: backend.provider,
        action: "embeddings",
        subagentId: request.headers.get("X-APIClaw-Subagent") || "main",
      });
      await ctx.runMutation(api.workspaces.incrementUsage, {
        workspaceId: workspaceId as any,
      });
    } catch (e: any) {
      console.error("[Gateway] Embeddings logging failed:", e.message);
    }

    try {
      let providerRequestBody: any;
      let providerHeaders: Record<string, string> = {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${backend.apiKey}`,
      };

      if (backend.format === "openai") {
        // OpenAI-compatible passthrough (Voyage, Mistral, OpenAI): forward
        // only the params the caller actually supplied.
        providerRequestBody = {
          model: backend.model,
          input,
          ...(encoding_format !== undefined ? { encoding_format } : {}),
          ...(dimensions !== undefined ? { dimensions } : {}),
          ...(user !== undefined ? { user } : {}),
          ...(input_type !== undefined ? { input_type } : {}),
        };
      } else {
        // Cohere v2 format: wants a `texts` array and an input_type.
        // NOTE(review): `dimensions`/`encoding_format` are dropped on this
        // path — confirm that is acceptable for Cohere callers.
        const texts = Array.isArray(input) ? input : [String(input)];
        providerRequestBody = {
          model: backend.model,
          texts,
          input_type: input_type || "search_document",
          embedding_types: ["float"],
        };
      }

      const response = await fetch(backend.baseUrl, {
        method: "POST",
        headers: providerHeaders,
        body: JSON.stringify(providerRequestBody),
      });

      const providerData = await response.json();
      const latencyMs = Date.now() - startTime;

      // Provider error: wrap in OpenAI-style error envelope, preserving status.
      if (!response.ok) {
        return jsonResponse(
          {
            error: {
              message: (providerData as any)?.error?.message || (providerData as any)?.message || `${backend.provider} error`,
              type: "provider_error",
              provider: backend.provider,
            },
            _apiclaw: { latencyMs, provider: backend.provider, gateway: "v1" },
          },
          response.status
        );
      }

      // Normalize Cohere response to OpenAI format.
      let openAIData: any;
      if (backend.format === "cohere") {
        const cohereEmbeddings: number[][] = (providerData as any)?.embeddings?.float || (providerData as any)?.embeddings || [];
        openAIData = {
          object: "list",
          data: cohereEmbeddings.map((embedding, index) => ({
            object: "embedding",
            embedding,
            index,
          })),
          model: `cohere/${backend.model}`,
          usage: {
            prompt_tokens: (providerData as any)?.meta?.billed_units?.input_tokens || 0,
            total_tokens: (providerData as any)?.meta?.billed_units?.input_tokens || 0,
          },
        };
      } else {
        // Already OpenAI-format; backfill `model` if the provider omitted it.
        openAIData = providerData;
        if (openAIData && typeof openAIData === "object" && !openAIData.model) {
          openAIData.model = `${backend.provider}/${backend.model}`;
        }
      }

      // Attach APIClaw metadata to successful responses.
      if (openAIData && typeof openAIData === "object") {
        openAIData._apiclaw = {
          latencyMs,
          provider: backend.provider,
          model: backend.model,
          gateway: "v1",
        };
      }

      return jsonResponse(openAIData, 200);
    } catch (e: any) {
      return jsonResponse({ error: { message: e.message, type: "server_error" } }, 500);
    }
  }),
});

// CORS preflight for /v1/embeddings.
http.route({
  path: "/v1/embeddings",
  method: "OPTIONS",
  handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
});
|
|
2827
|
-
|
|
2828
|
-
// ==============================================
|
|
2829
|
-
// /v1/execute — Unified execution gateway
|
|
2830
|
-
// ==============================================
|
|
2831
|
-
// Single endpoint for ALL API call types:
|
|
2832
|
-
// 1. Managed providers (19 providers, APIClaw owns keys)
|
|
2833
|
-
// 2. LLM routing (Groq, Mistral, Together, OpenRouter)
|
|
2834
|
-
// 3. Open APIs (generic HTTP proxy with caller-supplied baseUrl)
|
|
2835
|
-
//
|
|
2836
|
-
// Auth: Bearer sk-claw-... OR X-APIClaw-Internal (server-to-server)
|
|
2837
|
-
// ==============================================
|
|
2838
|
-
|
|
2839
|
-
// Managed provider dispatch: maps provider+action to an upstream HTTP call
|
|
2840
|
-
// Returns { url, method, headers, body } or null if unknown
|
|
2841
|
-
function buildManagedRequest(
|
|
2842
|
-
provider: string,
|
|
2843
|
-
action: string,
|
|
2844
|
-
params: Record<string, any>
|
|
2845
|
-
): { url: string; method: string; headers: Record<string, string>; body?: string } | null {
|
|
2846
|
-
const meta = PROVIDERS[provider];
|
|
2847
|
-
if (!meta?.envKey) return null;
|
|
2848
|
-
|
|
2849
|
-
const apiKey = process.env[meta.envKey];
|
|
2850
|
-
if (!apiKey) return null;
|
|
2851
|
-
|
|
2852
|
-
// Provider-specific request builders
|
|
2853
|
-
switch (provider) {
|
|
2854
|
-
case "brave_search": {
|
|
2855
|
-
if (action !== "search") return null;
|
|
2856
|
-
const url = new URL("https://api.search.brave.com/res/v1/web/search");
|
|
2857
|
-
url.searchParams.set("q", params.query || "");
|
|
2858
|
-
url.searchParams.set("count", String(params.count || 10));
|
|
2859
|
-
return { url: url.toString(), method: "GET", headers: { "X-Subscription-Token": apiKey } };
|
|
2860
|
-
}
|
|
2861
|
-
case "serper": {
|
|
2862
|
-
if (action !== "search") return null;
|
|
2863
|
-
return {
|
|
2864
|
-
url: "https://google.serper.dev/search",
|
|
2865
|
-
method: "POST",
|
|
2866
|
-
headers: { "X-API-KEY": apiKey, "Content-Type": "application/json" },
|
|
2867
|
-
body: JSON.stringify({ q: params.query || params.q, num: params.num || 10 }),
|
|
2868
|
-
};
|
|
2869
|
-
}
|
|
2870
|
-
case "resend": {
|
|
2871
|
-
if (action !== "send_email") return null;
|
|
2872
|
-
return {
|
|
2873
|
-
url: "https://api.resend.com/emails",
|
|
2874
|
-
method: "POST",
|
|
2875
|
-
headers: { "Authorization": `Bearer ${apiKey}`, "Content-Type": "application/json" },
|
|
2876
|
-
body: JSON.stringify(params),
|
|
2877
|
-
};
|
|
2878
|
-
}
|
|
2879
|
-
case "elevenlabs": {
|
|
2880
|
-
if (action !== "text_to_speech") return null;
|
|
2881
|
-
const voiceId = params.voice_id || "21m00Tcm4TlvDq8ikWAM";
|
|
2882
|
-
return {
|
|
2883
|
-
url: `https://api.elevenlabs.io/v1/text-to-speech/${voiceId}`,
|
|
2884
|
-
method: "POST",
|
|
2885
|
-
headers: { "xi-api-key": apiKey, "Content-Type": "application/json" },
|
|
2886
|
-
body: JSON.stringify({
|
|
2887
|
-
text: params.text,
|
|
2888
|
-
model_id: params.model_id || "eleven_multilingual_v2",
|
|
2889
|
-
voice_settings: params.voice_settings || { stability: 0.5, similarity_boost: 0.75 },
|
|
2890
|
-
}),
|
|
2891
|
-
};
|
|
2892
|
-
}
|
|
2893
|
-
case "deepgram": {
|
|
2894
|
-
if (action !== "transcribe") return null;
|
|
2895
|
-
const dgUrl = new URL("https://api.deepgram.com/v1/listen");
|
|
2896
|
-
if (params.language) dgUrl.searchParams.set("language", params.language);
|
|
2897
|
-
if (params.model) dgUrl.searchParams.set("model", params.model);
|
|
2898
|
-
dgUrl.searchParams.set("smart_format", "true");
|
|
2899
|
-
return {
|
|
2900
|
-
url: dgUrl.toString(),
|
|
2901
|
-
method: "POST",
|
|
2902
|
-
headers: { "Authorization": `Token ${apiKey}`, "Content-Type": "application/json" },
|
|
2903
|
-
body: JSON.stringify({ url: params.url || params.audio_url }),
|
|
2904
|
-
};
|
|
2905
|
-
}
|
|
2906
|
-
case "firecrawl": {
|
|
2907
|
-
const firecrawlActions: Record<string, string> = {
|
|
2908
|
-
scrape: "https://api.firecrawl.dev/v1/scrape",
|
|
2909
|
-
crawl: "https://api.firecrawl.dev/v1/crawl",
|
|
2910
|
-
map: "https://api.firecrawl.dev/v1/map",
|
|
2911
|
-
};
|
|
2912
|
-
const fUrl = firecrawlActions[action];
|
|
2913
|
-
if (!fUrl) return null;
|
|
2914
|
-
return {
|
|
2915
|
-
url: fUrl,
|
|
2916
|
-
method: "POST",
|
|
2917
|
-
headers: { "Authorization": `Bearer ${apiKey}`, "Content-Type": "application/json" },
|
|
2918
|
-
body: JSON.stringify(params),
|
|
2919
|
-
};
|
|
2920
|
-
}
|
|
2921
|
-
case "replicate": {
|
|
2922
|
-
if (action !== "run") return null;
|
|
2923
|
-
return {
|
|
2924
|
-
url: "https://api.replicate.com/v1/predictions",
|
|
2925
|
-
method: "POST",
|
|
2926
|
-
headers: { "Authorization": `Bearer ${apiKey}`, "Content-Type": "application/json" },
|
|
2927
|
-
body: JSON.stringify({ version: params.version, input: params.input || params }),
|
|
2928
|
-
};
|
|
2929
|
-
}
|
|
2930
|
-
case "stability": {
|
|
2931
|
-
if (action !== "generate") return null;
|
|
2932
|
-
return {
|
|
2933
|
-
url: "https://api.stability.ai/v2beta/stable-image/generate/sd3",
|
|
2934
|
-
method: "POST",
|
|
2935
|
-
headers: { "Authorization": `Bearer ${apiKey}`, "Content-Type": "application/json", "Accept": "application/json" },
|
|
2936
|
-
body: JSON.stringify(params),
|
|
2937
|
-
};
|
|
2938
|
-
}
|
|
2939
|
-
case "github": {
|
|
2940
|
-
const ghHeaders = { "Authorization": `Bearer ${apiKey}`, "Accept": "application/vnd.github.v3+json", "User-Agent": "APIClaw-Gateway" };
|
|
2941
|
-
if (action === "search_repos") {
|
|
2942
|
-
const ghUrl = new URL("https://api.github.com/search/repositories");
|
|
2943
|
-
ghUrl.searchParams.set("q", params.query || params.q || "");
|
|
2944
|
-
return { url: ghUrl.toString(), method: "GET", headers: ghHeaders };
|
|
2945
|
-
}
|
|
2946
|
-
if (action === "get_repo") {
|
|
2947
|
-
return { url: `https://api.github.com/repos/${params.owner}/${params.repo}`, method: "GET", headers: ghHeaders };
|
|
2948
|
-
}
|
|
2949
|
-
if (action === "get_file") {
|
|
2950
|
-
return { url: `https://api.github.com/repos/${params.owner}/${params.repo}/contents/${params.path}`, method: "GET", headers: ghHeaders };
|
|
2951
|
-
}
|
|
2952
|
-
return null;
|
|
2953
|
-
}
|
|
2954
|
-
case "e2b": {
|
|
2955
|
-
// E2B sandbox execution is complex (create sandbox, then run code). Simplified for gateway.
|
|
2956
|
-
if (action !== "run_code") return null;
|
|
2957
|
-
return {
|
|
2958
|
-
url: "https://api.e2b.dev/v1/sandboxes",
|
|
2959
|
-
method: "POST",
|
|
2960
|
-
headers: { "X-API-Key": apiKey, "Content-Type": "application/json" },
|
|
2961
|
-
body: JSON.stringify({ template: params.template || "base", ...params }),
|
|
2962
|
-
};
|
|
2963
|
-
}
|
|
2964
|
-
case "46elks": {
|
|
2965
|
-
if (action !== "send_sms") return null;
|
|
2966
|
-
// 46elks uses Basic auth with username:password (envKey has format user:pass)
|
|
2967
|
-
const [user, pass] = apiKey.includes(":") ? apiKey.split(":") : [apiKey, ""];
|
|
2968
|
-
const basicAuth = typeof btoa !== "undefined" ? btoa(`${user}:${pass}`) : Buffer.from(`${user}:${pass}`).toString("base64");
|
|
2969
|
-
return {
|
|
2970
|
-
url: "https://api.46elks.com/a1/sms",
|
|
2971
|
-
method: "POST",
|
|
2972
|
-
headers: { "Authorization": `Basic ${basicAuth}`, "Content-Type": "application/x-www-form-urlencoded" },
|
|
2973
|
-
body: new URLSearchParams({ from: params.from || "APIClaw", to: params.to, message: params.message }).toString(),
|
|
2974
|
-
};
|
|
2975
|
-
}
|
|
2976
|
-
case "twilio": {
|
|
2977
|
-
// Twilio uses Basic auth. envKey format: accountSid:authToken
|
|
2978
|
-
const [sid, token] = apiKey.includes(":") ? apiKey.split(":") : [apiKey, ""];
|
|
2979
|
-
const twilioAuth = typeof btoa !== "undefined" ? btoa(`${sid}:${token}`) : Buffer.from(`${sid}:${token}`).toString("base64");
|
|
2980
|
-
return {
|
|
2981
|
-
url: `https://api.twilio.com/2010-04-01/Accounts/${sid}/Messages.json`,
|
|
2982
|
-
method: "POST",
|
|
2983
|
-
headers: { "Authorization": `Basic ${twilioAuth}`, "Content-Type": "application/x-www-form-urlencoded" },
|
|
2984
|
-
body: new URLSearchParams({ From: params.from, To: params.to, Body: params.message }).toString(),
|
|
2985
|
-
};
|
|
2986
|
-
}
|
|
2987
|
-
case "assemblyai": {
|
|
2988
|
-
if (action !== "transcribe") return null;
|
|
2989
|
-
return {
|
|
2990
|
-
url: "https://api.assemblyai.com/v2/transcript",
|
|
2991
|
-
method: "POST",
|
|
2992
|
-
headers: { "Authorization": apiKey, "Content-Type": "application/json" },
|
|
2993
|
-
body: JSON.stringify({ audio_url: params.url || params.audio_url, ...params }),
|
|
2994
|
-
};
|
|
2995
|
-
}
|
|
2996
|
-
case "anthropic": {
|
|
2997
|
-
if (action === "chat" || action === "messages") {
|
|
2998
|
-
return {
|
|
2999
|
-
url: "https://api.anthropic.com/v1/messages",
|
|
3000
|
-
method: "POST",
|
|
3001
|
-
headers: { "x-api-key": apiKey, "anthropic-version": "2023-06-01", "Content-Type": "application/json" },
|
|
3002
|
-
body: JSON.stringify(params),
|
|
3003
|
-
};
|
|
3004
|
-
}
|
|
3005
|
-
return null;
|
|
3006
|
-
}
|
|
3007
|
-
case "cohere": {
|
|
3008
|
-
if (action === "chat") {
|
|
3009
|
-
return {
|
|
3010
|
-
url: "https://api.cohere.com/v2/chat",
|
|
3011
|
-
method: "POST",
|
|
3012
|
-
headers: { "Authorization": `Bearer ${apiKey}`, "Content-Type": "application/json" },
|
|
3013
|
-
body: JSON.stringify(params),
|
|
3014
|
-
};
|
|
3015
|
-
}
|
|
3016
|
-
if (action === "rerank") {
|
|
3017
|
-
return {
|
|
3018
|
-
url: "https://api.cohere.com/v2/rerank",
|
|
3019
|
-
method: "POST",
|
|
3020
|
-
headers: { "Authorization": `Bearer ${apiKey}`, "Content-Type": "application/json" },
|
|
3021
|
-
body: JSON.stringify(params),
|
|
3022
|
-
};
|
|
3023
|
-
}
|
|
3024
|
-
return null;
|
|
3025
|
-
}
|
|
3026
|
-
default:
|
|
3027
|
-
return null;
|
|
3028
|
-
}
|
|
3029
|
-
}
|
|
3030
|
-
|
|
3031
|
-
// Resolve auth for /v1/execute: supports both sk-claw- keys and X-APIClaw-Internal
|
|
3032
|
-
async function resolveExecuteAuth(
|
|
3033
|
-
ctx: any,
|
|
3034
|
-
request: Request
|
|
3035
|
-
): Promise<{ workspaceId?: string; keyId?: string; authMethod: "api-key" | "internal" | "anonymous" } | Response> {
|
|
3036
|
-
// 1. Check internal server-to-server auth
|
|
3037
|
-
const internalSecret = request.headers.get("X-APIClaw-Internal");
|
|
3038
|
-
if (internalSecret) {
|
|
3039
|
-
const expectedSecret = process.env.APICLAW_INTERNAL_SECRET;
|
|
3040
|
-
if (!expectedSecret || internalSecret !== expectedSecret) {
|
|
3041
|
-
return jsonResponse({ error: { message: "Invalid internal secret", type: "auth_error" } }, 401);
|
|
3042
|
-
}
|
|
3043
|
-
// Internal auth: extract workspace from body or header
|
|
3044
|
-
const workspaceHeader = request.headers.get("X-APIClaw-Workspace");
|
|
3045
|
-
return { workspaceId: workspaceHeader || undefined, authMethod: "internal" };
|
|
3046
|
-
}
|
|
3047
|
-
|
|
3048
|
-
// 2. Check for API key auth (Bearer sk-claw-...)
|
|
3049
|
-
const auth = await resolveWorkspaceFromRequest(ctx, request);
|
|
3050
|
-
if (auth.authMethod === "api-key" && auth.workspaceId && auth.keyId) {
|
|
3051
|
-
return { workspaceId: auth.workspaceId, keyId: auth.keyId, authMethod: "api-key" };
|
|
3052
|
-
}
|
|
3053
|
-
|
|
3054
|
-
// 3. No valid auth
|
|
3055
|
-
return jsonResponse(
|
|
3056
|
-
{ error: { message: "Authentication required. Use Bearer sk-claw-... or X-APIClaw-Internal header.", type: "auth_error" } },
|
|
3057
|
-
401
|
|
3058
|
-
);
|
|
3059
|
-
}
|
|
3060
|
-
|
|
3061
|
-
http.route({
  path: "/v1/execute",
  method: "POST",
  // Unified execution gateway: dispatches one of three paths based on the
  // request body — (1) LLM chat routing, (2) managed provider call via
  // buildManagedRequest, (3) generic open-API HTTP proxy.
  handler: httpAction(async (ctx, request) => {
    // Wall-clock start; surfaced to callers as _apiclaw.latencyMs.
    const startTime = Date.now();

    // Auth
    // resolveExecuteAuth returns either an auth descriptor or a ready 401 Response.
    const authResult = await resolveExecuteAuth(ctx, request);
    if (authResult instanceof Response) return authResult;
    const { workspaceId, authMethod } = authResult;

    // Parse body
    let body: any;
    try {
      body = await request.json();
    } catch {
      return jsonResponse({ error: { message: "Invalid JSON body", type: "invalid_request" } }, 400);
    }

    // provider and action are mandatory; params defaults to an empty object.
    const { provider, action, params = {} } = body;
    if (!provider) {
      return jsonResponse({ error: { message: "provider is required", type: "invalid_request" } }, 400);
    }
    if (!action) {
      return jsonResponse({ error: { message: "action is required", type: "invalid_request" } }, 400);
    }

    // Optional caller tag used for per-subagent attribution in proxy logs.
    const subagentId = request.headers.get("X-APIClaw-Subagent") || "main";

    // Determine execution path
    let routeDetail = "";

    // Path 1: LLM routing (provider "auto" or known LLM provider with action "chat")
    const isLLMRequest = action === "chat" && (
      provider === "auto" ||
      (PROVIDERS[provider]?.isLLM === true)
    );

    if (isLLMRequest) {
      // LLM routing path

      // Load workspace settings for routing
      // Defaults apply when there is no workspace or the settings query throws.
      let settings = {
        routingMode: "balanced",
        defaultModel: null as string | null,
        preferredProviders: [] as string[],
        blockedProviders: [] as string[],
        allowOpenRouterFallback: true,
      };
      if (workspaceId) {
        try {
          settings = await ctx.runQuery(internal.workspaceSettings.getForRouting, { workspaceId });
        } catch { /* use defaults */ }
      }

      // Per-request override: X-APIClaw-Route may name either a routing mode
      // or a specific LLM provider to push to the front of the preference list.
      const routeOverride = request.headers.get("X-APIClaw-Route");
      const effectiveRoutingMode = routeOverride && ["best_price", "highest_quality", "fastest", "balanced"].includes(routeOverride)
        ? routeOverride : settings.routingMode;
      const effectivePreferred = routeOverride && PROVIDERS[routeOverride]?.isLLM
        ? [routeOverride, ...settings.preferredProviders] : settings.preferredProviders;
      // If a specific LLM provider is requested (not "auto"), prefer it
      const finalPreferred = provider !== "auto" && PROVIDERS[provider]?.isLLM
        ? [provider, ...effectivePreferred] : effectivePreferred;

      // Model precedence: explicit request param > workspace default > hard-coded default.
      const effectiveModel = params.model || settings.defaultModel || "anthropic/claude-sonnet-4-6";

      const route = await routeLLMRequest(effectiveModel, {
        routingMode: effectiveRoutingMode,
        preferredProviders: finalPreferred,
        blockedProviders: settings.blockedProviders,
        allowOpenRouterFallback: settings.allowOpenRouterFallback,
      }, params.messages);

      if (!route) {
        return jsonResponse({ success: false, error: "No LLM provider available", _apiclaw: { latencyMs: Date.now() - startTime, route: "none", gateway: true } }, 503);
      }

      routeDetail = route.reason;

      // Log usage
      // Best-effort: analytics/log failures never block the actual provider call.
      if (workspaceId) {
        try {
          await ctx.runMutation(api.analytics.log, {
            event: "api_call", provider: "gateway", identifier: workspaceId,
            workspaceId: workspaceId as any,
            metadata: { action: "execute_chat", model: effectiveModel, routedTo: route.provider, routeReason: route.reason, authMethod },
          });
          await ctx.runMutation(api.logs.createProxyLog, {
            workspaceId: workspaceId as any, provider: route.provider, action: "chat", subagentId,
          });
          await ctx.runMutation(api.workspaces.incrementUsage, { workspaceId: workspaceId as any });
        } catch (e: any) { console.error("[Execute] LLM logging failed:", e.message); }
      }

      // Forward to provider
      try {
        // Strip `model` from params; the routed model name is substituted below.
        const { model: _m, ...restParams } = params;
        const isAnthropic = route.provider === "anthropic";
        let finalBody: any;
        let headers: Record<string, string>;

        if (isAnthropic) {
          // Anthropic speaks its own Messages API; translate the OpenAI-shaped input.
          const { body: anthropicBody } = openaiToAnthropicRequest(route.model, params.messages || [], restParams);
          if (params.stream) anthropicBody.stream = true;
          finalBody = anthropicBody;
          headers = {
            "x-api-key": route.apiKey,
            "anthropic-version": "2023-06-01",
            "Content-Type": "application/json",
            ...(route.extraHeaders || {}),
          };
        } else {
          // All other routed providers accept the OpenAI chat-completions shape.
          finalBody = { model: route.model, messages: params.messages, stream: params.stream || false, ...restParams };
          headers = {
            "Authorization": `Bearer ${route.apiKey}`,
            "Content-Type": "application/json",
            ...(route.extraHeaders || {}),
          };
        }

        const response = await fetch(route.baseUrl, {
          method: "POST", headers, body: JSON.stringify(finalBody),
        });

        // Streaming
        // Pass the upstream SSE body straight through without buffering.
        if (params.stream && response.body) {
          return new Response(response.body, {
            status: response.status,
            headers: { "Content-Type": response.headers.get("Content-Type") || "text/event-stream", "Cache-Control": "no-cache", ...corsHeaders },
          });
        }

        let data = await response.json();

        // Translate Anthropic response to OpenAI format
        if (isAnthropic && response.ok) {
          data = anthropicToOpenaiResponse(data, route.model);
        }
        const latencyMs = Date.now() - startTime;

        // Calculate cost from token usage (parity with /v1/chat/completions)
        const usage = (data as any)?.usage;
        const { providerCost, apiclawCost } = calculateCallCost(route.model, usage);

        // Log cost to usage records
        // Fire-and-forget: the billing write is intentionally not awaited, and
        // its failure is swallowed so it can never fail the user's call.
        if (apiclawCost > 0 && workspaceId) {
          ctx.runMutation(internal.billing.logCallCost, {
            workspaceId: workspaceId as any,
            provider: route.provider,
            model: route.model,
            providerCostUsd: providerCost,
            apiclawCostUsd: apiclawCost,
            inputTokens: usage?.prompt_tokens || 0,
            outputTokens: usage?.completion_tokens || 0,
          }).catch(() => {});
        }

        return jsonResponse({
          success: response.ok,
          provider: route.provider,
          action: "chat",
          data,
          _apiclaw: {
            latencyMs, route: routeDetail, gateway: true, model: route.model,
            cost: {
              // Rounded to micro-dollar precision for the response payload.
              providerUsd: Math.round(providerCost * 1_000_000) / 1_000_000,
              totalUsd: Math.round(apiclawCost * 1_000_000) / 1_000_000,
              margin: "15%",
            },
          },
        }, response.ok ? 200 : response.status);
      } catch (e: any) {
        return jsonResponse({ success: false, provider: provider, action, error: e.message, _apiclaw: { latencyMs: Date.now() - startTime, route: routeDetail, gateway: true } }, 500);
      }
    }

    // Path 2: Managed provider (known in PROVIDERS catalog)
    if (PROVIDERS[provider]) {
      // Managed provider path
      routeDetail = `direct_${provider}`;

      // null means the action is unsupported or the provider key is unconfigured.
      const req = buildManagedRequest(provider, action, params);
      if (!req) {
        return jsonResponse({
          success: false,
          error: `Unknown action "${action}" for provider "${provider}"`,
          _apiclaw: { latencyMs: Date.now() - startTime, route: routeDetail, gateway: true },
        }, 400);
      }

      // Log usage
      // Best-effort, same policy as the LLM path.
      if (workspaceId) {
        try {
          await ctx.runMutation(api.analytics.log, {
            event: "api_call", provider, identifier: workspaceId,
            workspaceId: workspaceId as any,
            metadata: { action, subagentId, authMethod, via: "execute" },
          });
          await ctx.runMutation(api.logs.createProxyLog, {
            workspaceId: workspaceId as any, provider, action, subagentId,
          });
          await ctx.runMutation(api.workspaces.incrementUsage, { workspaceId: workspaceId as any });
        } catch (e: any) { console.error("[Execute] Managed logging failed:", e.message); }
      }

      // Execute upstream call
      try {
        const fetchOpts: RequestInit = { method: req.method, headers: req.headers };
        if (req.body) fetchOpts.body = req.body;

        const response = await fetch(req.url, fetchOpts);
        const latencyMs = Date.now() - startTime;

        // Handle binary responses (e.g., ElevenLabs audio)
        const contentType = response.headers.get("Content-Type") || "";
        if (contentType.includes("audio/") || contentType.includes("application/octet-stream")) {
          // Stream binary bodies through untouched, preserving content type.
          return new Response(response.body, {
            status: response.status,
            headers: { "Content-Type": contentType, ...corsHeaders },
          });
        }

        let data: any;
        try {
          data = await response.json();
        } catch {
          // Non-JSON upstream body is wrapped rather than dropped.
          data = { raw: await response.text() };
        }

        return jsonResponse({
          success: response.ok,
          provider,
          action,
          data,
          _apiclaw: { latencyMs, route: routeDetail, gateway: true },
        }, response.ok ? 200 : response.status);
      } catch (e: any) {
        return jsonResponse({
          success: false, provider, action, error: e.message,
          _apiclaw: { latencyMs: Date.now() - startTime, route: routeDetail, gateway: true },
        }, 500);
      }
    }

    // Path 3: Open API (generic HTTP proxy)
    // Open API path
    routeDetail = `open_${provider}`;

    // Caller supplies the full target: baseUrl is required, method/headers/body optional.
    const { baseUrl, method = "GET", headers: customHeaders = {}, body: customBody } = params;
    if (!baseUrl) {
      return jsonResponse({
        success: false,
        error: `Unknown provider "${provider}". For open APIs, include params.baseUrl.`,
        _apiclaw: { latencyMs: Date.now() - startTime, route: "unknown", gateway: true },
      }, 400);
    }

    // Log usage
    if (workspaceId) {
      try {
        await ctx.runMutation(api.analytics.log, {
          event: "api_call", provider: `open:${provider}`, identifier: workspaceId,
          workspaceId: workspaceId as any,
          metadata: { action, subagentId, authMethod, baseUrl, via: "execute_open" },
        });
        await ctx.runMutation(api.logs.createProxyLog, {
          workspaceId: workspaceId as any, provider: `open:${provider}`, action, subagentId,
        });
        await ctx.runMutation(api.workspaces.incrementUsage, { workspaceId: workspaceId as any });
      } catch (e: any) { console.error("[Execute] Open API logging failed:", e.message); }
    }

    // Execute open API call
    try {
      const fetchOpts: RequestInit = {
        method: method.toUpperCase(),
        // Caller headers win over the JSON default on key collision.
        headers: { "Content-Type": "application/json", ...customHeaders },
      };
      // GET requests never carry a body; strings pass through, objects are serialized.
      if (customBody && method.toUpperCase() !== "GET") {
        fetchOpts.body = typeof customBody === "string" ? customBody : JSON.stringify(customBody);
      }

      const response = await fetch(baseUrl, fetchOpts);
      const latencyMs = Date.now() - startTime;

      let data: any;
      const ct = response.headers.get("Content-Type") || "";
      if (ct.includes("json")) {
        try { data = await response.json(); } catch { data = { raw: await response.text() }; }
      } else {
        data = { raw: await response.text() };
      }

      return jsonResponse({
        success: response.ok,
        provider,
        action,
        data,
        _apiclaw: { latencyMs, route: routeDetail, gateway: true },
      }, response.ok ? 200 : response.status);
    } catch (e: any) {
      return jsonResponse({
        success: false, provider, action, error: e.message,
        _apiclaw: { latencyMs: Date.now() - startTime, route: routeDetail, gateway: true },
      }, 500);
    }
  }),
});
|
|
3369
|
-
|
|
3370
|
-
http.route({
|
|
3371
|
-
path: "/v1/execute",
|
|
3372
|
-
method: "OPTIONS",
|
|
3373
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
3374
|
-
});
|
|
3375
|
-
|
|
3376
|
-
// /v1/models — List available models through APIClaw
|
|
3377
|
-
http.route({
|
|
3378
|
-
path: "/v1/models",
|
|
3379
|
-
method: "GET",
|
|
3380
|
-
handler: httpAction(async (ctx, request) => {
|
|
3381
|
-
// API key auth optional for models listing
|
|
3382
|
-
const models = [
|
|
3383
|
-
// OpenRouter models (main LLM backbone)
|
|
3384
|
-
// Anthropic direct models
|
|
3385
|
-
{ id: "anthropic/claude-sonnet-4-6", object: "model", owned_by: "anthropic", via: "direct" },
|
|
3386
|
-
{ id: "anthropic/claude-opus-4-6", object: "model", owned_by: "anthropic", via: "direct" },
|
|
3387
|
-
{ id: "anthropic/claude-3.5-sonnet", object: "model", owned_by: "anthropic", via: "direct" },
|
|
3388
|
-
{ id: "anthropic/claude-haiku-4-5", object: "model", owned_by: "anthropic", via: "direct" },
|
|
3389
|
-
{ id: "openai/gpt-4o", object: "model", owned_by: "openai", via: "openrouter" },
|
|
3390
|
-
{ id: "openai/gpt-4o-mini", object: "model", owned_by: "openai", via: "openrouter" },
|
|
3391
|
-
{ id: "openai/o3-mini", object: "model", owned_by: "openai", via: "openrouter" },
|
|
3392
|
-
{ id: "google/gemini-2.5-pro-preview", object: "model", owned_by: "google", via: "openrouter" },
|
|
3393
|
-
{ id: "google/gemini-2.5-flash-preview", object: "model", owned_by: "google", via: "openrouter" },
|
|
3394
|
-
{ id: "meta-llama/llama-3.3-70b-instruct", object: "model", owned_by: "meta", via: "openrouter" },
|
|
3395
|
-
{ id: "mistralai/mistral-large-latest", object: "model", owned_by: "mistral", via: "openrouter" },
|
|
3396
|
-
{ id: "deepseek/deepseek-r1", object: "model", owned_by: "deepseek", via: "openrouter" },
|
|
3397
|
-
{ id: "deepseek/deepseek-chat", object: "model", owned_by: "deepseek", via: "openrouter" },
|
|
3398
|
-
{ id: "qwen/qwen-2.5-72b-instruct", object: "model", owned_by: "qwen", via: "openrouter" },
|
|
3399
|
-
|
|
3400
|
-
// Embedding models via /v1/embeddings
|
|
3401
|
-
{ id: "voyage/voyage-3-large", object: "model", owned_by: "voyage", via: "voyage", endpoint: "/v1/embeddings" },
|
|
3402
|
-
{ id: "voyage/voyage-3", object: "model", owned_by: "voyage", via: "voyage", endpoint: "/v1/embeddings" },
|
|
3403
|
-
{ id: "voyage/voyage-3-lite", object: "model", owned_by: "voyage", via: "voyage", endpoint: "/v1/embeddings" },
|
|
3404
|
-
{ id: "voyage/voyage-code-3", object: "model", owned_by: "voyage", via: "voyage", endpoint: "/v1/embeddings" },
|
|
3405
|
-
{ id: "voyage/voyage-multilingual-2", object: "model", owned_by: "voyage", via: "voyage", endpoint: "/v1/embeddings" },
|
|
3406
|
-
{ id: "mistral/mistral-embed", object: "model", owned_by: "mistral", via: "mistral", endpoint: "/v1/embeddings" },
|
|
3407
|
-
{ id: "openai/text-embedding-3-small", object: "model", owned_by: "openai", via: "openai", endpoint: "/v1/embeddings" },
|
|
3408
|
-
{ id: "openai/text-embedding-3-large", object: "model", owned_by: "openai", via: "openai", endpoint: "/v1/embeddings" },
|
|
3409
|
-
{ id: "openai/text-embedding-ada-002", object: "model", owned_by: "openai", via: "openai", endpoint: "/v1/embeddings" },
|
|
3410
|
-
{ id: "cohere/embed-v4.0", object: "model", owned_by: "cohere", via: "cohere", endpoint: "/v1/embeddings" },
|
|
3411
|
-
{ id: "cohere/embed-multilingual-v3", object: "model", owned_by: "cohere", via: "cohere", endpoint: "/v1/embeddings" },
|
|
3412
|
-
];
|
|
3413
|
-
|
|
3414
|
-
return jsonResponse({
|
|
3415
|
-
object: "list",
|
|
3416
|
-
data: models,
|
|
3417
|
-
_apiclaw: {
|
|
3418
|
-
gateway: "v1",
|
|
3419
|
-
note: "These models are available through APIClaw's unified gateway. All 800+ OpenRouter chat models + embedding models across Voyage, Mistral, OpenAI, and Cohere.",
|
|
3420
|
-
non_llm_apis: Object.keys(PROVIDERS).length + " Direct Call providers (SMS, email, search, TTS, embeddings, code execution, scraping, and more)",
|
|
3421
|
-
},
|
|
3422
|
-
});
|
|
3423
|
-
}),
|
|
3424
|
-
});
|
|
3425
|
-
|
|
3426
|
-
http.route({
|
|
3427
|
-
path: "/v1/models",
|
|
3428
|
-
method: "OPTIONS",
|
|
3429
|
-
handler: httpAction(async () => new Response(null, { headers: corsHeaders })),
|
|
3430
|
-
});
|