spectyra-proxy 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,367 @@
1
+ #!/usr/bin/env node
2
+ import express from "express";
3
+ import cors from "cors";
4
+ import fs from "fs";
5
+ import path from "path";
6
+ import { fileURLToPath } from "url";
7
// Resolve this module's own path/directory (ESM has no __dirname builtin).
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Static assets for the local stats/config dashboard UI.
const DASHBOARD_DIR = path.join(__dirname, 'dashboard');
// Port the LLM proxy listens on (override with PROXY_PORT).
const PORT = parseInt(process.env.PROXY_PORT || "3001", 10);
// Port the dashboard UI listens on (override with DASHBOARD_PORT).
const DASHBOARD_PORT = parseInt(process.env.DASHBOARD_PORT || "3002", 10);
// Upstream Spectyra optimization API base URL.
const SPECTYRA_API = process.env.SPECTYRA_API_URL || "https://spectyra.up.railway.app/v1";
// API keys and routing options persist here, next to the installed module.
const CONFIG_FILE = path.join(__dirname, ".spectyra-proxy-config.json");
14
/**
 * Read the persisted proxy configuration from disk.
 *
 * @returns {object|null} Parsed config object, or null when the file is
 *   missing or unreadable (errors are logged, never thrown).
 */
function loadConfig() {
    try {
        if (!fs.existsSync(CONFIG_FILE)) {
            return null;
        }
        const raw = fs.readFileSync(CONFIG_FILE, "utf-8");
        return JSON.parse(raw);
    }
    catch (error) {
        console.error("Error loading config:", error);
        return null;
    }
}
26
/**
 * Persist the proxy configuration to disk as pretty-printed JSON.
 * Failures are logged and swallowed so a bad disk never kills the proxy.
 *
 * @param {object} config - Configuration object to serialize.
 */
function saveConfig(config) {
    try {
        const serialized = JSON.stringify(config, null, 2);
        fs.writeFileSync(CONFIG_FILE, serialized);
    }
    catch (error) {
        console.error("Error saving config:", error);
    }
}
34
// Rolling in-memory savings metrics for the lifetime of the process.
const stats = {
    totalRequests: 0,
    totalTokensSaved: 0,
    totalCostSaved: 0,
    requests: [],
};
/**
 * Record one proxied request in the in-memory stats.
 *
 * @param {object|null|undefined} savings - Savings payload from the Spectyra
 *   response ({ tokens_saved, cost_saved_usd, pct_saved }); falsy when the
 *   upstream reported no savings (only the request counter is bumped then).
 * @param {string} model - Model name the request was routed with.
 */
function addStats(savings, model) {
    stats.totalRequests += 1;
    if (!savings) {
        return;
    }
    const tokensSaved = savings.tokens_saved || 0;
    const costSaved = savings.cost_saved_usd || 0;
    stats.totalTokensSaved += tokensSaved;
    stats.totalCostSaved += costSaved;
    stats.requests.push({
        timestamp: Date.now(),
        tokensSaved,
        costSaved,
        pctSaved: savings.pct_saved || 0,
        model,
    });
    // Cap the per-request history at the most recent 1000 entries.
    if (stats.requests.length > 1000) {
        stats.requests = stats.requests.slice(-1000);
    }
}
57
// The proxy app itself: accepts provider-shaped chat requests and relays
// them through the Spectyra optimization API.
const app = express();
app.use(cors());
app.use(express.json());
60
/**
 * Normalize provider-specific chat messages into Spectyra's flat
 * { role, content } shape. Anthropic/Gemini roles collapse to
 * "user"/"assistant"; OpenAI-format roles pass through unchanged.
 *
 * @param {Array<object>} messages - Messages in the source wire format.
 * @param {"anthropic"|"gemini"|string} format - Source format; anything
 *   else is treated as OpenAI format.
 * @returns {Array<{role: string, content: string}>}
 */
function convertMessagesToSpectyra(messages, format) {
    if (format === "anthropic") {
        return messages.map((msg) => {
            // Anthropic content may be a plain string or an array of typed
            // parts; only the first "text" part is extracted.
            let text = "";
            if (typeof msg.content === "string") {
                text = msg.content;
            }
            else if (Array.isArray(msg.content)) {
                text = msg.content.find((part) => part.type === "text")?.text || "";
            }
            const role = msg.role === "assistant" ? "assistant" : "user";
            return { role, content: text };
        });
    }
    if (format === "gemini") {
        return messages.map((msg) => {
            // Gemini carries text in parts[]; "model" maps to "assistant".
            let text = "";
            if (msg.parts && Array.isArray(msg.parts)) {
                text = msg.parts.map((part) => part.text || "").join("");
            }
            else if (typeof msg.content === "string") {
                text = msg.content;
            }
            const role = msg.role === "model" ? "assistant" : "user";
            return { role, content: text };
        });
    }
    // OpenAI format: keep the role, stringify any structured content.
    return messages.map((msg) => ({
        role: msg.role,
        content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content),
    }));
}
99
/**
 * Convert a Spectyra /chat response back into the wire format the client
 * expects: Anthropic messages, Gemini generateContent, or (default)
 * OpenAI chat completion.
 *
 * @param {object} data - Raw Spectyra response body.
 * @param {"anthropic"|"gemini"|string} format - Target wire format.
 * @param {string} model - Model name echoed back to the client.
 * @returns {object} Response body in the target format.
 */
function convertResponseFromSpectyra(data, format, model) {
    const responseText = data.response_text || data.responseText || "";
    const usage = data.usage || {};
    const inputTokens = usage.input_tokens || 0;
    const outputTokens = usage.output_tokens || 0;
    const totalTokens = usage.total_tokens || 0;
    // Fall back to a timestamp when the upstream supplies no id.
    const id = data.id || Date.now();
    switch (format) {
        case "anthropic":
            return {
                id: `msg-${id}`,
                type: "message",
                role: "assistant",
                content: [{ type: "text", text: responseText }],
                model,
                stop_reason: "end_turn",
                stop_sequence: null,
                usage: {
                    input_tokens: inputTokens,
                    output_tokens: outputTokens,
                },
            };
        case "gemini":
            return {
                candidates: [{
                        content: {
                            parts: [{ text: responseText }],
                            role: "model",
                        },
                        finishReason: "STOP",
                        safetyRatings: [],
                    }],
                usageMetadata: {
                    promptTokenCount: inputTokens,
                    candidatesTokenCount: outputTokens,
                    totalTokenCount: totalTokens,
                },
            };
        default:
            return {
                id: `chatcmpl-${id}`,
                object: "chat.completion",
                created: Math.floor(Date.now() / 1000),
                model,
                choices: [{
                        index: 0,
                        message: {
                            role: "assistant",
                            content: responseText,
                        },
                        finish_reason: "stop",
                    }],
                usage: {
                    prompt_tokens: inputTokens,
                    completion_tokens: outputTokens,
                    total_tokens: totalTokens,
                },
            };
    }
}
164
/**
 * Guess which provider wire format an incoming request uses, based on its
 * URL path and body shape. Defaults to "openai" when nothing matches.
 *
 * @param {import("express").Request} req - Incoming request (reads
 *   req.path and req.body).
 * @returns {"anthropic"|"gemini"|"openai"}
 */
function detectApiFormat(req) {
    // Named reqPath rather than `path` so it no longer shadows the
    // node `path` module imported at the top of the file.
    const reqPath = req.path;
    const body = req.body;
    if (reqPath.includes("/v1/messages")) {
        return "anthropic";
    }
    if (reqPath.includes("/v1/models") || reqPath.includes("generativelanguage")) {
        return "gemini";
    }
    if (reqPath.includes("/v1/chat/completions") && Array.isArray(body.messages)) {
        // Anthropic-style clients send content as an array of typed parts
        // even on the OpenAI-compatible route.
        if (body.messages.some((m) => Array.isArray(m.content))) {
            return "anthropic";
        }
        return "openai";
    }
    return "openai";
}
183
// Provider-compatible entry points; each delegates to handleRequest with
// the wire format implied by the route that matched.
app.post("/v1/chat/completions", (req, res) => handleRequest(req, res, "openai"));
app.post("/v1/messages", (req, res) => handleRequest(req, res, "anthropic"));
app.post("/v1/*/generateContent", (req, res) => handleRequest(req, res, "gemini"));
192
/**
 * Core proxy handler: translate the incoming provider-format request into
 * a Spectyra /chat call, then translate the response back to the client's
 * wire format. Responds 500 when the proxy has no stored API keys.
 *
 * @param {import("express").Request} req
 * @param {import("express").Response} res
 * @param {"openai"|"anthropic"|"gemini"} detectedFormat - Wire format
 *   implied by the route that received the request.
 */
async function handleRequest(req, res, detectedFormat) {
    const config = loadConfig();
    if (!config || !config.spectyraKey || !config.providerKey) {
        return res.status(500).json({
            error: {
                message: "Proxy not configured. Please configure API keys first.",
                type: "configuration_error",
            },
        });
    }
    try {
        // Prefer the explicitly configured provider; otherwise infer it
        // from the request's wire format.
        const provider = config.provider || (detectedFormat === "anthropic" ? "anthropic" : detectedFormat === "gemini" ? "gemini" : "openai");
        const format = detectedFormat;
        let model;
        let messages;
        if (format === "anthropic") {
            model = req.body.model || "claude-3-5-sonnet-20241022";
            messages = req.body.messages || [];
            if (req.body.system) {
                // Build a new array instead of unshift() so the caller-owned
                // req.body.messages array is never mutated.
                messages = [{ role: "system", content: req.body.system }, ...messages];
            }
        }
        else if (format === "gemini") {
            model = req.body.model || "gemini-pro";
            const contents = req.body.contents || [];
            messages = contents.map((c) => ({
                role: c.role,
                parts: c.parts || [],
            }));
        }
        else {
            model = req.body.model || "gpt-4o-mini";
            messages = req.body.messages || [];
        }
        const spectyraMessages = convertMessagesToSpectyra(messages, format);
        const response = await fetch(`${SPECTYRA_API}/chat`, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "X-SPECTYRA-KEY": config.spectyraKey,
                "X-PROVIDER-KEY": config.providerKey,
            },
            body: JSON.stringify({
                path: config.path || "code",
                provider,
                model,
                messages: spectyraMessages,
                mode: "optimized",
                optimization_level: config.optimizationLevel || 2,
            }),
        });
        if (!response.ok) {
            // Log the upstream body for operators; clients get a terse error.
            const errorText = await response.text();
            console.error("Spectyra API error:", response.status, errorText);
            return res.status(response.status).json({
                error: {
                    message: `Spectyra API error: ${response.statusText}`,
                    type: "api_error",
                },
            });
        }
        const data = await response.json();
        if (data.savings) {
            addStats(data.savings, model);
            console.log(`šŸ’° Saved ${data.savings.pct_saved?.toFixed(1)}% (${data.savings.tokens_saved} tokens, $${data.savings.cost_saved_usd?.toFixed(4)})`);
        }
        const providerResponse = convertResponseFromSpectyra(data, format, model);
        res.json(providerResponse);
    }
    catch (error) {
        console.error("Proxy error:", error);
        res.status(500).json({
            error: {
                message: error.message || "Internal proxy error",
                type: "proxy_error",
            },
        });
    }
}
271
// Save API keys and routing options posted by a client of the proxy port.
app.post("/config", (req, res) => {
    try {
        // Table of persisted fields and their fallbacks when absent/falsy.
        const fallbacks = {
            spectyraKey: "",
            providerKey: "",
            provider: "openai",
            path: "code",
            optimizationLevel: 2,
        };
        const config = Object.fromEntries(
            Object.entries(fallbacks).map(([key, fallback]) => [key, req.body[key] || fallback])
        );
        saveConfig(config);
        res.json({ success: true, message: "Configuration saved" });
    }
    catch (error) {
        res.status(500).json({ error: error.message });
    }
});
287
// Report configuration status without echoing the stored API keys.
app.get("/config", (req, res) => {
    const config = loadConfig();
    if (!config) {
        return res.json({ configured: false });
    }
    const { provider, path: configuredPath, optimizationLevel } = config;
    res.json({
        configured: true,
        provider,
        path: configuredPath,
        optimizationLevel,
    });
});
299
// Lifetime savings summary plus the 50 most recent request records.
app.get("/stats", (req, res) => {
    const { totalRequests, totalTokensSaved, totalCostSaved, requests } = stats;
    res.json({
        totalRequests,
        totalTokensSaved,
        totalCostSaved,
        recentRequests: requests.slice(-50),
    });
});
307
// Liveness probe; also reports whether both API keys are present.
app.get("/health", (req, res) => {
    const config = loadConfig();
    const configured = Boolean(config && config.spectyraKey && config.providerKey);
    res.json({
        status: "ok",
        service: "spectyra-proxy",
        configured,
    });
});
315
// Boot the proxy and print a quick-start banner with configuration status.
app.listen(PORT, () => {
    console.log(`\nšŸš€ Spectyra Proxy running on http://localhost:${PORT}`);
    console.log(`šŸ“Š Dashboard: http://localhost:${DASHBOARD_PORT}`);
    console.log(`šŸ”— Routing to: ${SPECTYRA_API}`);
    const config = loadConfig();
    const ready = config && config.spectyraKey && config.providerKey;
    if (ready) {
        console.log(`āœ… Proxy configured (${config.provider}, ${config.path} path)`);
    }
    else {
        console.log(`\nāš ļø Proxy not configured!`);
        console.log(` Visit http://localhost:${DASHBOARD_PORT} to configure`);
    }
    console.log(`\nšŸ’” To use with OpenAI-compatible tools, set:`);
    console.log(` OPENAI_API_BASE=http://localhost:${PORT}/v1\n`);
});
330
// Separate Express app serving the static dashboard UI and its JSON API
// on DASHBOARD_PORT.
const dashboard = express();
dashboard.use(cors());
dashboard.use(express.json());
dashboard.use(express.static(DASHBOARD_DIR));
334
// Return the stored config so the dashboard UI can prefill its form.
// NOTE(review): this echoes spectyraKey/providerKey verbatim and the app
// has open CORS, so any page the user's browser loads could read these
// keys from localhost — consider redacting secrets here.
dashboard.get("/api/config", (req, res) => {
    const config = loadConfig();
    res.json(config || {});
});
338
// Save API keys and routing options submitted from the dashboard UI.
dashboard.post("/api/config", (req, res) => {
    try {
        // Table of persisted fields and their fallbacks when absent/falsy.
        const fallbacks = {
            spectyraKey: "",
            providerKey: "",
            provider: "openai",
            path: "code",
            optimizationLevel: 2,
        };
        const config = Object.fromEntries(
            Object.entries(fallbacks).map(([key, fallback]) => [key, req.body[key] || fallback])
        );
        saveConfig(config);
        res.json({ success: true });
    }
    catch (error) {
        res.status(500).json({ error: error.message });
    }
});
354
// Lifetime savings summary plus the 100 most recent request records.
dashboard.get("/api/stats", (req, res) => {
    const { totalRequests, totalTokensSaved, totalCostSaved, requests } = stats;
    res.json({
        totalRequests,
        totalTokensSaved,
        totalCostSaved,
        recentRequests: requests.slice(-100),
    });
});
362
// SPA fallback: any unmatched dashboard path serves the index page.
dashboard.get("*", (req, res) => {
    const indexPage = path.join(DASHBOARD_DIR, "index.html");
    res.sendFile(indexPage);
});
// Serve the dashboard on its own port, independent of the proxy.
dashboard.listen(DASHBOARD_PORT, () =>
    console.log(`šŸ“Š Dashboard running on http://localhost:${DASHBOARD_PORT}`));
package/package.json ADDED
@@ -0,0 +1,48 @@
1
+ {
2
+ "name": "spectyra-proxy",
3
+ "version": "1.0.0",
4
+ "type": "module",
5
+ "description": "Local proxy for Spectyra optimization - works with Copilot, Cursor, Claude Code, and other coding assistants",
6
+ "main": "dist/spectyra-proxy.js",
7
+ "bin": {
8
+ "spectyra-proxy": "./dist/spectyra-proxy.js"
9
+ },
10
+ "files": [
11
+ "dist",
12
+ "README.md",
13
+ "SETUP_GUIDE.md",
14
+ "PROVIDER_SUPPORT.md",
15
+ "INSTALLATION.md",
16
+ "package.json"
17
+ ],
18
+ "scripts": {
19
+ "dev": "tsx watch spectyra-proxy.ts",
20
+ "start": "tsx spectyra-proxy.ts",
21
+ "build": "tsc && node scripts/prepare-dist.js",
22
+ "prepublishOnly": "npm run build",
23
+ "postinstall": "node -e \"console.log('āœ… Spectyra Proxy installed. Run: spectyra-proxy')\""
24
+ },
25
+ "dependencies": {
26
+ "express": "^4.18.2",
27
+ "cors": "^2.8.5"
28
+ },
29
+ "devDependencies": {
30
+ "@types/express": "^4.17.21",
31
+ "@types/cors": "^2.8.17",
32
+ "@types/node": "^20.10.0",
33
+ "tsx": "^4.7.1",
34
+ "typescript": "^5.3.3"
35
+ },
36
+ "engines": {
37
+ "node": ">=18.0.0"
38
+ },
39
+ "keywords": [
40
+ "spectyra",
41
+ "llm",
42
+ "optimization",
43
+ "proxy",
44
+ "copilot",
45
+ "cursor",
46
+ "claude"
47
+ ]
48
+ }