trickle-backend 0.1.64 → 0.1.66

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/Dockerfile ADDED
@@ -0,0 +1,23 @@
1
+ FROM node:20-alpine
2
+
3
+ WORKDIR /app
4
+
5
+ # Install dependencies
6
+ COPY package.json package-lock.json* ./
7
+ RUN npm ci --production 2>/dev/null || npm install --production
8
+
9
+ # Copy built files
10
+ COPY dist/ dist/
11
+
12
+ # Create data directory
13
+ RUN mkdir -p /data
14
+
15
+ ENV PORT=4888
16
+ ENV TRICKLE_DB_PATH=/data/trickle.db
17
+
18
+ EXPOSE 4888
19
+
20
+ HEALTHCHECK --interval=30s --timeout=3s --retries=3 \
21
+ CMD wget -qO- http://localhost:4888/api/health || exit 1
22
+
23
+ CMD ["node", "dist/index.js"]
@@ -7,9 +7,9 @@ exports.db = void 0;
7
7
  const path_1 = __importDefault(require("path"));
8
8
  const fs_1 = __importDefault(require("fs"));
9
9
  const better_sqlite3_1 = __importDefault(require("better-sqlite3"));
10
- const trickleDir = path_1.default.join(process.env.HOME || "~", ".trickle");
10
+ const dbPath = process.env.TRICKLE_DB_PATH || path_1.default.join(process.env.HOME || "~", ".trickle", "trickle.db");
11
+ const trickleDir = path_1.default.dirname(dbPath);
11
12
  fs_1.default.mkdirSync(trickleDir, { recursive: true });
12
- const dbPath = path_1.default.join(trickleDir, "trickle.db");
13
13
  const db = new better_sqlite3_1.default(dbPath);
14
14
  exports.db = db;
15
15
  db.pragma("journal_mode = WAL");
package/dist/index.js CHANGED
@@ -9,4 +9,35 @@ const cloud_migrations_1 = require("./db/cloud-migrations");
9
9
  const PORT = parseInt(process.env.PORT || "4888", 10);
10
10
  server_1.app.listen(PORT, () => {
11
11
  console.log(`[trickle] Backend listening on http://localhost:${PORT}`);
12
+ if (process.env.NODE_ENV === "production") {
13
+ console.log(`[trickle] Production mode enabled`);
14
+ }
12
15
  });
16
+ // ── Data retention — periodic cleanup of expired data ──
17
+ const RETENTION_DAYS = parseInt(process.env.TRICKLE_RETENTION_DAYS || "30", 10);
18
+ const CLEANUP_INTERVAL_MS = 6 * 3600_000; // Every 6 hours
19
+ function runDataRetention() {
20
+ try {
21
+ // Delete expired share links
22
+ const expiredLinks = connection_1.db.prepare("DELETE FROM share_links WHERE expires_at IS NOT NULL AND expires_at < datetime('now')").run();
23
+ // Delete old push history (keep last 30 days)
24
+ const oldHistory = connection_1.db.prepare(`DELETE FROM push_history WHERE pushed_at < datetime('now', '-${RETENTION_DAYS} days')`).run();
25
+ // Delete stale project data (not updated in retention period)
26
+ const staleData = connection_1.db.prepare(`DELETE FROM project_data WHERE pushed_at < datetime('now', '-${RETENTION_DAYS} days')`).run();
27
+ const total = (expiredLinks.changes || 0) + (oldHistory.changes || 0) + (staleData.changes || 0);
28
+ if (total > 0) {
29
+ console.log(`[trickle] Data retention: cleaned ${total} rows (${RETENTION_DAYS}d retention)`);
30
+ // Reclaim space
31
+ try {
32
+ connection_1.db.pragma("wal_checkpoint(TRUNCATE)");
33
+ }
34
+ catch { }
35
+ }
36
+ }
37
+ catch (err) {
38
+ console.error("[trickle] Data retention error:", err.message);
39
+ }
40
+ }
41
+ // Run retention on startup and periodically
42
+ setTimeout(runDataRetention, 10_000); // 10s after startup
43
+ setInterval(runDataRetention, CLEANUP_INTERVAL_MS);
@@ -70,7 +70,38 @@ router.post("/keys", (req, res) => {
70
70
  message: "Save this key — it cannot be retrieved later.",
71
71
  });
72
72
  });
73
- // ── POST /api/v1/push — Upload project data ──
73
+ // ── POST /api/v1/ingest — Real-time streaming ingest ──
74
+ // Accepts batched observations and appends to project data files.
75
+ // This enables `trickle run` to stream data to the cloud in real-time.
76
+ router.post("/ingest", requireAuth, (req, res) => {
77
+ const { project, file, lines } = req.body;
78
+ if (!project || !file || !lines) {
79
+ res.status(400).json({ error: "project, file, and lines required" });
80
+ return;
81
+ }
82
+ const projectId = `${req.keyId}:${project}`;
83
+ // Auto-create project
84
+ connection_1.db.prepare(`
85
+ INSERT INTO projects (id, name, owner_key_id, updated_at)
86
+ VALUES (?, ?, ?, datetime('now'))
87
+ ON CONFLICT(id) DO UPDATE SET updated_at = datetime('now')
88
+ `).run(projectId, project, req.keyId);
89
+ // Append to existing content (or create new)
90
+ const existing = connection_1.db.prepare("SELECT content FROM project_data WHERE project_id = ? AND filename = ?").get(projectId, file);
91
+ const newContent = typeof lines === "string" ? lines : lines.join("\n") + "\n";
92
+ const content = existing ? existing.content + newContent : newContent;
93
+ const bytes = Buffer.byteLength(content, "utf-8");
94
+ connection_1.db.prepare(`
95
+ INSERT INTO project_data (project_id, filename, content, size_bytes, pushed_at)
96
+ VALUES (?, ?, ?, ?, datetime('now'))
97
+ ON CONFLICT(project_id, filename) DO UPDATE SET
98
+ content = excluded.content,
99
+ size_bytes = excluded.size_bytes,
100
+ pushed_at = datetime('now')
101
+ `).run(projectId, file, content, bytes);
102
+ res.json({ ok: true, file, bytes });
103
+ });
104
+ // ── POST /api/v1/push — Upload project data (full replace) ──
74
105
  router.post("/push", requireAuth, (req, res) => {
75
106
  const { project, files, timestamp } = req.body;
76
107
  if (!project || typeof project !== "string") {
package/dist/server.js CHANGED
@@ -21,8 +21,56 @@ const search_1 = __importDefault(require("./routes/search"));
21
21
  const cloud_1 = __importDefault(require("./routes/cloud"));
22
22
  const app = (0, express_1.default)();
23
23
  exports.app = app;
24
- app.use((0, cors_1.default)());
25
- app.use(express_1.default.json({ limit: "5mb" }));
24
+ // ── Production middleware ──
25
+ // CORS — allow all origins for local dev, restrict in production
26
+ const allowedOrigins = process.env.TRICKLE_CORS_ORIGINS?.split(",") || [];
27
+ app.use((0, cors_1.default)(allowedOrigins.length > 0 ? { origin: allowedOrigins } : {}));
28
+ // Body size limits
29
+ app.use(express_1.default.json({ limit: "10mb" }));
30
+ // Rate limiting — simple in-memory token bucket per IP
31
+ const rateLimits = new Map();
32
+ const RATE_LIMIT_WINDOW_MS = 60_000; // 1 minute
33
+ const RATE_LIMIT_MAX = parseInt(process.env.TRICKLE_RATE_LIMIT || "300", 10); // 300 req/min default
34
+ function rateLimit(req, res, next) {
35
+ if (process.env.NODE_ENV !== "production" && !process.env.TRICKLE_RATE_LIMIT) {
36
+ return next(); // Skip rate limiting in dev unless explicitly enabled
37
+ }
38
+ const ip = req.ip || req.socket.remoteAddress || "unknown";
39
+ const now = Date.now();
40
+ let bucket = rateLimits.get(ip);
41
+ if (!bucket || now > bucket.resetAt) {
42
+ bucket = { count: 0, resetAt: now + RATE_LIMIT_WINDOW_MS };
43
+ rateLimits.set(ip, bucket);
44
+ }
45
+ bucket.count++;
46
+ if (bucket.count > RATE_LIMIT_MAX) {
47
+ res.status(429).json({ error: "Rate limit exceeded. Try again later." });
48
+ return;
49
+ }
50
+ // Periodic cleanup of old entries
51
+ if (rateLimits.size > 10000) {
52
+ for (const [key, val] of rateLimits) {
53
+ if (now > val.resetAt)
54
+ rateLimits.delete(key);
55
+ }
56
+ }
57
+ next();
58
+ }
59
+ app.use("/api/v1", rateLimit);
60
+ // Request logging in production
61
+ if (process.env.NODE_ENV === "production") {
62
+ app.use((req, _res, next) => {
63
+ const start = Date.now();
64
+ _res.on("finish", () => {
65
+ const ms = Date.now() - start;
66
+ if (ms > 1000 || _res.statusCode >= 400) {
67
+ console.log(`${req.method} ${req.path} ${_res.statusCode} ${ms}ms`);
68
+ }
69
+ });
70
+ next();
71
+ });
72
+ }
73
+ // ── Routes ──
26
74
  app.use("/api/ingest", ingest_1.default);
27
75
  app.use("/api/functions", functions_1.default);
28
76
  app.use("/api/types", types_1.default);
@@ -38,5 +86,10 @@ app.use("/api/search", search_1.default);
38
86
  app.use("/api/v1", cloud_1.default);
39
87
  // Health check
40
88
  app.get("/api/health", (_req, res) => {
41
- res.json({ ok: true, timestamp: new Date().toISOString() });
89
+ res.json({ ok: true, timestamp: new Date().toISOString(), version: process.env.npm_package_version || "dev" });
90
+ });
91
+ // Global error handler
92
+ app.use((err, _req, res, _next) => {
93
+ console.error("[trickle] Unhandled error:", err.message);
94
+ res.status(500).json({ error: "Internal server error" });
42
95
  });
package/fly.toml ADDED
@@ -0,0 +1,31 @@
1
+ app = "trickle-cloud"
2
+ primary_region = "lhr"
3
+
4
+ [build]
5
+ dockerfile = "Dockerfile"
6
+
7
+ [env]
8
+ PORT = "4888"
9
+ TRICKLE_DB_PATH = "/data/trickle.db"
10
+ NODE_ENV = "production"
11
+
12
+ [http_service]
13
+ internal_port = 4888
14
+ force_https = true
15
+ auto_stop_machines = "stop"
16
+ auto_start_machines = true
17
+ min_machines_running = 0
18
+
19
+ [http_service.concurrency]
20
+ type = "connections"
21
+ hard_limit = 100
22
+ soft_limit = 80
23
+
24
+ [mounts]
25
+ source = "trickle_data"
26
+ destination = "/data"
27
+
28
+ [[vm]]
29
+ memory = "512mb"
30
+ cpu_kind = "shared"
31
+ cpus = 1
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "trickle-backend",
3
- "version": "0.1.64",
3
+ "version": "0.1.66",
4
4
  "main": "dist/index.js",
5
5
  "scripts": {
6
6
  "build": "tsc",
@@ -2,12 +2,11 @@ import path from "path";
2
2
  import fs from "fs";
3
3
  import Database, { Database as DatabaseType } from "better-sqlite3";
4
4
 
5
- const trickleDir = path.join(process.env.HOME || "~", ".trickle");
5
+ const dbPath = process.env.TRICKLE_DB_PATH || path.join(process.env.HOME || "~", ".trickle", "trickle.db");
6
+ const trickleDir = path.dirname(dbPath);
6
7
 
7
8
  fs.mkdirSync(trickleDir, { recursive: true });
8
9
 
9
- const dbPath = path.join(trickleDir, "trickle.db");
10
-
11
10
  const db: DatabaseType = new Database(dbPath);
12
11
 
13
12
  db.pragma("journal_mode = WAL");
package/src/index.ts CHANGED
@@ -10,4 +10,44 @@ const PORT = parseInt(process.env.PORT || "4888", 10);
10
10
 
11
11
  app.listen(PORT, () => {
12
12
  console.log(`[trickle] Backend listening on http://localhost:${PORT}`);
13
+ if (process.env.NODE_ENV === "production") {
14
+ console.log(`[trickle] Production mode enabled`);
15
+ }
13
16
  });
17
+
18
+ // ── Data retention — periodic cleanup of expired data ──
19
+
20
+ const RETENTION_DAYS = parseInt(process.env.TRICKLE_RETENTION_DAYS || "30", 10);
21
+ const CLEANUP_INTERVAL_MS = 6 * 3600_000; // Every 6 hours
22
+
23
+ function runDataRetention(): void {
24
+ try {
25
+ // Delete expired share links
26
+ const expiredLinks = db.prepare(
27
+ "DELETE FROM share_links WHERE expires_at IS NOT NULL AND expires_at < datetime('now')"
28
+ ).run();
29
+
30
+ // Delete old push history (keep last 30 days)
31
+ const oldHistory = db.prepare(
32
+ `DELETE FROM push_history WHERE pushed_at < datetime('now', '-${RETENTION_DAYS} days')`
33
+ ).run();
34
+
35
+ // Delete stale project data (not updated in retention period)
36
+ const staleData = db.prepare(
37
+ `DELETE FROM project_data WHERE pushed_at < datetime('now', '-${RETENTION_DAYS} days')`
38
+ ).run();
39
+
40
+ const total = (expiredLinks.changes || 0) + (oldHistory.changes || 0) + (staleData.changes || 0);
41
+ if (total > 0) {
42
+ console.log(`[trickle] Data retention: cleaned ${total} rows (${RETENTION_DAYS}d retention)`);
43
+ // Reclaim space
44
+ try { db.pragma("wal_checkpoint(TRUNCATE)"); } catch {}
45
+ }
46
+ } catch (err: any) {
47
+ console.error("[trickle] Data retention error:", err.message);
48
+ }
49
+ }
50
+
51
+ // Run retention on startup and periodically
52
+ setTimeout(runDataRetention, 10_000); // 10s after startup
53
+ setInterval(runDataRetention, CLEANUP_INTERVAL_MS);
@@ -94,7 +94,49 @@ router.post("/keys", (req: Request, res: Response) => {
94
94
  });
95
95
  });
96
96
 
97
- // ── POST /api/v1/push — Upload project data ──
97
+ // ── POST /api/v1/ingest — Real-time streaming ingest ──
98
+ // Accepts batched observations and appends to project data files.
99
+ // This enables `trickle run` to stream data to the cloud in real-time.
100
+
101
+ router.post("/ingest", requireAuth, (req: AuthedRequest, res: Response) => {
102
+ const { project, file, lines } = req.body;
103
+
104
+ if (!project || !file || !lines) {
105
+ res.status(400).json({ error: "project, file, and lines required" });
106
+ return;
107
+ }
108
+
109
+ const projectId = `${req.keyId}:${project}`;
110
+
111
+ // Auto-create project
112
+ db.prepare(`
113
+ INSERT INTO projects (id, name, owner_key_id, updated_at)
114
+ VALUES (?, ?, ?, datetime('now'))
115
+ ON CONFLICT(id) DO UPDATE SET updated_at = datetime('now')
116
+ `).run(projectId, project, req.keyId);
117
+
118
+ // Append to existing content (or create new)
119
+ const existing = db.prepare(
120
+ "SELECT content FROM project_data WHERE project_id = ? AND filename = ?"
121
+ ).get(projectId, file) as any;
122
+
123
+ const newContent = typeof lines === "string" ? lines : (lines as string[]).join("\n") + "\n";
124
+ const content = existing ? existing.content + newContent : newContent;
125
+ const bytes = Buffer.byteLength(content, "utf-8");
126
+
127
+ db.prepare(`
128
+ INSERT INTO project_data (project_id, filename, content, size_bytes, pushed_at)
129
+ VALUES (?, ?, ?, ?, datetime('now'))
130
+ ON CONFLICT(project_id, filename) DO UPDATE SET
131
+ content = excluded.content,
132
+ size_bytes = excluded.size_bytes,
133
+ pushed_at = datetime('now')
134
+ `).run(projectId, file, content, bytes);
135
+
136
+ res.json({ ok: true, file, bytes });
137
+ });
138
+
139
+ // ── POST /api/v1/push — Upload project data (full replace) ──
98
140
 
99
141
  router.post("/push", requireAuth, (req: AuthedRequest, res: Response) => {
100
142
  const { project, files, timestamp } = req.body;
package/src/server.ts CHANGED
@@ -1,4 +1,4 @@
1
- import express from "express";
1
+ import express, { Request, Response, NextFunction } from "express";
2
2
  import cors from "cors";
3
3
 
4
4
  import ingestRouter from "./routes/ingest";
@@ -17,8 +17,67 @@ import cloudRouter from "./routes/cloud";
17
17
 
18
18
  const app = express();
19
19
 
20
- app.use(cors());
21
- app.use(express.json({ limit: "5mb" }));
20
+ // ── Production middleware ──
21
+
22
+ // CORS — allow all origins for local dev, restrict in production
23
+ const allowedOrigins = process.env.TRICKLE_CORS_ORIGINS?.split(",") || [];
24
+ app.use(cors(allowedOrigins.length > 0 ? { origin: allowedOrigins } : {}));
25
+
26
+ // Body size limits
27
+ app.use(express.json({ limit: "10mb" }));
28
+
29
+ // Rate limiting — simple in-memory token bucket per IP
30
+ const rateLimits = new Map<string, { count: number; resetAt: number }>();
31
+ const RATE_LIMIT_WINDOW_MS = 60_000; // 1 minute
32
+ const RATE_LIMIT_MAX = parseInt(process.env.TRICKLE_RATE_LIMIT || "300", 10); // 300 req/min default
33
+
34
+ function rateLimit(req: Request, res: Response, next: NextFunction): void {
35
+ if (process.env.NODE_ENV !== "production" && !process.env.TRICKLE_RATE_LIMIT) {
36
+ return next(); // Skip rate limiting in dev unless explicitly enabled
37
+ }
38
+
39
+ const ip = req.ip || req.socket.remoteAddress || "unknown";
40
+ const now = Date.now();
41
+ let bucket = rateLimits.get(ip);
42
+
43
+ if (!bucket || now > bucket.resetAt) {
44
+ bucket = { count: 0, resetAt: now + RATE_LIMIT_WINDOW_MS };
45
+ rateLimits.set(ip, bucket);
46
+ }
47
+
48
+ bucket.count++;
49
+ if (bucket.count > RATE_LIMIT_MAX) {
50
+ res.status(429).json({ error: "Rate limit exceeded. Try again later." });
51
+ return;
52
+ }
53
+
54
+ // Periodic cleanup of old entries
55
+ if (rateLimits.size > 10000) {
56
+ for (const [key, val] of rateLimits) {
57
+ if (now > val.resetAt) rateLimits.delete(key);
58
+ }
59
+ }
60
+
61
+ next();
62
+ }
63
+
64
+ app.use("/api/v1", rateLimit);
65
+
66
+ // Request logging in production
67
+ if (process.env.NODE_ENV === "production") {
68
+ app.use((req: Request, _res: Response, next: NextFunction) => {
69
+ const start = Date.now();
70
+ _res.on("finish", () => {
71
+ const ms = Date.now() - start;
72
+ if (ms > 1000 || _res.statusCode >= 400) {
73
+ console.log(`${req.method} ${req.path} ${_res.statusCode} ${ms}ms`);
74
+ }
75
+ });
76
+ next();
77
+ });
78
+ }
79
+
80
+ // ── Routes ──
22
81
 
23
82
  app.use("/api/ingest", ingestRouter);
24
83
  app.use("/api/functions", functionsRouter);
@@ -36,7 +95,13 @@ app.use("/api/v1", cloudRouter);
36
95
 
37
96
  // Health check
38
97
  app.get("/api/health", (_req, res) => {
39
- res.json({ ok: true, timestamp: new Date().toISOString() });
98
+ res.json({ ok: true, timestamp: new Date().toISOString(), version: process.env.npm_package_version || "dev" });
99
+ });
100
+
101
+ // Global error handler
102
+ app.use((err: Error, _req: Request, res: Response, _next: NextFunction) => {
103
+ console.error("[trickle] Unhandled error:", err.message);
104
+ res.status(500).json({ error: "Internal server error" });
40
105
  });
41
106
 
42
107
  export { app };