spindb 0.32.2 → 0.34.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +8 -0
- package/README.md +112 -855
- package/cli/commands/connect.ts +99 -0
- package/cli/commands/create.ts +5 -1
- package/cli/commands/engines.ts +78 -1
- package/cli/commands/menu/backup-handlers.ts +9 -0
- package/cli/commands/menu/container-handlers.ts +41 -12
- package/cli/commands/menu/engine-handlers.ts +4 -0
- package/cli/commands/menu/index.ts +72 -1
- package/cli/commands/menu/settings-handlers.ts +3 -0
- package/cli/commands/menu/shell-handlers.ts +592 -12
- package/cli/commands/ports.ts +211 -0
- package/cli/constants.ts +7 -3
- package/cli/helpers.ts +73 -0
- package/cli/index.ts +2 -0
- package/cli/ui/prompts.ts +4 -2
- package/config/backup-formats.ts +14 -0
- package/config/engine-defaults.ts +13 -0
- package/config/engines.json +17 -0
- package/core/config-manager.ts +18 -0
- package/core/dblab-utils.ts +113 -0
- package/core/dependency-manager.ts +6 -0
- package/core/docker-exporter.ts +13 -0
- package/core/pgweb-utils.ts +62 -0
- package/engines/base-engine.ts +9 -0
- package/engines/cockroachdb/index.ts +3 -0
- package/engines/ferretdb/index.ts +46 -27
- package/engines/index.ts +4 -0
- package/engines/influxdb/README.md +180 -0
- package/engines/influxdb/api-client.ts +64 -0
- package/engines/influxdb/backup.ts +160 -0
- package/engines/influxdb/binary-manager.ts +110 -0
- package/engines/influxdb/binary-urls.ts +69 -0
- package/engines/influxdb/hostdb-releases.ts +23 -0
- package/engines/influxdb/index.ts +1227 -0
- package/engines/influxdb/restore.ts +417 -0
- package/engines/influxdb/version-maps.ts +75 -0
- package/engines/influxdb/version-validator.ts +128 -0
- package/engines/postgresql/index.ts +3 -0
- package/package.json +2 -1
- package/types/index.ts +17 -0
package/core/docker-exporter.ts
CHANGED
|
@@ -71,6 +71,7 @@ function getEngineDisplayName(engine: Engine): string {
|
|
|
71
71
|
[Engine.SurrealDB]: 'SurrealDB',
|
|
72
72
|
[Engine.QuestDB]: 'QuestDB',
|
|
73
73
|
[Engine.TypeDB]: 'TypeDB',
|
|
74
|
+
[Engine.InfluxDB]: 'InfluxDB',
|
|
74
75
|
}
|
|
75
76
|
return displayNames[engine] || engine
|
|
76
77
|
}
|
|
@@ -141,6 +142,9 @@ const _ENGINE_BINARY_CONFIG: Record<
|
|
|
141
142
|
[Engine.TypeDB]: {
|
|
142
143
|
primaryBinaries: ['typedb', 'typedb_console_bin'],
|
|
143
144
|
},
|
|
145
|
+
[Engine.InfluxDB]: {
|
|
146
|
+
primaryBinaries: [], // REST API only, no CLI tools
|
|
147
|
+
},
|
|
144
148
|
}
|
|
145
149
|
|
|
146
150
|
/**
|
|
@@ -193,6 +197,7 @@ function getConnectionStringTemplate(
|
|
|
193
197
|
return useTLS ? `https://<host>:${port}` : `http://<host>:${port}`
|
|
194
198
|
|
|
195
199
|
case Engine.Meilisearch:
|
|
200
|
+
case Engine.InfluxDB:
|
|
196
201
|
return useTLS ? `https://<host>:${port}` : `http://<host>:${port}`
|
|
197
202
|
|
|
198
203
|
case Engine.CouchDB:
|
|
@@ -492,6 +497,13 @@ echo "User configured via server settings"
|
|
|
492
497
|
userCreationCommands = `
|
|
493
498
|
# API key is configured at server start
|
|
494
499
|
echo "API key configured via server settings"
|
|
500
|
+
`
|
|
501
|
+
break
|
|
502
|
+
|
|
503
|
+
case Engine.InfluxDB:
|
|
504
|
+
userCreationCommands = `
|
|
505
|
+
# InfluxDB 3.x local dev runs without authentication
|
|
506
|
+
echo "No authentication required for local InfluxDB 3.x"
|
|
495
507
|
`
|
|
496
508
|
break
|
|
497
509
|
|
|
@@ -1284,6 +1296,7 @@ export async function getDockerConnectionString(
|
|
|
1284
1296
|
return `http://${host}:${port}`
|
|
1285
1297
|
|
|
1286
1298
|
case Engine.Meilisearch:
|
|
1299
|
+
case Engine.InfluxDB:
|
|
1287
1300
|
return `http://${host}:${port}`
|
|
1288
1301
|
|
|
1289
1302
|
case Engine.CouchDB:
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
import { existsSync } from 'fs'
|
|
2
|
+
import { readFile, unlink } from 'fs/promises'
|
|
3
|
+
import { join } from 'path'
|
|
4
|
+
import { platformService } from './platform-service'
|
|
5
|
+
import { paths } from '../config/paths'
|
|
6
|
+
|
|
7
|
+
/** Pinned pgweb version — single source of truth for download URL */
|
|
8
|
+
export const PGWEB_VERSION = '0.17.0'
|
|
9
|
+
|
|
10
|
+
/**
|
|
11
|
+
* Check if pgweb is running for a container.
|
|
12
|
+
* Reads pgweb.pid/pgweb.port files and verifies the process is alive.
|
|
13
|
+
* Cleans up stale PID/port files if the process is dead.
|
|
14
|
+
*/
|
|
15
|
+
export async function getPgwebStatus(
|
|
16
|
+
containerName: string,
|
|
17
|
+
engine: string,
|
|
18
|
+
): Promise<{ running: boolean; port?: number; pid?: number }> {
|
|
19
|
+
const containerDir = paths.getContainerPath(containerName, { engine })
|
|
20
|
+
const pidFile = join(containerDir, 'pgweb.pid')
|
|
21
|
+
const portFile = join(containerDir, 'pgweb.port')
|
|
22
|
+
|
|
23
|
+
if (!existsSync(pidFile)) return { running: false }
|
|
24
|
+
|
|
25
|
+
try {
|
|
26
|
+
const pid = parseInt(await readFile(pidFile, 'utf8'), 10)
|
|
27
|
+
if (platformService.isProcessRunning(pid)) {
|
|
28
|
+
const port = parseInt(await readFile(portFile, 'utf8'), 10)
|
|
29
|
+
return { running: true, port, pid }
|
|
30
|
+
}
|
|
31
|
+
} catch {
|
|
32
|
+
// PID file invalid or process dead
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
// Clean up stale files
|
|
36
|
+
await unlink(pidFile).catch(() => {})
|
|
37
|
+
await unlink(portFile).catch(() => {})
|
|
38
|
+
return { running: false }
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
/**
|
|
42
|
+
* Stop a running pgweb process for a container (no UI output).
|
|
43
|
+
* Returns true if a process was stopped, false if nothing was running.
|
|
44
|
+
*/
|
|
45
|
+
export async function stopPgweb(
|
|
46
|
+
containerName: string,
|
|
47
|
+
engine: string,
|
|
48
|
+
): Promise<boolean> {
|
|
49
|
+
const status = await getPgwebStatus(containerName, engine)
|
|
50
|
+
if (!status.running || !status.pid) return false
|
|
51
|
+
|
|
52
|
+
try {
|
|
53
|
+
await platformService.terminateProcess(status.pid, false)
|
|
54
|
+
} catch {
|
|
55
|
+
// Already gone
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
const containerDir = paths.getContainerPath(containerName, { engine })
|
|
59
|
+
await unlink(join(containerDir, 'pgweb.pid')).catch(() => {})
|
|
60
|
+
await unlink(join(containerDir, 'pgweb.port')).catch(() => {})
|
|
61
|
+
return true
|
|
62
|
+
}
|
package/engines/base-engine.ts
CHANGED
|
@@ -13,6 +13,7 @@ import type {
|
|
|
13
13
|
UserCredentials,
|
|
14
14
|
} from '../types'
|
|
15
15
|
import { UnsupportedOperationError } from '../core/error-handler'
|
|
16
|
+
import { stopPgweb } from '../core/pgweb-utils'
|
|
16
17
|
|
|
17
18
|
/**
|
|
18
19
|
* Base class for database engines
|
|
@@ -267,6 +268,14 @@ export abstract class BaseEngine {
|
|
|
267
268
|
// Default: no-op. Override in engines that support connection termination.
|
|
268
269
|
}
|
|
269
270
|
|
|
271
|
+
/**
|
|
272
|
+
* Stop pgweb if running for this container.
|
|
273
|
+
* Called from stop() in engines that support pgweb (PostgreSQL, CockroachDB, FerretDB).
|
|
274
|
+
*/
|
|
275
|
+
protected async stopPgweb(containerName: string): Promise<void> {
|
|
276
|
+
await stopPgweb(containerName, this.name)
|
|
277
|
+
}
|
|
278
|
+
|
|
270
279
|
/**
|
|
271
280
|
* Execute a query and return results in a structured format.
|
|
272
281
|
* @param container - The container configuration
|
|
@@ -622,41 +622,57 @@ export class FerretDBEngine extends BaseEngine {
|
|
|
622
622
|
let ferretStarted = false
|
|
623
623
|
|
|
624
624
|
try {
|
|
625
|
-
// 1. Start PostgreSQL
|
|
625
|
+
// 1. Start PostgreSQL (skip if already running)
|
|
626
626
|
onProgress?.({
|
|
627
627
|
stage: 'starting',
|
|
628
628
|
message: 'Starting PostgreSQL backend...',
|
|
629
629
|
})
|
|
630
630
|
|
|
631
|
-
//
|
|
632
|
-
|
|
631
|
+
// Check if PostgreSQL backend is already running in this data dir
|
|
632
|
+
let pgAlreadyRunning = false
|
|
633
633
|
try {
|
|
634
|
-
await spawnAsync(
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
)
|
|
648
|
-
} catch (pgError) {
|
|
649
|
-
// Read PostgreSQL log for debugging
|
|
650
|
-
let pgLog = ''
|
|
634
|
+
await spawnAsync(pgCtl, ['status', '-D', pgDataDir], {
|
|
635
|
+
env: pgSpawnEnv,
|
|
636
|
+
timeout: 5000,
|
|
637
|
+
})
|
|
638
|
+
// pg_ctl status exits 0 if server is running
|
|
639
|
+
pgAlreadyRunning = true
|
|
640
|
+
logDebug('PostgreSQL backend already running, skipping start')
|
|
641
|
+
} catch {
|
|
642
|
+
// Exit code != 0 means not running — proceed to start
|
|
643
|
+
}
|
|
644
|
+
|
|
645
|
+
if (!pgAlreadyRunning) {
|
|
646
|
+
// Use pg_ctl to start PostgreSQL
|
|
647
|
+
// Add 60s timeout to prevent hanging if PostgreSQL fails to start (especially on Windows)
|
|
651
648
|
try {
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
649
|
+
await spawnAsync(
|
|
650
|
+
pgCtl,
|
|
651
|
+
[
|
|
652
|
+
'start',
|
|
653
|
+
'-D',
|
|
654
|
+
pgDataDir,
|
|
655
|
+
'-l',
|
|
656
|
+
pgLogFile,
|
|
657
|
+
'-o',
|
|
658
|
+
`-p ${backendPort} -h 127.0.0.1`,
|
|
659
|
+
'-w', // Wait for startup
|
|
660
|
+
],
|
|
661
|
+
{ env: pgSpawnEnv, timeout: 60000 },
|
|
662
|
+
)
|
|
663
|
+
} catch (pgError) {
|
|
664
|
+
// Read PostgreSQL log for debugging
|
|
665
|
+
let pgLog = ''
|
|
666
|
+
try {
|
|
667
|
+
pgLog = await readFile(pgLogFile, 'utf8')
|
|
668
|
+
} catch {
|
|
669
|
+
pgLog = '(no log available)'
|
|
670
|
+
}
|
|
671
|
+
throw new Error(
|
|
672
|
+
`PostgreSQL backend failed to start: ${pgError instanceof Error ? pgError.message : pgError}\n` +
|
|
673
|
+
`PostgreSQL log:\n${pgLog.slice(-2000)}`, // Last 2KB of log
|
|
674
|
+
)
|
|
655
675
|
}
|
|
656
|
-
throw new Error(
|
|
657
|
-
`PostgreSQL backend failed to start: ${pgError instanceof Error ? pgError.message : pgError}\n` +
|
|
658
|
-
`PostgreSQL log:\n${pgLog.slice(-2000)}`, // Last 2KB of log
|
|
659
|
-
)
|
|
660
676
|
}
|
|
661
677
|
|
|
662
678
|
pgStarted = true
|
|
@@ -880,6 +896,9 @@ export class FerretDBEngine extends BaseEngine {
|
|
|
880
896
|
await this.stopPostgreSQLProcess(pgCtl, pgDataDir, pgSpawnEnv)
|
|
881
897
|
}
|
|
882
898
|
|
|
899
|
+
// Kill pgweb if running for this container
|
|
900
|
+
await this.stopPgweb(name)
|
|
901
|
+
|
|
883
902
|
logDebug('FerretDB stopped')
|
|
884
903
|
}
|
|
885
904
|
|
package/engines/index.ts
CHANGED
|
@@ -15,6 +15,7 @@ import { cockroachdbEngine } from './cockroachdb'
|
|
|
15
15
|
import { surrealdbEngine } from './surrealdb'
|
|
16
16
|
import { questdbEngine } from './questdb'
|
|
17
17
|
import { typedbEngine } from './typedb'
|
|
18
|
+
import { influxdbEngine } from './influxdb'
|
|
18
19
|
import { platformService } from '../core/platform-service'
|
|
19
20
|
import { Engine, Platform } from '../types'
|
|
20
21
|
import type { BaseEngine } from './base-engine'
|
|
@@ -80,6 +81,9 @@ export const engines: Record<string, BaseEngine> = {
|
|
|
80
81
|
// TypeDB and aliases
|
|
81
82
|
[Engine.TypeDB]: typedbEngine,
|
|
82
83
|
tdb: typedbEngine,
|
|
84
|
+
// InfluxDB and aliases
|
|
85
|
+
[Engine.InfluxDB]: influxdbEngine,
|
|
86
|
+
influx: influxdbEngine,
|
|
83
87
|
}
|
|
84
88
|
|
|
85
89
|
// Get an engine by name
|
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
# InfluxDB Engine Implementation
|
|
2
|
+
|
|
3
|
+
## Overview
|
|
4
|
+
|
|
5
|
+
InfluxDB 3.x is a time-series database rewritten in Rust. It uses a REST API for all operations (no CLI client). InfluxDB 3.x supports SQL queries via its HTTP API, unlike earlier versions which used InfluxQL/Flux.
|
|
6
|
+
|
|
7
|
+
## Platform Support
|
|
8
|
+
|
|
9
|
+
| Platform | Architecture | Status | Notes |
|
|
10
|
+
|----------|--------------|--------|-------|
|
|
11
|
+
| darwin | x64 | Supported | Uses hostdb binaries |
|
|
12
|
+
| darwin | arm64 | Supported | Uses hostdb binaries (Apple Silicon) |
|
|
13
|
+
| linux | x64 | Supported | Uses hostdb binaries |
|
|
14
|
+
| linux | arm64 | Supported | Uses hostdb binaries |
|
|
15
|
+
| win32 | x64 | Supported | Uses hostdb binaries |
|
|
16
|
+
|
|
17
|
+
## Binary Packaging
|
|
18
|
+
|
|
19
|
+
### Archive Format
|
|
20
|
+
- **Unix (macOS/Linux)**: `tar.gz`
|
|
21
|
+
- **Windows**: `zip`
|
|
22
|
+
|
|
23
|
+
### Archive Structure
|
|
24
|
+
```text
|
|
25
|
+
influxdb/
|
|
26
|
+
├── influxdb3 # Server binary
|
|
27
|
+
├── python/ # Bundled Python runtime
|
|
28
|
+
│ └── lib/
|
|
29
|
+
│ └── libpython3.13.dylib
|
|
30
|
+
├── LICENSE-APACHE
|
|
31
|
+
└── LICENSE-MIT
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
### Binary + Python Runtime
|
|
35
|
+
InfluxDB 3.x ships as a single `influxdb3` binary that acts as the server, bundled with a Python runtime. The binary uses `@executable_path/python/lib/libpython3.13.dylib`, so the `python/` directory must be co-located with the binary. The custom `moveExtractedEntries` override ensures both end up in `bin/`. There is no separate CLI client — all interactions use the REST API.
|
|
36
|
+
|
|
37
|
+
### Version Map Sync
|
|
38
|
+
|
|
39
|
+
```typescript
|
|
40
|
+
export const INFLUXDB_VERSION_MAP: Record<string, string> = {
|
|
41
|
+
'3': '3.8.0',
|
|
42
|
+
}
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
## Implementation Details
|
|
46
|
+
|
|
47
|
+
### Binary Manager
|
|
48
|
+
|
|
49
|
+
InfluxDB uses `BaseBinaryManager` since it's a server-based engine with single-digit major versions:
|
|
50
|
+
|
|
51
|
+
```typescript
|
|
52
|
+
class InfluxDBBinaryManager extends BaseBinaryManager {
|
|
53
|
+
protected readonly config = {
|
|
54
|
+
engine: Engine.InfluxDB,
|
|
55
|
+
engineName: 'influxdb',
|
|
56
|
+
displayName: 'InfluxDB',
|
|
57
|
+
serverBinary: 'influxdb3',
|
|
58
|
+
}
|
|
59
|
+
}
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
### REST API Engine
|
|
63
|
+
|
|
64
|
+
InfluxDB is a **REST API engine**:
|
|
65
|
+
- `spindb run` is **NOT applicable** (scriptFileLabel is `null`)
|
|
66
|
+
- `spindb connect` opens the health endpoint info in terminal
|
|
67
|
+
- All operations use HTTP REST API
|
|
68
|
+
|
|
69
|
+
### Default Configuration
|
|
70
|
+
|
|
71
|
+
- **Default Port**: 8086
|
|
72
|
+
- **Health Endpoint**: `GET /health`
|
|
73
|
+
- **SQL Query Endpoint**: `POST /api/v3/query_sql`
|
|
74
|
+
- **Write Endpoint**: `POST /api/v3/write_lp`
|
|
75
|
+
- **No Authentication**: InfluxDB 3.x local dev has no auth by default
|
|
76
|
+
- **PID File**: `influxdb.pid` in container directory
|
|
77
|
+
|
|
78
|
+
### Database Creation
|
|
79
|
+
|
|
80
|
+
InfluxDB 3.x creates databases **implicitly on first write**. There is no explicit `CREATE DATABASE` command. When you write data with a database name, it's auto-created.
|
|
81
|
+
|
|
82
|
+
### Connection String Format
|
|
83
|
+
|
|
84
|
+
```text
|
|
85
|
+
http://127.0.0.1:{port}
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
## Backup & Restore
|
|
89
|
+
|
|
90
|
+
### Backup Formats
|
|
91
|
+
|
|
92
|
+
| Format | Extension | Method | Notes |
|
|
93
|
+
|--------|-----------|--------|-------|
|
|
94
|
+
| sql | `.sql` | REST API | SQL dump with CREATE TABLE + INSERT statements |
|
|
95
|
+
|
|
96
|
+
### Backup Method
|
|
97
|
+
|
|
98
|
+
Uses InfluxDB's SQL query API to export data:
|
|
99
|
+
1. `SHOW TABLES` — lists all tables/measurements
|
|
100
|
+
2. `SELECT * FROM {table}` — exports all data per table
|
|
101
|
+
3. Generates SQL INSERT statements for restore
|
|
102
|
+
|
|
103
|
+
### Restore Method
|
|
104
|
+
|
|
105
|
+
Parses SQL dump file and executes statements via `POST /api/v3/query_sql`.
|
|
106
|
+
|
|
107
|
+
## Integration Test Notes
|
|
108
|
+
|
|
109
|
+
### REST API Testing
|
|
110
|
+
|
|
111
|
+
Integration tests use `fetch()` to interact with InfluxDB REST API.
|
|
112
|
+
|
|
113
|
+
### Test Fixtures
|
|
114
|
+
|
|
115
|
+
Located in `tests/fixtures/influxdb/seeds/`:
|
|
116
|
+
- `README.md` documenting the API-based approach
|
|
117
|
+
|
|
118
|
+
## Known Issues & Gotchas
|
|
119
|
+
|
|
120
|
+
### 1. No CLI Client
|
|
121
|
+
|
|
122
|
+
InfluxDB 3.x has no bundled CLI client. All operations use the HTTP REST API. The `clientTools` array in engine-defaults is empty.
|
|
123
|
+
|
|
124
|
+
### 2. Implicit Database Creation
|
|
125
|
+
|
|
126
|
+
Databases are created on first write, not via explicit commands. `createDatabase()` verifies server health but doesn't create anything.
|
|
127
|
+
|
|
128
|
+
### 3. SQL Query Support
|
|
129
|
+
|
|
130
|
+
InfluxDB 3.x supports SQL queries (not InfluxQL or Flux from v1/v2). Query via:
|
|
131
|
+
```bash
|
|
132
|
+
curl -X POST http://localhost:8086/api/v3/query_sql \
|
|
133
|
+
-H "Content-Type: application/json" \
|
|
134
|
+
-d '{"db":"mydb","q":"SELECT * FROM measurement","format":"json"}'
|
|
135
|
+
```
|
|
136
|
+
|
|
137
|
+
### 4. Write via Line Protocol
|
|
138
|
+
|
|
139
|
+
Data writes use InfluxDB line protocol format:
|
|
140
|
+
```bash
|
|
141
|
+
curl -X POST "http://localhost:8086/api/v3/write_lp?db=mydb" \
|
|
142
|
+
-H "Content-Type: text/plain" \
|
|
143
|
+
-d 'measurement,tag=value field=123'
|
|
144
|
+
```
|
|
145
|
+
|
|
146
|
+
### 5. Windows PID Handling
|
|
147
|
+
|
|
148
|
+
On Windows, uses `platformService.findProcessByPort(port)` after startup to find the real PID, similar to QuestDB/TypeDB pattern.
|
|
149
|
+
|
|
150
|
+
## REST API Quick Reference
|
|
151
|
+
|
|
152
|
+
### Health
|
|
153
|
+
```bash
|
|
154
|
+
GET /health
|
|
155
|
+
```
|
|
156
|
+
|
|
157
|
+
### Query (SQL)
|
|
158
|
+
```bash
|
|
159
|
+
POST /api/v3/query_sql
|
|
160
|
+
Content-Type: application/json
|
|
161
|
+
{"db":"mydb","q":"SELECT 1","format":"json"}
|
|
162
|
+
```
|
|
163
|
+
|
|
164
|
+
### Write (Line Protocol)
|
|
165
|
+
```bash
|
|
166
|
+
POST /api/v3/write_lp?db=mydb
|
|
167
|
+
Content-Type: text/plain
|
|
168
|
+
measurement,tag=value field=123
|
|
169
|
+
```
|
|
170
|
+
|
|
171
|
+
### Show Tables
|
|
172
|
+
```bash
|
|
173
|
+
POST /api/v3/query_sql
|
|
174
|
+
{"db":"mydb","q":"SHOW TABLES","format":"json"}
|
|
175
|
+
```
|
|
176
|
+
|
|
177
|
+
### List Databases
|
|
178
|
+
```bash
|
|
179
|
+
GET /api/v3/configure/database?format=json
|
|
180
|
+
```
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Shared InfluxDB REST API client utilities
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* Make an HTTP request to InfluxDB REST API
|
|
7
|
+
*
|
|
8
|
+
* @param port - The HTTP port InfluxDB is listening on
|
|
9
|
+
* @param method - HTTP method (GET, POST, PUT, DELETE)
|
|
10
|
+
* @param path - API path (e.g., '/health', '/api/v3/query_sql')
|
|
11
|
+
* @param body - Optional body: object for JSON, string for text/plain (line protocol)
|
|
12
|
+
* @param timeoutMs - Request timeout in milliseconds (default: 30s)
|
|
13
|
+
*/
|
|
14
|
+
export async function influxdbApiRequest(
|
|
15
|
+
port: number,
|
|
16
|
+
method: string,
|
|
17
|
+
path: string,
|
|
18
|
+
body?: Record<string, unknown> | string,
|
|
19
|
+
timeoutMs = 30000,
|
|
20
|
+
): Promise<{ status: number; data: unknown }> {
|
|
21
|
+
const url = `http://127.0.0.1:${port}${path}`
|
|
22
|
+
|
|
23
|
+
const controller = new AbortController()
|
|
24
|
+
const timeoutId = setTimeout(() => controller.abort(), timeoutMs)
|
|
25
|
+
|
|
26
|
+
const options: RequestInit = {
|
|
27
|
+
method,
|
|
28
|
+
signal: controller.signal,
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
if (body !== undefined) {
|
|
32
|
+
if (typeof body === 'string') {
|
|
33
|
+
options.headers = { 'Content-Type': 'text/plain' }
|
|
34
|
+
options.body = body
|
|
35
|
+
} else {
|
|
36
|
+
options.headers = { 'Content-Type': 'application/json' }
|
|
37
|
+
options.body = JSON.stringify(body)
|
|
38
|
+
}
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
try {
|
|
42
|
+
const response = await fetch(url, options)
|
|
43
|
+
|
|
44
|
+
// Try to parse as JSON, fall back to text for endpoints like /health
|
|
45
|
+
let data: unknown
|
|
46
|
+
const contentType = response.headers.get('content-type') || ''
|
|
47
|
+
if (contentType.includes('application/json')) {
|
|
48
|
+
data = await response.json()
|
|
49
|
+
} else {
|
|
50
|
+
data = await response.text()
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
return { status: response.status, data }
|
|
54
|
+
} catch (error) {
|
|
55
|
+
if (error instanceof Error && error.name === 'AbortError') {
|
|
56
|
+
throw new Error(
|
|
57
|
+
`InfluxDB API request timed out after ${timeoutMs / 1000}s: ${method} ${path}`,
|
|
58
|
+
)
|
|
59
|
+
}
|
|
60
|
+
throw error
|
|
61
|
+
} finally {
|
|
62
|
+
clearTimeout(timeoutId)
|
|
63
|
+
}
|
|
64
|
+
}
|
|
@@ -0,0 +1,160 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* InfluxDB backup module
|
|
3
|
+
* Supports SQL-based backup using InfluxDB's REST API to export data
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import { mkdir, stat, writeFile } from 'fs/promises'
|
|
7
|
+
import { existsSync } from 'fs'
|
|
8
|
+
import { dirname } from 'path'
|
|
9
|
+
import { logDebug } from '../../core/error-handler'
|
|
10
|
+
import { influxdbApiRequest } from './api-client'
|
|
11
|
+
import type { ContainerConfig, BackupOptions, BackupResult } from '../../types'
|
|
12
|
+
|
|
13
|
+
/**
|
|
14
|
+
* Create an SQL backup using InfluxDB's REST API
|
|
15
|
+
* Queries all tables and exports data as SQL INSERT statements
|
|
16
|
+
*/
|
|
17
|
+
export async function createBackup(
  container: ContainerConfig,
  outputPath: string,
  options: BackupOptions,
): Promise<BackupResult> {
  const { port } = container
  // Explicit --database option wins; otherwise back up the container's
  // configured database.
  const database = options.database || container.database

  // Ensure output directory exists
  const outputDir = dirname(outputPath)
  if (!existsSync(outputDir)) {
    await mkdir(outputDir, { recursive: true })
  }

  logDebug(
    `Creating InfluxDB SQL backup via REST API on port ${port} for database "${database}"`,
  )

  // Get list of tables in the database
  const tablesResponse = await influxdbApiRequest(
    port,
    'POST',
    '/api/v3/query_sql',
    {
      db: database,
      q: 'SHOW TABLES',
      format: 'json',
    },
  )

  if (tablesResponse.status !== 200) {
    throw new Error(
      `Failed to list tables: ${JSON.stringify(tablesResponse.data)}`,
    )
  }

  const tablesData = tablesResponse.data as Array<Record<string, unknown>>
  const tables: string[] = []

  // Extract user table names: include rows with 'iox' schema or no schema field,
  // skip system schemas (information_schema, system, etc.)
  if (Array.isArray(tablesData)) {
    for (const row of tablesData) {
      const schema = row.table_schema as string | undefined
      if (schema && schema !== 'iox') continue
      // Column naming varies by server response shape; fall back to the
      // first value in the row when no known name column is present.
      const tableName =
        (row.table_name as string) ||
        (row.name as string) ||
        (Object.values(row)[0] as string)
      if (tableName) {
        tables.push(tableName)
      }
    }
  }

  logDebug(`Found ${tables.length} tables: ${tables.join(', ')}`)

  // Build SQL dump
  let sqlContent = `-- InfluxDB SQL Backup\n`
  sqlContent += `-- Database: ${database}\n`
  sqlContent += `-- Created: ${new Date().toISOString()}\n\n`

  for (const table of tables) {
    logDebug(`Exporting table: ${table}`)

    // Query column metadata to identify tag columns
    // Tags use Dictionary(Int32, Utf8) type in InfluxDB 3.x
    const tagColumns: string[] = []
    try {
      const colResponse = await influxdbApiRequest(
        port,
        'POST',
        '/api/v3/query_sql',
        {
          db: database,
          // Single-quote escaping guards against quotes in table names
          q: `SELECT column_name, data_type FROM information_schema.columns WHERE table_name = '${table.replace(/'/g, "''")}'`,
          format: 'json',
        },
      )
      if (colResponse.status === 200 && Array.isArray(colResponse.data)) {
        for (const col of colResponse.data as Array<Record<string, unknown>>) {
          const dataType = String(col.data_type || '')
          if (dataType.includes('Dictionary')) {
            tagColumns.push(String(col.column_name))
          }
        }
      }
    } catch {
      // Best-effort: tag metadata only affects the "-- Tags:" comment below
      logDebug(`Warning: Could not query column metadata for ${table}`)
    }

    // Query all data from the table
    const dataResponse = await influxdbApiRequest(
      port,
      'POST',
      '/api/v3/query_sql',
      {
        db: database,
        q: `SELECT * FROM "${table.replace(/"/g, '""')}"`,
        format: 'json',
      },
    )

    if (dataResponse.status !== 200) {
      // Skip tables that fail to export instead of aborting the whole backup
      logDebug(
        `Warning: Failed to export table ${table}: ${JSON.stringify(dataResponse.data)}`,
      )
      continue
    }

    const rows = dataResponse.data as Array<Record<string, unknown>>

    if (Array.isArray(rows) && rows.length > 0) {
      sqlContent += `-- Table: ${table}\n`
      if (tagColumns.length > 0) {
        sqlContent += `-- Tags: ${tagColumns.join(', ')}\n`
      }

      for (const row of rows) {
        const columns = Object.keys(row)
        const values = columns.map((col) => {
          const val = row[col]
          if (val === null || val === undefined) return 'NULL'
          if (typeof val === 'number') return String(val)
          if (typeof val === 'boolean') return val ? 'true' : 'false'
          // Everything else (timestamps, strings) is emitted as a quoted
          // string with doubled single quotes.
          // NOTE(review): non-primitive values would stringify as
          // '[object Object]' here — presumably the query API only returns
          // scalars; confirm against the restore module.
          return `'${String(val).replace(/'/g, "''")}'`
        })
        sqlContent += `INSERT INTO "${table.replace(/"/g, '""')}" (${columns.map((c) => `"${c.replace(/"/g, '""')}"`).join(', ')}) VALUES (${values.join(', ')});\n`
      }
      sqlContent += '\n'
    }
  }

  // Write SQL content to file
  await writeFile(outputPath, sqlContent, 'utf-8')

  const stats = await stat(outputPath)

  return {
    path: outputPath,
    format: 'sql',
    size: stats.size,
  }
}
|