@push.rocks/smartdb 2.0.0 → 2.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.smartconfig.json +11 -0
- package/dist_rust/rustdb_linux_amd64 +0 -0
- package/dist_rust/rustdb_linux_arm64 +0 -0
- package/dist_ts/00_commitinfo_data.js +1 -1
- package/dist_ts/index.d.ts +2 -0
- package/dist_ts/index.js +3 -1
- package/dist_ts/ts_local/classes.localsmartdb.js +7 -4
- package/dist_ts/ts_migration/classes.storagemigrator.d.ts +24 -0
- package/dist_ts/ts_migration/classes.storagemigrator.js +75 -0
- package/dist_ts/ts_migration/index.d.ts +1 -0
- package/dist_ts/ts_migration/index.js +2 -0
- package/dist_ts/ts_migration/migrators/v0_to_v1.d.ts +9 -0
- package/dist_ts/ts_migration/migrators/v0_to_v1.js +225 -0
- package/dist_ts/ts_smartdb/index.d.ts +1 -0
- package/dist_ts/ts_smartdb/rust-db-bridge.d.ts +80 -1
- package/dist_ts/ts_smartdb/rust-db-bridge.js +17 -2
- package/dist_ts/ts_smartdb/server/SmartdbServer.d.ts +31 -0
- package/dist_ts/ts_smartdb/server/SmartdbServer.js +47 -5
- package/dist_ts_debugserver/bundled.d.ts +4 -0
- package/dist_ts_debugserver/bundled.js +12 -0
- package/dist_ts_debugserver/classes.debugserver.d.ts +36 -0
- package/dist_ts_debugserver/classes.debugserver.js +95 -0
- package/dist_ts_debugserver/index.d.ts +2 -0
- package/dist_ts_debugserver/index.js +2 -0
- package/dist_ts_debugserver/plugins.d.ts +2 -0
- package/dist_ts_debugserver/plugins.js +3 -0
- package/dist_ts_debugui/index.d.ts +2 -0
- package/dist_ts_debugui/index.js +2 -0
- package/dist_ts_debugui/plugins.d.ts +1 -0
- package/dist_ts_debugui/plugins.js +2 -0
- package/dist_ts_debugui/smartdb-debugui.d.ts +62 -0
- package/dist_ts_debugui/smartdb-debugui.js +1132 -0
- package/package.json +9 -4
- package/readme.md +161 -42
- package/ts/00_commitinfo_data.ts +1 -1
- package/ts/index.ts +14 -0
- package/ts/ts_local/classes.localsmartdb.ts +5 -0
- package/ts/ts_migration/classes.storagemigrator.ts +93 -0
- package/ts/ts_migration/index.ts +1 -0
- package/ts/ts_migration/migrators/v0_to_v1.ts +253 -0
- package/ts/ts_smartdb/index.ts +11 -0
- package/ts/ts_smartdb/rust-db-bridge.ts +127 -3
- package/ts/ts_smartdb/server/SmartdbServer.ts +71 -0
|
@@ -0,0 +1,253 @@
|
|
|
1
|
+
import * as fs from 'fs';
|
|
2
|
+
import * as path from 'path';
|
|
3
|
+
import * as crypto from 'crypto';
|
|
4
|
+
import { BSON } from 'bson';
|
|
5
|
+
|
|
6
|
+
// ---------------------------------------------------------------------------
// Binary format constants (must match Rust: record.rs)
// ---------------------------------------------------------------------------

/** File-level magic: "SMARTDB\0" */
const FILE_MAGIC = Buffer.from('SMARTDB\0', 'ascii');
/** Current format version */
const FORMAT_VERSION = 1;
/** File type tags (type 2 is not used here — presumably reserved on the Rust side; confirm against record.rs) */
const FILE_TYPE_DATA = 1;
const FILE_TYPE_HINT = 3;
/** File header total size */
const FILE_HEADER_SIZE = 64;
/** Per-record magic */
const RECORD_MAGIC = 0xDB01;
/** Per-record header size */
const RECORD_HEADER_SIZE = 22; // 2 (magic) + 8 (timestamp) + 4 (keyLen) + 4 (valLen) + 4 (crc)
|
|
23
|
+
|
|
24
|
+
// ---------------------------------------------------------------------------
|
|
25
|
+
// Binary encoding helpers
|
|
26
|
+
// ---------------------------------------------------------------------------
|
|
27
|
+
|
|
28
|
+
function writeFileHeader(fileType: number): Buffer {
|
|
29
|
+
const buf = Buffer.alloc(FILE_HEADER_SIZE, 0);
|
|
30
|
+
FILE_MAGIC.copy(buf, 0);
|
|
31
|
+
buf.writeUInt16LE(FORMAT_VERSION, 8);
|
|
32
|
+
buf.writeUInt8(fileType, 10);
|
|
33
|
+
buf.writeUInt32LE(0, 11); // flags
|
|
34
|
+
const now = BigInt(Date.now());
|
|
35
|
+
buf.writeBigUInt64LE(now, 15);
|
|
36
|
+
// bytes 23..64 are reserved (zeros)
|
|
37
|
+
return buf;
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
function encodeDataRecord(timestamp: bigint, key: Buffer, value: Buffer): Buffer {
|
|
41
|
+
const keyLen = key.length;
|
|
42
|
+
const valLen = value.length;
|
|
43
|
+
const totalSize = RECORD_HEADER_SIZE + keyLen + valLen;
|
|
44
|
+
const buf = Buffer.alloc(totalSize);
|
|
45
|
+
|
|
46
|
+
// Write header fields (without CRC)
|
|
47
|
+
buf.writeUInt16LE(RECORD_MAGIC, 0);
|
|
48
|
+
buf.writeBigUInt64LE(timestamp, 2);
|
|
49
|
+
buf.writeUInt32LE(keyLen, 10);
|
|
50
|
+
buf.writeUInt32LE(valLen, 14);
|
|
51
|
+
// CRC placeholder at offset 18..22 (will fill below)
|
|
52
|
+
key.copy(buf, RECORD_HEADER_SIZE);
|
|
53
|
+
value.copy(buf, RECORD_HEADER_SIZE + keyLen);
|
|
54
|
+
|
|
55
|
+
// CRC32 covers everything except the CRC field itself:
|
|
56
|
+
// bytes [0..18] + bytes [22..]
|
|
57
|
+
const crc = crc32(Buffer.concat([
|
|
58
|
+
buf.subarray(0, 18),
|
|
59
|
+
buf.subarray(22),
|
|
60
|
+
]));
|
|
61
|
+
buf.writeUInt32LE(crc, 18);
|
|
62
|
+
|
|
63
|
+
return buf;
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
function encodeHintEntry(key: string, offset: bigint, recordLen: number, valueLen: number, timestamp: bigint): Buffer {
|
|
67
|
+
const keyBuf = Buffer.from(key, 'utf-8');
|
|
68
|
+
const buf = Buffer.alloc(4 + keyBuf.length + 8 + 4 + 4 + 8);
|
|
69
|
+
let pos = 0;
|
|
70
|
+
buf.writeUInt32LE(keyBuf.length, pos); pos += 4;
|
|
71
|
+
keyBuf.copy(buf, pos); pos += keyBuf.length;
|
|
72
|
+
buf.writeBigUInt64LE(offset, pos); pos += 8;
|
|
73
|
+
buf.writeUInt32LE(recordLen, pos); pos += 4;
|
|
74
|
+
buf.writeUInt32LE(valueLen, pos); pos += 4;
|
|
75
|
+
buf.writeBigUInt64LE(timestamp, pos);
|
|
76
|
+
return buf;
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
// ---------------------------------------------------------------------------
|
|
80
|
+
// CRC32 (matching crc32fast in Rust)
|
|
81
|
+
// ---------------------------------------------------------------------------
|
|
82
|
+
|
|
83
|
+
const CRC32_TABLE = (() => {
|
|
84
|
+
const table = new Uint32Array(256);
|
|
85
|
+
for (let i = 0; i < 256; i++) {
|
|
86
|
+
let crc = i;
|
|
87
|
+
for (let j = 0; j < 8; j++) {
|
|
88
|
+
crc = (crc & 1) ? (0xEDB88320 ^ (crc >>> 1)) : (crc >>> 1);
|
|
89
|
+
}
|
|
90
|
+
table[i] = crc;
|
|
91
|
+
}
|
|
92
|
+
return table;
|
|
93
|
+
})();
|
|
94
|
+
|
|
95
|
+
function crc32(data: Buffer): number {
|
|
96
|
+
let crc = 0xFFFFFFFF;
|
|
97
|
+
for (let i = 0; i < data.length; i++) {
|
|
98
|
+
crc = CRC32_TABLE[(crc ^ data[i]) & 0xFF] ^ (crc >>> 8);
|
|
99
|
+
}
|
|
100
|
+
return (crc ^ 0xFFFFFFFF) >>> 0;
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
// ---------------------------------------------------------------------------
|
|
104
|
+
// Migration: v0 (JSON) → v1 (Bitcask binary)
|
|
105
|
+
// ---------------------------------------------------------------------------
|
|
106
|
+
|
|
107
|
+
/** In-memory keydir entry describing where one record lives inside data.rdb. */
interface IKeyDirEntry {
  offset: bigint;    // byte offset of the record within the data file
  recordLen: number; // total record length (header + key + value)
  valueLen: number;  // length of the BSON value alone
  timestamp: bigint; // write timestamp (ms, from Date.now())
}
|
|
113
|
+
|
|
114
|
+
/**
|
|
115
|
+
* Migrate a storage directory from v0 (JSON-per-collection) to v1 (Bitcask binary).
|
|
116
|
+
*
|
|
117
|
+
* - Original .json files are NOT modified or deleted.
|
|
118
|
+
* - New v1 files are written into {db}/{coll}/ subdirectories.
|
|
119
|
+
* - Returns a list of old files that can be safely deleted.
|
|
120
|
+
* - On failure, cleans up any partial new files and throws.
|
|
121
|
+
*/
|
|
122
|
+
export async function migrateV0ToV1(storagePath: string): Promise<string[]> {
|
|
123
|
+
const deletableFiles: string[] = [];
|
|
124
|
+
const createdDirs: string[] = [];
|
|
125
|
+
|
|
126
|
+
try {
|
|
127
|
+
const dbEntries = fs.readdirSync(storagePath, { withFileTypes: true });
|
|
128
|
+
|
|
129
|
+
for (const dbEntry of dbEntries) {
|
|
130
|
+
if (!dbEntry.isDirectory()) continue;
|
|
131
|
+
|
|
132
|
+
const dbDir = path.join(storagePath, dbEntry.name);
|
|
133
|
+
const collFiles = fs.readdirSync(dbDir, { withFileTypes: true });
|
|
134
|
+
|
|
135
|
+
for (const collFile of collFiles) {
|
|
136
|
+
if (!collFile.isFile()) continue;
|
|
137
|
+
if (!collFile.name.endsWith('.json')) continue;
|
|
138
|
+
if (collFile.name.endsWith('.indexes.json')) continue;
|
|
139
|
+
|
|
140
|
+
const collName = collFile.name.replace(/\.json$/, '');
|
|
141
|
+
const jsonPath = path.join(dbDir, collFile.name);
|
|
142
|
+
const indexJsonPath = path.join(dbDir, `${collName}.indexes.json`);
|
|
143
|
+
|
|
144
|
+
// Target directory
|
|
145
|
+
const collDir = path.join(dbDir, collName);
|
|
146
|
+
if (fs.existsSync(collDir)) {
|
|
147
|
+
// Already migrated
|
|
148
|
+
continue;
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
console.log(`[smartdb] Migrating ${dbEntry.name}.${collName}...`);
|
|
152
|
+
|
|
153
|
+
// Read the JSON collection
|
|
154
|
+
const jsonData = fs.readFileSync(jsonPath, 'utf-8');
|
|
155
|
+
const docs: any[] = JSON.parse(jsonData);
|
|
156
|
+
|
|
157
|
+
// Create collection directory
|
|
158
|
+
fs.mkdirSync(collDir, { recursive: true });
|
|
159
|
+
createdDirs.push(collDir);
|
|
160
|
+
|
|
161
|
+
// Write data.rdb
|
|
162
|
+
const dataPath = path.join(collDir, 'data.rdb');
|
|
163
|
+
const fd = fs.openSync(dataPath, 'w');
|
|
164
|
+
|
|
165
|
+
try {
|
|
166
|
+
// File header
|
|
167
|
+
const headerBuf = writeFileHeader(FILE_TYPE_DATA);
|
|
168
|
+
fs.writeSync(fd, headerBuf);
|
|
169
|
+
|
|
170
|
+
let currentOffset = BigInt(FILE_HEADER_SIZE);
|
|
171
|
+
const keydir: Map<string, IKeyDirEntry> = new Map();
|
|
172
|
+
const ts = BigInt(Date.now());
|
|
173
|
+
|
|
174
|
+
for (const doc of docs) {
|
|
175
|
+
// Extract _id
|
|
176
|
+
let idHex: string;
|
|
177
|
+
if (doc._id && doc._id.$oid) {
|
|
178
|
+
idHex = doc._id.$oid;
|
|
179
|
+
} else if (typeof doc._id === 'string') {
|
|
180
|
+
idHex = doc._id;
|
|
181
|
+
} else if (doc._id) {
|
|
182
|
+
idHex = String(doc._id);
|
|
183
|
+
} else {
|
|
184
|
+
// Generate a new ObjectId
|
|
185
|
+
idHex = crypto.randomBytes(12).toString('hex');
|
|
186
|
+
doc._id = { $oid: idHex };
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
// Serialize to BSON
|
|
190
|
+
const bsonBytes = BSON.serialize(doc);
|
|
191
|
+
const keyBuf = Buffer.from(idHex, 'utf-8');
|
|
192
|
+
const valueBuf = Buffer.from(bsonBytes);
|
|
193
|
+
|
|
194
|
+
const record = encodeDataRecord(ts, keyBuf, valueBuf);
|
|
195
|
+
fs.writeSync(fd, record);
|
|
196
|
+
|
|
197
|
+
keydir.set(idHex, {
|
|
198
|
+
offset: currentOffset,
|
|
199
|
+
recordLen: record.length,
|
|
200
|
+
valueLen: valueBuf.length,
|
|
201
|
+
timestamp: ts,
|
|
202
|
+
});
|
|
203
|
+
|
|
204
|
+
currentOffset += BigInt(record.length);
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
fs.fsyncSync(fd);
|
|
208
|
+
fs.closeSync(fd);
|
|
209
|
+
|
|
210
|
+
// Write keydir.hint
|
|
211
|
+
const hintPath = path.join(collDir, 'keydir.hint');
|
|
212
|
+
const hintFd = fs.openSync(hintPath, 'w');
|
|
213
|
+
fs.writeSync(hintFd, writeFileHeader(FILE_TYPE_HINT));
|
|
214
|
+
for (const [key, entry] of keydir) {
|
|
215
|
+
fs.writeSync(hintFd, encodeHintEntry(key, entry.offset, entry.recordLen, entry.valueLen, entry.timestamp));
|
|
216
|
+
}
|
|
217
|
+
fs.fsyncSync(hintFd);
|
|
218
|
+
fs.closeSync(hintFd);
|
|
219
|
+
|
|
220
|
+
} catch (writeErr) {
|
|
221
|
+
// Clean up on write failure
|
|
222
|
+
try { fs.closeSync(fd); } catch {}
|
|
223
|
+
throw writeErr;
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
// Copy indexes.json if it exists
|
|
227
|
+
if (fs.existsSync(indexJsonPath)) {
|
|
228
|
+
const destIndexPath = path.join(collDir, 'indexes.json');
|
|
229
|
+
fs.copyFileSync(indexJsonPath, destIndexPath);
|
|
230
|
+
deletableFiles.push(indexJsonPath);
|
|
231
|
+
} else {
|
|
232
|
+
// Write default _id index
|
|
233
|
+
const destIndexPath = path.join(collDir, 'indexes.json');
|
|
234
|
+
fs.writeFileSync(destIndexPath, JSON.stringify([{ name: '_id_', key: { _id: 1 } }], null, 2));
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
deletableFiles.push(jsonPath);
|
|
238
|
+
|
|
239
|
+
console.log(`[smartdb] Migrated ${dbEntry.name}.${collName}: ${docs.length} documents`);
|
|
240
|
+
}
|
|
241
|
+
}
|
|
242
|
+
} catch (err) {
|
|
243
|
+
// Clean up any partially created directories
|
|
244
|
+
for (const dir of createdDirs) {
|
|
245
|
+
try {
|
|
246
|
+
fs.rmSync(dir, { recursive: true, force: true });
|
|
247
|
+
} catch {}
|
|
248
|
+
}
|
|
249
|
+
throw err;
|
|
250
|
+
}
|
|
251
|
+
|
|
252
|
+
return deletableFiles;
|
|
253
|
+
}
|
package/ts/ts_smartdb/index.ts
CHANGED
|
@@ -6,3 +6,14 @@ export type { ISmartdbServerOptions } from './server/SmartdbServer.js';
|
|
|
6
6
|
|
|
7
7
|
// Export bridge for advanced usage
|
|
8
8
|
export { RustDbBridge } from './rust-db-bridge.js';
|
|
9
|
+
|
|
10
|
+
// Export oplog / debug types
|
|
11
|
+
export type {
|
|
12
|
+
IOpLogEntry,
|
|
13
|
+
IOpLogResult,
|
|
14
|
+
IOpLogStats,
|
|
15
|
+
IRevertResult,
|
|
16
|
+
ICollectionInfo,
|
|
17
|
+
IDocumentsResult,
|
|
18
|
+
ISmartDbMetrics,
|
|
19
|
+
} from './rust-db-bridge.js';
|
|
@@ -3,6 +3,82 @@ import * as path from 'path';
|
|
|
3
3
|
import * as url from 'url';
|
|
4
4
|
import { EventEmitter } from 'events';
|
|
5
5
|
|
|
6
|
+
/**
 * A single oplog entry returned from the Rust engine.
 */
export interface IOpLogEntry {
  seq: number;                                  // sequence number of this entry
  timestampMs: number;                          // timestamp in ms (presumably epoch-based — confirm against Rust engine)
  op: 'insert' | 'update' | 'delete';           // operation kind
  db: string;                                   // database name
  collection: string;                           // collection name
  documentId: string;                           // id of the affected document
  document: Record<string, any> | null;         // document payload, when the engine reports one
  previousDocument: Record<string, any> | null; // prior payload, when the engine reports one
}

/**
 * Aggregate oplog statistics.
 */
export interface IOpLogStats {
  currentSeq: number;   // latest assigned sequence number
  totalEntries: number; // entries currently retained
  oldestSeq: number;    // smallest retained sequence number
  entriesByOp: {        // per-operation entry counts
    insert: number;
    update: number;
    delete: number;
  };
}

/**
 * Result of a getOpLog query.
 */
export interface IOpLogResult {
  entries: IOpLogEntry[]; // entries matching the query
  currentSeq: number;     // latest sequence number at query time
  totalEntries: number;   // total retained entries at query time
}

/**
 * Result of a revertToSeq command.
 */
export interface IRevertResult {
  dryRun: boolean;         // whether this was a preview (no changes applied)
  reverted: number;        // number of entries reverted (or that would be)
  targetSeq?: number;      // target sequence, when reported
  entries?: IOpLogEntry[]; // affected entries, when reported
  errors?: string[];       // per-entry errors, when any occurred
}

/**
 * A collection info entry.
 */
export interface ICollectionInfo {
  db: string;    // owning database
  name: string;  // collection name
  count: number; // document count
}

/**
 * Result of a getDocuments query.
 */
export interface IDocumentsResult {
  documents: Record<string, any>[]; // requested page of documents
  total: number;                    // total document count for the collection
}

/**
 * Server metrics.
 */
export interface ISmartDbMetrics {
  databases: number;       // number of databases
  collections: number;     // number of collections
  oplogEntries: number;    // retained oplog entries
  oplogCurrentSeq: number; // latest oplog sequence number
  uptimeSeconds: number;   // engine uptime in seconds
}
|
|
81
|
+
|
|
6
82
|
/**
|
|
7
83
|
* Type-safe command definitions for the RustDb IPC protocol.
|
|
8
84
|
*/
|
|
@@ -10,7 +86,24 @@ type TSmartDbCommands = {
|
|
|
10
86
|
start: { params: { config: ISmartDbRustConfig }; result: { connectionUri: string } };
|
|
11
87
|
stop: { params: Record<string, never>; result: void };
|
|
12
88
|
getStatus: { params: Record<string, never>; result: { running: boolean } };
|
|
13
|
-
getMetrics: { params: Record<string, never>; result:
|
|
89
|
+
getMetrics: { params: Record<string, never>; result: ISmartDbMetrics };
|
|
90
|
+
getOpLog: {
|
|
91
|
+
params: { sinceSeq?: number; limit?: number; db?: string; collection?: string };
|
|
92
|
+
result: IOpLogResult;
|
|
93
|
+
};
|
|
94
|
+
getOpLogStats: { params: Record<string, never>; result: IOpLogStats };
|
|
95
|
+
revertToSeq: {
|
|
96
|
+
params: { seq: number; dryRun?: boolean };
|
|
97
|
+
result: IRevertResult;
|
|
98
|
+
};
|
|
99
|
+
getCollections: {
|
|
100
|
+
params: { db?: string };
|
|
101
|
+
result: { collections: ICollectionInfo[] };
|
|
102
|
+
};
|
|
103
|
+
getDocuments: {
|
|
104
|
+
params: { db: string; collection: string; limit?: number; skip?: number };
|
|
105
|
+
result: IDocumentsResult;
|
|
106
|
+
};
|
|
14
107
|
};
|
|
15
108
|
|
|
16
109
|
/**
|
|
@@ -132,7 +225,38 @@ export class RustDbBridge extends EventEmitter {
|
|
|
132
225
|
return await this.bridge.sendCommand('getStatus', {} as Record<string, never>) as { running: boolean };
|
|
133
226
|
}
|
|
134
227
|
|
|
135
|
-
public async getMetrics(): Promise<
|
|
136
|
-
return this.bridge.sendCommand('getMetrics', {} as Record<string, never>)
|
|
228
|
+
public async getMetrics(): Promise<ISmartDbMetrics> {
|
|
229
|
+
return this.bridge.sendCommand('getMetrics', {} as Record<string, never>) as Promise<ISmartDbMetrics>;
|
|
230
|
+
}
|
|
231
|
+
|
|
232
|
+
public async getOpLog(params: {
|
|
233
|
+
sinceSeq?: number;
|
|
234
|
+
limit?: number;
|
|
235
|
+
db?: string;
|
|
236
|
+
collection?: string;
|
|
237
|
+
} = {}): Promise<IOpLogResult> {
|
|
238
|
+
return this.bridge.sendCommand('getOpLog', params) as Promise<IOpLogResult>;
|
|
239
|
+
}
|
|
240
|
+
|
|
241
|
+
public async getOpLogStats(): Promise<IOpLogStats> {
|
|
242
|
+
return this.bridge.sendCommand('getOpLogStats', {} as Record<string, never>) as Promise<IOpLogStats>;
|
|
243
|
+
}
|
|
244
|
+
|
|
245
|
+
public async revertToSeq(seq: number, dryRun = false): Promise<IRevertResult> {
|
|
246
|
+
return this.bridge.sendCommand('revertToSeq', { seq, dryRun }) as Promise<IRevertResult>;
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
public async getCollections(db?: string): Promise<ICollectionInfo[]> {
|
|
250
|
+
const result = await this.bridge.sendCommand('getCollections', db ? { db } : {}) as { collections: ICollectionInfo[] };
|
|
251
|
+
return result.collections;
|
|
252
|
+
}
|
|
253
|
+
|
|
254
|
+
public async getDocuments(
|
|
255
|
+
db: string,
|
|
256
|
+
collection: string,
|
|
257
|
+
limit = 50,
|
|
258
|
+
skip = 0,
|
|
259
|
+
): Promise<IDocumentsResult> {
|
|
260
|
+
return this.bridge.sendCommand('getDocuments', { db, collection, limit, skip }) as Promise<IDocumentsResult>;
|
|
137
261
|
}
|
|
138
262
|
}
|
|
@@ -1,4 +1,14 @@
|
|
|
1
1
|
import { RustDbBridge } from '../rust-db-bridge.js';
|
|
2
|
+
import { StorageMigrator } from '../../ts_migration/index.js';
|
|
3
|
+
import type {
|
|
4
|
+
IOpLogEntry,
|
|
5
|
+
IOpLogResult,
|
|
6
|
+
IOpLogStats,
|
|
7
|
+
IRevertResult,
|
|
8
|
+
ICollectionInfo,
|
|
9
|
+
IDocumentsResult,
|
|
10
|
+
ISmartDbMetrics,
|
|
11
|
+
} from '../rust-db-bridge.js';
|
|
2
12
|
|
|
3
13
|
/**
|
|
4
14
|
* Server configuration options
|
|
@@ -66,6 +76,12 @@ export class SmartdbServer {
|
|
|
66
76
|
throw new Error('Server is already running');
|
|
67
77
|
}
|
|
68
78
|
|
|
79
|
+
// Run storage migration for file-based storage before starting Rust engine
|
|
80
|
+
if (this.options.storage === 'file' && this.options.storagePath) {
|
|
81
|
+
const migrator = new StorageMigrator(this.options.storagePath);
|
|
82
|
+
await migrator.run();
|
|
83
|
+
}
|
|
84
|
+
|
|
69
85
|
const spawned = await this.bridge.spawn();
|
|
70
86
|
if (!spawned) {
|
|
71
87
|
throw new Error(
|
|
@@ -156,4 +172,59 @@ export class SmartdbServer {
|
|
|
156
172
|
  /** Host the server binds to; defaults to loopback when not configured. */
  get host(): string {
    return this.options.host ?? '127.0.0.1';
  }
|
|
175
|
+
|
|
176
|
+
// --- OpLog / Debug API ---
|
|
177
|
+
|
|
178
|
+
/**
|
|
179
|
+
* Get oplog entries, optionally filtered.
|
|
180
|
+
*/
|
|
181
|
+
async getOpLog(params: {
|
|
182
|
+
sinceSeq?: number;
|
|
183
|
+
limit?: number;
|
|
184
|
+
db?: string;
|
|
185
|
+
collection?: string;
|
|
186
|
+
} = {}): Promise<IOpLogResult> {
|
|
187
|
+
return this.bridge.getOpLog(params);
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
/**
|
|
191
|
+
* Get aggregate oplog statistics.
|
|
192
|
+
*/
|
|
193
|
+
async getOpLogStats(): Promise<IOpLogStats> {
|
|
194
|
+
return this.bridge.getOpLogStats();
|
|
195
|
+
}
|
|
196
|
+
|
|
197
|
+
/**
|
|
198
|
+
* Revert database state to a specific oplog sequence number.
|
|
199
|
+
* Use dryRun=true to preview which entries would be reverted.
|
|
200
|
+
*/
|
|
201
|
+
async revertToSeq(seq: number, dryRun = false): Promise<IRevertResult> {
|
|
202
|
+
return this.bridge.revertToSeq(seq, dryRun);
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
/**
|
|
206
|
+
* List all collections across all databases, with document counts.
|
|
207
|
+
*/
|
|
208
|
+
async getCollections(db?: string): Promise<ICollectionInfo[]> {
|
|
209
|
+
return this.bridge.getCollections(db);
|
|
210
|
+
}
|
|
211
|
+
|
|
212
|
+
/**
|
|
213
|
+
* Get documents from a collection with pagination.
|
|
214
|
+
*/
|
|
215
|
+
async getDocuments(
|
|
216
|
+
db: string,
|
|
217
|
+
collection: string,
|
|
218
|
+
limit = 50,
|
|
219
|
+
skip = 0,
|
|
220
|
+
): Promise<IDocumentsResult> {
|
|
221
|
+
return this.bridge.getDocuments(db, collection, limit, skip);
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
/**
|
|
225
|
+
* Get server metrics including database/collection counts and oplog info.
|
|
226
|
+
*/
|
|
227
|
+
async getMetrics(): Promise<ISmartDbMetrics> {
|
|
228
|
+
return this.bridge.getMetrics();
|
|
229
|
+
}
|
|
159
230
|
}
|