dataply 0.0.16-alpha.6 → 0.0.16-alpha.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cjs/index.js CHANGED
@@ -5614,7 +5614,6 @@ var VirtualFileSystem = class {
5614
5614
  this.fileSize = import_node_fs2.default.fstatSync(fileHandle).size;
5615
5615
  if (walPath) {
5616
5616
  this.logManager = new LogManager(walPath, pageSize);
5617
- this.recover();
5618
5617
  }
5619
5618
  }
5620
5619
  /** Cache list (Page ID -> Data Buffer) */
@@ -5633,10 +5632,9 @@ var VirtualFileSystem = class {
5633
5632
  activeTransactions = /* @__PURE__ */ new Map();
5634
5633
  /**
5635
5634
  * Performs recovery (Redo) using WAL logs.
5636
- * Called in constructor, so it's a synchronous process and data is only reflected in cache.
5637
- * Actual disk sync and log clearing are performed during future transactions or closure.
5635
+ * Called during initialization (DataplyAPI.init), ensuring data is fully restored before operations start.
5638
5636
  */
5639
- recover() {
5637
+ async recover() {
5640
5638
  if (!this.logManager) return;
5641
5639
  this.logManager.open();
5642
5640
  const restoredPages = this.logManager.readAllSync();
@@ -5669,11 +5667,10 @@ var VirtualFileSystem = class {
5669
5667
  this.fileSize = endPos;
5670
5668
  }
5671
5669
  }
5672
- Promise.all(promises).then(() => {
5673
- if (this.logManager && restoredPages.size > 0) {
5674
- this.logManager.clear().catch(console.error);
5675
- }
5676
- });
5670
+ await Promise.all(promises);
5671
+ if (this.logManager && restoredPages.size > 0) {
5672
+ await this.logManager.clear();
5673
+ }
5677
5674
  }
5678
5675
  /**
5679
5676
  * Prepares the transaction for commit (Phase 1).
@@ -5940,6 +5937,13 @@ var PageFileSystem = class {
5940
5937
  pageFactory = new PageManagerFactory();
5941
5938
  vfs;
5942
5939
  pageManagerFactory;
5940
+ /**
5941
+ * Initializes the page file system.
5942
+ * Performs VFS recovery if necessary.
5943
+ */
5944
+ async init() {
5945
+ await this.vfs.recover();
5946
+ }
5943
5947
  /**
5944
5948
  * Updates the bitmap status for a specific page.
5945
5949
  * @param pageId The ID of the page to update
@@ -7266,6 +7270,7 @@ var DataplyAPI = class {
7266
7270
  }
7267
7271
  await this.runWithDefault(async (tx) => {
7268
7272
  await this.hook.trigger("init", tx, async (tx2) => {
7273
+ await this.pfs.init();
7269
7274
  await this.rowTableEngine.init();
7270
7275
  this.initialized = true;
7271
7276
  return tx2;
@@ -21,6 +21,11 @@ export declare class PageFileSystem {
21
21
  * @param walPath WAL 파일 경로 (기본값: null)
22
22
  */
23
23
  constructor(fileHandle: number, pageSize: number, pageCacheCapacity: number, walPath?: string | undefined | null);
24
+ /**
25
+ * Initializes the page file system.
26
+ * Performs VFS recovery if necessary.
27
+ */
28
+ init(): Promise<void>;
24
29
  /**
25
30
  * Updates the bitmap status for a specific page.
26
31
  * @param pageId The ID of the page to update
@@ -22,10 +22,9 @@ export declare class VirtualFileSystem {
22
22
  constructor(fileHandle: number, pageSize: number, pageCacheCapacity: number, walPath?: string | undefined | null);
23
23
  /**
24
24
  * Performs recovery (Redo) using WAL logs.
25
- * Called in constructor, so it's a synchronous process and data is only reflected in cache.
26
- * Actual disk sync and log clearing are performed during future transactions or closure.
25
+ * Called during initialization (DataplyAPI.init), ensuring data is fully restored before operations start.
27
26
  */
28
- private recover;
27
+ recover(): Promise<void>;
29
28
  /**
30
29
  * Prepares the transaction for commit (Phase 1).
31
30
  * Writes dirty pages to WAL but does not update the main database file.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "dataply",
3
- "version": "0.0.16-alpha.6",
3
+ "version": "0.0.16-alpha.7",
4
4
  "description": "A lightweight storage engine for Node.js with support for MVCC, WAL.",
5
5
  "license": "MIT",
6
6
  "author": "izure <admin@izure.org>",
package/readme.md CHANGED
@@ -9,14 +9,15 @@
9
9
 
10
10
  ## Key Features
11
11
 
12
- - **🚀 Identity-Based Access**: Specialized in storing records and managing them via auto-generated Primary Keys.
13
- - **⚡ High-Performance B+Tree**: Optimizes data lookup and insertion through an asynchronous B+Tree structure.
14
- - **🛡️ MVCC Support**: Enables non-blocking read operations and guarantees data isolation between transactions.
15
- - **📝 WAL (Write-Ahead Logging)**: Ensures data integrity and provides recovery capabilities in case of system failures.
16
- - **💼 Transaction Mechanism**: Supports Commit and Rollback for atomic operations.
17
- - **📦 Page-Based Storage**: Efficient page caching and disk I/O optimization through Virtual File System (VFS).
18
- - **📉 Bitmap Space Optimization**: Uses bitmapped management to efficiently track page usage and maximize disk space utilization.
19
- - **⌨️ TypeScript Support**: Provides comprehensive type definitions for all APIs.
12
+ Dataply provides essential features for high-performance data management:
13
+
14
+ - **Identity-Based Access**: Manage records through auto-generated Primary Keys for ultra-fast retrieval.
15
+ - **High-Performance B+Tree**: Asynchronous B+Tree structure optimizes both lookups and insertions.
16
+ - **MVCC & Isolation**: Snapshot isolation via Multi-Version Concurrency Control (MVCC) enables non-blocking reads.
17
+ - **Reliability (WAL)**: Write-Ahead Logging (WAL) ensures data integrity and automatic crash recovery.
18
+ - **Atomic Transactions**: Full support for ACID-compliant Commit and Rollback operations.
19
+ - **Efficient Storage**: Fixed-size page management with VFS-based caching and Bitmap space optimization.
20
+ - **Type Safety**: Comprehensive TypeScript definitions for a seamless developer experience.
20
21
 
21
22
  ## Installation
22
23
 
@@ -148,10 +149,10 @@ try {
148
149
  ```
149
150
 
150
151
  ### Auto-Transaction
151
- If you omit the `tx` argument when calling methods like `insert`, `update`, or `delete`, Dataply internally **creates an individual transaction automatically**.
152
+ If you omit the `tx` argument, Dataply creates an internal transaction for each operation.
152
153
 
153
- - **Guaranteed Atomicity**: Even single operations are processed within an internal transaction, ensuring they are only finalized on success and rolled back on failure.
154
- - **Performance Note**: For batch processing or multiple related operations, wrapping them in a single explicit transaction is significantly faster than relying on auto-transactions due to reduced I/O overhead.
154
+ - **Safety**: Atomicity is guaranteed even for single operations.
155
+ - **Optimization Tip**: For bulk operations, use an **explicit transaction** to significantly reduce I/O overhead and increase performance.
155
156
 
156
157
  ## API Reference
157
158
 
@@ -201,13 +202,13 @@ Cancels all changes made during the transaction and restores the original state.
201
202
  ### GlobalTransaction Class
202
203
 
203
204
  #### `add(tx: Transaction): void`
204
- Adds an individual transaction from a `Dataply` instance to the global transaction.
205
+ Registers a transaction from a Dataply instance to the global transaction unit.
205
206
 
206
207
  #### `async commit(): Promise<void>`
207
- Atomically commits all added transactions using a 2-Phase Commit (2PC) process.
208
+ Executes an atomic commit across all registered transactions via a 2-Phase Commit (2PC) protocol.
208
209
 
209
210
  #### `async rollback(): Promise<void>`
210
- Rolls back all added transactions.
211
+ Rolls back all registered transactions simultaneously.
211
212
 
212
213
  ## Extending Dataply
213
214
 
@@ -331,12 +332,14 @@ As **Dataply** is currently in Alpha, there are several limitations to keep in m
331
332
 
332
333
  ### Q: Why should I use Dataply instead of a simple JSON file?
333
334
 
334
- The core differences between the commonly used JSON file approach and Dataply are as follows:
335
+ While JSON is simple, Dataply is designed for scalable and reliable data management:
335
336
 
336
- 1. **Memory Efficiency**: While JSON requires loading the entire file into memory, Dataply uses a **page-based storage mechanism**, allowing it to handle large-scale data reliably with a constant memory footprint.
337
- 2. **Superior Search Performance**: Unlike JSON, which requires a full scan (O(N)), Dataply ensures extremely fast lookups (O(log N)) regardless of data size using a **B+Tree index**.
338
- 3. **Data Integrity**: In contrast to JSON files that risk corruption during system failures, Dataply protects your data using **WAL (Write-Ahead Logging)** and **Transactions**.
339
- 4. **Concurrency Control**: Using **MVCC (Multi-Version Concurrency Control)** and **page-level locking**, Dataply delivers peak performance even in environments where multiple users are reading and writing simultaneously.
337
+ | Feature | JSON File Approach | Dataply Record Store |
338
+ | :--- | :--- | :--- |
339
+ | **Memory usage** | Loads entire file into RAM | Constant memory via page-based I/O |
340
+ | **Search speed** | Linear scan (O(N)) | B+Tree Index lookups (O(log N)) |
341
+ | **Integrity** | High risk of corruption on crash | Protected by WAL and Transactions |
342
+ | **Concurrency** | Single-user only | Multi-user via MVCC & Locking |
340
343
 
341
344
  ### Q: What can I build with Dataply?
342
345