@rippledb/client-controllers 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +51 -0
- package/dist/index.d.ts +113 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +135 -0
- package/dist/index.js.map +1 -0
- package/dist/index.test.d.ts +2 -0
- package/dist/index.test.d.ts.map +1 -0
- package/package.json +58 -0
package/README.md
ADDED
@@ -0,0 +1,51 @@
# @rippledb/client-controllers

Abstract CRUD controllers with batch loading for RippleDB.

📚 **Documentation:** [rippledb.dev/docs/adapters/controllers](https://rippledb.dev/docs/adapters/controllers)

## Installation

```bash
npm install @rippledb/client-controllers
```

## Usage

```typescript
import { createEntityController } from '@rippledb/client-controllers';
import { MemoryStore } from '@rippledb/store-memory';

const store = new MemoryStore<MySchema>();

// Create a controller for an entity type
const todoController = createEntityController({
  store,
  entity: 'todos',
  stream: 'user-123',
});

// CRUD operations with automatic batch loading
const todo = await todoController.create({ title: 'Buy milk' });
const fetched = await todoController.read(todo.id);
const updated = await todoController.update(todo.id, { done: true });
await todoController.delete(todo.id);
```

## Features

- **Batch loading**: Multiple `read()` calls in the same tick are automatically batched
- **CRUD operations**: Simple, type-safe create/read/update/delete API
- **Framework agnostic**: Works with any UI framework or backend
- **Store abstraction**: Works with any `Store` implementation

## Relationship to bind-tanstack-query

- **Controllers**: Handle reads/writes and batch loading (data access layer)
- **bind-tanstack-query**: Handles cache invalidation (UI reactivity layer)

Use controllers for CRUD operations, and `bind-tanstack-query` for automatic cache invalidation.

## License

MIT
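To make the README's batching claim concrete, here is an illustrative sketch (editorial, not one of the package files below); the `MySchema` type is a hypothetical stand-in for the placeholder used in the Usage snippet:

```typescript
import { createEntityController } from '@rippledb/client-controllers';
import { MemoryStore } from '@rippledb/store-memory';

// Hypothetical schema standing in for the README's `MySchema` placeholder.
type MySchema = {
  todos: { id: string; title: string; done?: boolean };
};

const store = new MemoryStore<MySchema>();
const todos = createEntityController({ store, entity: 'todos', stream: 'user-123' });

const milk = await todos.create({ title: 'Buy milk' });
const dog = await todos.create({ title: 'Walk dog' });

// Both read() calls start in the same tick, so the controller's batch loader
// coalesces them into a single bulk lookup against the store.
const [a, b] = await Promise.all([todos.read(milk.id), todos.read(dog.id)]);

// readMany() skips the batching window and issues one bulk read immediately.
const byId = await todos.readMany([milk.id, dog.id]);
console.log(a?.title, b?.title, byId.size);
```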
package/dist/index.d.ts
ADDED
@@ -0,0 +1,113 @@
import type { Store } from '@rippledb/client';
import type { RippleSchema, HlcState, EntityName } from '@rippledb/core';
/**
 * Batch loader interface for efficient bulk reads.
 * Collects keys during a tick and executes them as a single bulk query.
 */
export interface BatchLoader<K, V> {
    /**
     * Load a single key. Keys are collected per tick and batched.
     */
    load(key: K): Promise<V | null>;
    /**
     * Load multiple keys. Always executes immediately (no batching).
     */
    loadMany(keys: K[]): Promise<Map<K, V>>;
}
export type BatchLoaderFlushStrategy = 'auto' | 'microtask' | 'raf';
export type CreateBatchLoaderOptions = {
    /**
     * How to schedule the batch flush.
     *
     * - `microtask`: flush at end of current JS turn (lowest latency, smaller batch window)
     * - `raf`: flush on next animation frame (bigger batch window, adds up to ~1 frame latency)
     * - `auto`: `raf` when available, otherwise `microtask`
     *
     * @default 'auto'
     */
    flush?: BatchLoaderFlushStrategy;
};
/**
 * Creates a batch loader for a specific entity in a Store.
 *
 * The batch loader collects `load()` calls during a single tick/RAF,
 * deduplicates keys, and executes them as a bulk query using `store.getRows`.
 *
 * @param store - The Store instance
 * @param entity - The entity name to load from
 * @returns A BatchLoader instance
 */
export declare function createBatchLoader<S extends RippleSchema = RippleSchema, E extends EntityName<S> = EntityName<S>>(store: Store<S>, entity: E, options?: CreateBatchLoaderOptions): BatchLoader<string, S[E]>;
/**
 * Entity controller interface for CRUD operations with batch loading.
 */
export interface EntityController<S extends RippleSchema = RippleSchema, E extends EntityName<S> = EntityName<S>, ListQuery = unknown> {
    /**
     * Create a new entity. If patch contains an `id` field, it will be used.
     * Otherwise, a UUID will be generated.
     */
    create(patch: Partial<S[E]>): Promise<S[E]>;
    /**
     * Read a single entity by ID. Uses batch loading for efficiency.
     */
    read(id: string): Promise<S[E] | null>;
    /**
     * Read multiple entities by IDs. Always executes immediately (no batching).
     */
    readMany(ids: string[]): Promise<Map<string, S[E]>>;
    /**
     * Update an entity by ID with a partial patch.
     */
    update(id: string, patch: Partial<S[E]>): Promise<S[E]>;
    /**
     * Delete an entity by ID.
     */
    delete(id: string): Promise<void>;
    /**
     * List entities using a store-specific query.
     */
    list(query: ListQuery): Promise<S[E][]>;
}
export type CreateEntityControllerOptions<S extends RippleSchema = RippleSchema, E extends EntityName<S> = EntityName<S>, ListQuery = unknown> = {
    /**
     * The Store instance to operate on.
     */
    store: Store<S, ListQuery>;
    /**
     * The entity name this controller manages.
     */
    entity: E;
    /**
     * The stream ID for all changes created by this controller.
     */
    stream: string;
    /**
     * Optional HLC state. If not provided, a new one will be created.
     */
    hlcState?: HlcState;
    /**
     * Optional function to generate entity IDs. Defaults to crypto.randomUUID().
     */
    generateId?: () => string;
    /**
     * Optional batch loader scheduling configuration.
     */
    batch?: CreateBatchLoaderOptions;
};
/**
 * Creates an entity controller for CRUD operations with automatic batch loading.
 *
 * @example
 * ```ts
 * const todoController = createEntityController({
 *   store,
 *   entity: 'todos',
 *   stream: 'user-123',
 * });
 *
 * const todo = await todoController.create({ title: 'Buy milk' });
 * const fetched = await todoController.read(todo.id);
 * ```
 */
export declare function createEntityController<S extends RippleSchema = RippleSchema, E extends EntityName<S> = EntityName<S>, ListQuery = unknown>(options: CreateEntityControllerOptions<S, E, ListQuery>): EntityController<S, E, ListQuery>;
//# sourceMappingURL=index.d.ts.map
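As a hedged illustration of the declarations above (editorial, not shipped with the package), this sketch exercises `createBatchLoader` directly with an explicit flush strategy; the minimal schema and the `MemoryStore` pairing are assumptions borrowed from the README and devDependencies:

```typescript
import { createBatchLoader } from '@rippledb/client-controllers';
import { MemoryStore } from '@rippledb/store-memory';

// Assumed minimal schema for illustration.
type MySchema = { todos: { id: string; title: string } };

const store = new MemoryStore<MySchema>();

// 'microtask' flushes at the end of the current JS turn: lowest latency,
// smaller batching window than 'raf'.
const loader = createBatchLoader(store, 'todos', { flush: 'microtask' });

// These two load() calls share one bulk getRows() when the flush runs.
const [t1, t2] = await Promise.all([loader.load('todo-1'), loader.load('todo-2')]);

// loadMany() bypasses the scheduling window and queries the store right away.
const rows = await loader.loadMany(['todo-1', 'todo-2', 'todo-3']);
console.log(t1, t2, rows.size);
```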
package/dist/index.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,KAAK,EAAE,MAAM,kBAAkB,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,QAAQ,EAAE,UAAU,EAAE,MAAM,gBAAgB,CAAC;AAGzE;;;GAGG;AACH,MAAM,WAAW,WAAW,CAAC,CAAC,EAAE,CAAC;IAC/B;;OAEG;IACH,IAAI,CAAC,GAAG,EAAE,CAAC,GAAG,OAAO,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC;IAEhC;;OAEG;IACH,QAAQ,CAAC,IAAI,EAAE,CAAC,EAAE,GAAG,OAAO,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;CACzC;AAED,MAAM,MAAM,wBAAwB,GAAG,MAAM,GAAG,WAAW,GAAG,KAAK,CAAC;AAEpE,MAAM,MAAM,wBAAwB,GAAG;IACrC;;;;;;;;OAQG;IACH,KAAK,CAAC,EAAE,wBAAwB,CAAC;CAClC,CAAC;AAOF;;;;;;;;;GASG;AACH,wBAAgB,iBAAiB,CAC/B,CAAC,SAAS,YAAY,GAAG,YAAY,EACrC,CAAC,SAAS,UAAU,CAAC,CAAC,CAAC,GAAG,UAAU,CAAC,CAAC,CAAC,EACvC,KAAK,EAAE,KAAK,CAAC,CAAC,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,OAAO,GAAE,wBAA6B,GAAG,WAAW,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAwF/F;AAED;;GAEG;AACH,MAAM,WAAW,gBAAgB,CAC/B,CAAC,SAAS,YAAY,GAAG,YAAY,EACrC,CAAC,SAAS,UAAU,CAAC,CAAC,CAAC,GAAG,UAAU,CAAC,CAAC,CAAC,EACvC,SAAS,GAAG,OAAO;IAEnB;;;OAGG;IACH,MAAM,CAAC,KAAK,EAAE,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;IAE5C;;OAEG;IACH,IAAI,CAAC,EAAE,EAAE,MAAM,GAAG,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC;IAEvC;;OAEG;IACH,QAAQ,CAAC,GAAG,EAAE,MAAM,EAAE,GAAG,OAAO,CAAC,GAAG,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;IAEpD;;OAEG;IACH,MAAM,CAAC,EAAE,EAAE,MAAM,EAAE,KAAK,EAAE,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;IAExD;;OAEG;IACH,MAAM,CAAC,EAAE,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IAElC;;OAEG;IACH,IAAI,CAAC,KAAK,EAAE,SAAS,GAAG,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC;CACzC;AAED,MAAM,MAAM,6BAA6B,CACvC,CAAC,SAAS,YAAY,GAAG,YAAY,EACrC,CAAC,SAAS,UAAU,CAAC,CAAC,CAAC,GAAG,UAAU,CAAC,CAAC,CAAC,EACvC,SAAS,GAAG,OAAO,IACjB;IACF;;OAEG;IACH,KAAK,EAAE,KAAK,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC;IAE3B;;OAEG;IACH,MAAM,EAAE,CAAC,CAAC;IAEV;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IAEf;;OAEG;IACH,QAAQ,CAAC,EAAE,QAAQ,CAAC;IAEpB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,MAAM,CAAC;IAE1B;;OAEG;IACH,KAAK,CAAC,EAAE,wBAAwB,CAAC;CAClC,CAAC;AAEF;;;;;;;;;;;;;;GAcG;AACH,wBAAgB,sBAAsB,CACpC,CAAC,SAAS,YAAY,GAAG,YAAY,EACrC,CAAC,SAAS,UAAU,CAAC,CAAC,CAAC,GAAG,UAAU,CAAC,CAAC,CAAC,EACvC,SAAS,GAAG,OAAO,EACnB,OAAO,EAAE,6BAA6B,CAAC,CAAC,EAAE,CAAC,EAAE,SAAS,CAAC,GAAG,gBAAgB,CAAC,CAAC,EAAE,CAAC,EAAE,SAAS,CAAC,CAmF5F"}
package/dist/index.js
ADDED
@@ -0,0 +1,135 @@
// src/index.ts
import { makeUpsert, makeDelete, tickHlc, createHlcState } from "@rippledb/core";
function createBatchLoader(store, entity, options = {}) {
  const pending = /* @__PURE__ */ new Map();
  let scheduled = false;
  const flush = async () => {
    scheduled = false;
    if (pending.size === 0) return;
    const keys = Array.from(pending.keys());
    const requests = /* @__PURE__ */ new Map();
    for (const [key, reqs] of pending.entries()) {
      requests.set(key, reqs);
    }
    pending.clear();
    try {
      const uniqueKeys = Array.from(new Set(keys));
      const results = await store.getRows(entity, uniqueKeys);
      for (const [key, reqs] of requests.entries()) {
        const value = results.get(key) ?? null;
        for (const req of reqs) {
          req.resolve(value);
        }
      }
    } catch (error) {
      for (const reqs of requests.values()) {
        for (const req of reqs) {
          req.reject(error);
        }
      }
    }
  };
  const flushStrategy = options.flush ?? "auto";
  const scheduleFlush = () => {
    if (scheduled) return;
    scheduled = true;
    const raf = globalThis.requestAnimationFrame;
    if (typeof raf === "function" && (flushStrategy === "raf" || flushStrategy === "auto")) {
      raf(() => {
        void flush();
      });
      return;
    }
    queueMicrotask(() => {
      void flush();
    });
  };
  return {
    load(key) {
      return new Promise((resolve, reject) => {
        const reqs = pending.get(key) ?? [];
        reqs.push({ resolve, reject });
        pending.set(key, reqs);
        scheduleFlush();
      });
    },
    async loadMany(keys) {
      const uniqueKeys = Array.from(new Set(keys));
      return await store.getRows(entity, uniqueKeys);
    }
  };
}
function createEntityController(options) {
  const {
    store,
    entity,
    stream,
    hlcState = createHlcState("controller"),
    generateId = () => crypto.randomUUID(),
    batch
  } = options;
  const batchLoader = createBatchLoader(store, entity, batch);
  const getHlc = () => tickHlc(hlcState, Date.now());
  return {
    async create(patch) {
      const id = patch.id ?? generateId();
      const hlc = getHlc();
      await store.applyChanges([
        makeUpsert({
          stream,
          entity,
          entityId: id,
          patch: { ...patch, id },
          hlc
        })
      ]);
      const created = await batchLoader.load(id);
      if (!created) {
        throw new Error(`Failed to read created entity ${entity}:${id}`);
      }
      return created;
    },
    read(id) {
      return batchLoader.load(id);
    },
    readMany(ids) {
      return batchLoader.loadMany(ids);
    },
    async update(id, patch) {
      const hlc = getHlc();
      await store.applyChanges([
        makeUpsert({
          stream,
          entity,
          entityId: id,
          patch,
          hlc
        })
      ]);
      const updated = await batchLoader.load(id);
      if (!updated) {
        throw new Error(`Failed to read updated entity ${entity}:${id}`);
      }
      return updated;
    },
    async delete(id) {
      const hlc = getHlc();
      await store.applyChanges([
        makeDelete({
          stream,
          entity,
          entityId: id,
          hlc
        })
      ]);
    },
    async list(query) {
      return await store.listRows(query);
    }
  };
}
export {
  createBatchLoader,
  createEntityController
};
//# sourceMappingURL=index.js.map
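The compiled implementation above defaults `hlcState`, `generateId`, and the flush strategy. The following sketch (an editorial example under stated assumptions, not package code) shows how those options can be overridden, e.g. for deterministic tests:

```typescript
import { createEntityController } from '@rippledb/client-controllers';
import { createHlcState } from '@rippledb/core';
import { MemoryStore } from '@rippledb/store-memory';

// Assumed schema for illustration.
type MySchema = { todos: { id: string; title: string; done?: boolean } };

const store = new MemoryStore<MySchema>();

let counter = 0;
const todos = createEntityController({
  store,
  entity: 'todos',
  stream: 'user-123',
  // Same call shape as the compiled default createHlcState("controller");
  // the node name 'device-42' is illustrative.
  hlcState: createHlcState('device-42'),
  // Deterministic IDs instead of the crypto.randomUUID() default.
  generateId: () => `todo-${++counter}`,
  // Skip the requestAnimationFrame path in Node or test environments.
  batch: { flush: 'microtask' },
});

const todo = await todos.create({ title: 'Buy milk' }); // id === 'todo-1'
await todos.update(todo.id, { done: true });
await todos.delete(todo.id);
```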
package/dist/index.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/index.ts"],"sourcesContent":["import type { Store } from '@rippledb/client';\nimport type { RippleSchema, HlcState, EntityName } from '@rippledb/core';\nimport { makeUpsert, makeDelete, tickHlc, createHlcState } from '@rippledb/core';\n\n/**\n * Batch loader interface for efficient bulk reads.\n * Collects keys during a tick and executes them as a single bulk query.\n */\nexport interface BatchLoader<K, V> {\n /**\n * Load a single key. Keys are collected per tick and batched.\n */\n load(key: K): Promise<V | null>;\n\n /**\n * Load multiple keys. Always executes immediately (no batching).\n */\n loadMany(keys: K[]): Promise<Map<K, V>>;\n}\n\nexport type BatchLoaderFlushStrategy = 'auto' | 'microtask' | 'raf';\n\nexport type CreateBatchLoaderOptions = {\n /**\n * How to schedule the batch flush.\n *\n * - `microtask`: flush at end of current JS turn (lowest latency, smaller batch window)\n * - `raf`: flush on next animation frame (bigger batch window, adds up to ~1 frame latency)\n * - `auto`: `raf` when available, otherwise `microtask`\n *\n * @default 'auto'\n */\n flush?: BatchLoaderFlushStrategy;\n};\n\ntype PendingRequest<V> = {\n resolve: (value: V | null) => void;\n reject: (error: unknown) => void;\n};\n\n/**\n * Creates a batch loader for a specific entity in a Store.\n *\n * The batch loader collects `load()` calls during a single tick/RAF,\n * deduplicates keys, and executes them as a bulk query using `store.getRows`.\n *\n * @param store - The Store instance\n * @param entity - The entity name to load from\n * @returns A BatchLoader instance\n */\nexport function createBatchLoader<\n S extends RippleSchema = RippleSchema,\n E extends EntityName<S> = EntityName<S>,\n>(store: Store<S>, entity: E, options: CreateBatchLoaderOptions = {}): BatchLoader<string, S[E]> {\n const pending = new Map<string, PendingRequest<S[E]>[]>();\n let scheduled = false;\n\n const flush = async () => {\n scheduled = false;\n\n if (pending.size === 0) return;\n\n const keys = Array.from(pending.keys());\n const requests = new Map<string, PendingRequest<S[E]>[]>();\n\n // Move pending requests to local map and clear\n for (const [key, reqs] of pending.entries()) {\n requests.set(key, reqs);\n }\n pending.clear();\n\n try {\n const uniqueKeys = Array.from(new Set(keys));\n const results = await store.getRows(entity, uniqueKeys);\n\n // Resolve all pending requests\n for (const [key, reqs] of requests.entries()) {\n const value = results.get(key) ?? null;\n for (const req of reqs) {\n req.resolve(value);\n }\n }\n } catch (error) {\n // Reject all pending requests on error\n for (const reqs of requests.values()) {\n for (const req of reqs) {\n req.reject(error);\n }\n }\n }\n };\n\n const flushStrategy: BatchLoaderFlushStrategy = options.flush ?? 'auto';\n\n const scheduleFlush = () => {\n if (scheduled) return;\n scheduled = true;\n\n const raf = (globalThis as unknown as { requestAnimationFrame?: (cb: () => void) => number })\n .requestAnimationFrame;\n\n if (typeof raf === 'function' && (flushStrategy === 'raf' || flushStrategy === 'auto')) {\n raf(() => {\n void flush();\n });\n return;\n }\n\n // Default: batch within the same tick\n queueMicrotask(() => {\n void flush();\n });\n };\n\n return {\n load(key: string): Promise<S[E] | null> {\n return new Promise((resolve, reject) => {\n const reqs = pending.get(key) ?? 
[];\n reqs.push({ resolve, reject });\n pending.set(key, reqs);\n scheduleFlush();\n });\n },\n\n async loadMany(keys: string[]): Promise<Map<string, S[E]>> {\n // Intentionally NOT batched:\n // - `load()` is for scattered reads and benefits from a tick/RAF batching window.\n // - `loadMany()` is the \"I already have the full ID set\" escape hatch and should\n // issue exactly one bulk `getRows()` immediately (no extra scheduling latency).\n //\n // Note: If callers mix `load()` + `loadMany()` within the same frame, this may\n // cause two bulk reads (one for the flush + one for loadMany). We can add an\n // explicit opt-in like `loadMany(ids, { coalesce: true })` later if we\n // see real call-sites where this matters.\n\t // I see two options:\n\t // 1. loadMany separates each key into a separate function call to move them into the schedule.\n\t // 2. loadMany lets currently scheduled keys piggy back on its load call.\n const uniqueKeys = Array.from(new Set(keys));\n return await store.getRows(entity, uniqueKeys);\n },\n };\n}\n\n/**\n * Entity controller interface for CRUD operations with batch loading.\n */\nexport interface EntityController<\n S extends RippleSchema = RippleSchema,\n E extends EntityName<S> = EntityName<S>,\n ListQuery = unknown,\n> {\n /**\n * Create a new entity. If patch contains an `id` field, it will be used.\n * Otherwise, a UUID will be generated.\n */\n create(patch: Partial<S[E]>): Promise<S[E]>;\n\n /**\n * Read a single entity by ID. Uses batch loading for efficiency.\n */\n read(id: string): Promise<S[E] | null>;\n\n /**\n * Read multiple entities by IDs. Always executes immediately (no batching).\n */\n readMany(ids: string[]): Promise<Map<string, S[E]>>;\n\n /**\n * Update an entity by ID with a partial patch.\n */\n update(id: string, patch: Partial<S[E]>): Promise<S[E]>;\n\n /**\n * Delete an entity by ID.\n */\n delete(id: string): Promise<void>;\n\n /**\n * List entities using a store-specific query.\n */\n list(query: ListQuery): Promise<S[E][]>;\n}\n\nexport type CreateEntityControllerOptions<\n S extends RippleSchema = RippleSchema,\n E extends EntityName<S> = EntityName<S>,\n ListQuery = unknown,\n> = {\n /**\n * The Store instance to operate on.\n */\n store: Store<S, ListQuery>;\n\n /**\n * The entity name this controller manages.\n */\n entity: E;\n\n /**\n * The stream ID for all changes created by this controller.\n */\n stream: string;\n\n /**\n * Optional HLC state. If not provided, a new one will be created.\n */\n hlcState?: HlcState;\n\n /**\n * Optional function to generate entity IDs. 
Defaults to crypto.randomUUID().\n */\n generateId?: () => string;\n\n /**\n * Optional batch loader scheduling configuration.\n */\n batch?: CreateBatchLoaderOptions;\n};\n\n/**\n * Creates an entity controller for CRUD operations with automatic batch loading.\n *\n * @example\n * ```ts\n * const todoController = createEntityController({\n * store,\n * entity: 'todos',\n * stream: 'user-123',\n * });\n *\n * const todo = await todoController.create({ title: 'Buy milk' });\n * const fetched = await todoController.read(todo.id);\n * ```\n */\nexport function createEntityController<\n S extends RippleSchema = RippleSchema,\n E extends EntityName<S> = EntityName<S>,\n ListQuery = unknown,\n>(options: CreateEntityControllerOptions<S, E, ListQuery>): EntityController<S, E, ListQuery> {\n const {\n store,\n entity,\n stream,\n hlcState = createHlcState('controller'),\n generateId = () => crypto.randomUUID(),\n batch,\n } = options;\n const batchLoader = createBatchLoader(store, entity, batch);\n\n const getHlc = () => tickHlc(hlcState, Date.now());\n\n return {\n async create(patch: Partial<S[E]>): Promise<S[E]> {\n // Extract ID from patch if present, otherwise generate one\n const id = (patch as Record<string, unknown>).id as string | undefined ?? generateId();\n const hlc = getHlc();\n\n await store.applyChanges([\n makeUpsert({\n stream,\n entity,\n entityId: id,\n patch: { ...patch, id } as Partial<S[E]>,\n hlc,\n }),\n ]);\n\n // Read back the created entity\n const created = await batchLoader.load(id);\n if (!created) {\n throw new Error(`Failed to read created entity ${entity}:${id}`);\n }\n return created;\n },\n\n read(id: string): Promise<S[E] | null> {\n return batchLoader.load(id);\n },\n\n readMany(ids: string[]): Promise<Map<string, S[E]>> {\n return batchLoader.loadMany(ids);\n },\n\n async update(id: string, patch: Partial<S[E]>): Promise<S[E]> {\n const hlc = getHlc();\n\n await store.applyChanges([\n makeUpsert({\n stream,\n entity,\n entityId: id,\n patch,\n hlc,\n }),\n ]);\n\n // Read back the updated entity\n const updated = await batchLoader.load(id);\n if (!updated) {\n throw new Error(`Failed to read updated entity ${entity}:${id}`);\n }\n return updated;\n },\n\n async delete(id: string): Promise<void> {\n const hlc = getHlc();\n\n await store.applyChanges([\n makeDelete({\n stream,\n entity,\n entityId: id,\n hlc,\n }),\n ]);\n },\n\n async list(query: ListQuery): Promise<S[E][]> {\n return (await store.listRows(query)) as S[E][];\n },\n 
};\n}\n"],"mappings":";AAEA,SAAS,YAAY,YAAY,SAAS,sBAAsB;AAgDzD,SAAS,kBAGd,OAAiB,QAAW,UAAoC,CAAC,GAA8B;AAC/F,QAAM,UAAU,oBAAI,IAAoC;AACxD,MAAI,YAAY;AAEhB,QAAM,QAAQ,YAAY;AACxB,gBAAY;AAEZ,QAAI,QAAQ,SAAS,EAAG;AAExB,UAAM,OAAO,MAAM,KAAK,QAAQ,KAAK,CAAC;AACtC,UAAM,WAAW,oBAAI,IAAoC;AAGzD,eAAW,CAAC,KAAK,IAAI,KAAK,QAAQ,QAAQ,GAAG;AAC3C,eAAS,IAAI,KAAK,IAAI;AAAA,IACxB;AACA,YAAQ,MAAM;AAEd,QAAI;AACF,YAAM,aAAa,MAAM,KAAK,IAAI,IAAI,IAAI,CAAC;AAC3C,YAAM,UAAU,MAAM,MAAM,QAAQ,QAAQ,UAAU;AAGtD,iBAAW,CAAC,KAAK,IAAI,KAAK,SAAS,QAAQ,GAAG;AAC5C,cAAM,QAAQ,QAAQ,IAAI,GAAG,KAAK;AAClC,mBAAW,OAAO,MAAM;AACtB,cAAI,QAAQ,KAAK;AAAA,QACnB;AAAA,MACF;AAAA,IACF,SAAS,OAAO;AAEd,iBAAW,QAAQ,SAAS,OAAO,GAAG;AACpC,mBAAW,OAAO,MAAM;AACtB,cAAI,OAAO,KAAK;AAAA,QAClB;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,QAAM,gBAA0C,QAAQ,SAAS;AAEjE,QAAM,gBAAgB,MAAM;AAC1B,QAAI,UAAW;AACf,gBAAY;AAEZ,UAAM,MAAO,WACV;AAEH,QAAI,OAAO,QAAQ,eAAe,kBAAkB,SAAS,kBAAkB,SAAS;AACtF,UAAI,MAAM;AACR,aAAK,MAAM;AAAA,MACb,CAAC;AACD;AAAA,IACF;AAGA,mBAAe,MAAM;AACnB,WAAK,MAAM;AAAA,IACb,CAAC;AAAA,EACH;AAEA,SAAO;AAAA,IACL,KAAK,KAAmC;AACtC,aAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,cAAM,OAAO,QAAQ,IAAI,GAAG,KAAK,CAAC;AAClC,aAAK,KAAK,EAAE,SAAS,OAAO,CAAC;AAC7B,gBAAQ,IAAI,KAAK,IAAI;AACrB,sBAAc;AAAA,MAChB,CAAC;AAAA,IACH;AAAA,IAEA,MAAM,SAAS,MAA4C;AAazD,YAAM,aAAa,MAAM,KAAK,IAAI,IAAI,IAAI,CAAC;AAC3C,aAAO,MAAM,MAAM,QAAQ,QAAQ,UAAU;AAAA,IAC/C;AAAA,EACF;AACF;AA6FO,SAAS,uBAId,SAA4F;AAC5F,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA,WAAW,eAAe,YAAY;AAAA,IACtC,aAAa,MAAM,OAAO,WAAW;AAAA,IACrC;AAAA,EACF,IAAI;AACJ,QAAM,cAAc,kBAAkB,OAAO,QAAQ,KAAK;AAE1D,QAAM,SAAS,MAAM,QAAQ,UAAU,KAAK,IAAI,CAAC;AAEjD,SAAO;AAAA,IACL,MAAM,OAAO,OAAqC;AAEhD,YAAM,KAAM,MAAkC,MAA4B,WAAW;AACrF,YAAM,MAAM,OAAO;AAEnB,YAAM,MAAM,aAAa;AAAA,QACvB,WAAW;AAAA,UACT;AAAA,UACA;AAAA,UACA,UAAU;AAAA,UACV,OAAO,EAAE,GAAG,OAAO,GAAG;AAAA,UACtB;AAAA,QACF,CAAC;AAAA,MACH,CAAC;AAGD,YAAM,UAAU,MAAM,YAAY,KAAK,EAAE;AACzC,UAAI,CAAC,SAAS;AACZ,cAAM,IAAI,MAAM,iCAAiC,MAAM,IAAI,EAAE,EAAE;AAAA,MACjE;AACA,aAAO;AAAA,IACT;AAAA,IAEA,KAAK,IAAkC;AACrC,aAAO,YAAY,KAAK,EAAE;AAAA,IAC5B;AAAA,IAEA,SAAS,KAA2C;AAClD,aAAO,YAAY,SAAS,GAAG;AAAA,IACjC;AAAA,IAEA,MAAM,OAAO,IAAY,OAAqC;AAC5D,YAAM,MAAM,OAAO;AAEnB,YAAM,MAAM,aAAa;AAAA,QACvB,WAAW;AAAA,UACT;AAAA,UACA;AAAA,UACA,UAAU;AAAA,UACV;AAAA,UACA;AAAA,QACF,CAAC;AAAA,MACH,CAAC;AAGD,YAAM,UAAU,MAAM,YAAY,KAAK,EAAE;AACzC,UAAI,CAAC,SAAS;AACZ,cAAM,IAAI,MAAM,iCAAiC,MAAM,IAAI,EAAE,EAAE;AAAA,MACjE;AACA,aAAO;AAAA,IACT;AAAA,IAEA,MAAM,OAAO,IAA2B;AACtC,YAAM,MAAM,OAAO;AAEnB,YAAM,MAAM,aAAa;AAAA,QACvB,WAAW;AAAA,UACT;AAAA,UACA;AAAA,UACA,UAAU;AAAA,UACV;AAAA,QACF,CAAC;AAAA,MACH,CAAC;AAAA,IACH;AAAA,IAEA,MAAM,KAAK,OAAmC;AAC5C,aAAQ,MAAM,MAAM,SAAS,KAAK;AAAA,IACpC;AAAA,EACF;AACF;","names":[]}
package/dist/index.test.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.test.d.ts","sourceRoot":"","sources":["../src/index.test.ts"],"names":[],"mappings":""}
package/package.json
ADDED
@@ -0,0 +1,58 @@
{
  "name": "@rippledb/client-controllers",
  "version": "0.1.0",
  "private": false,
  "type": "module",
  "sideEffects": false,
  "description": "Abstract CRUD controllers with batch loading for RippleDB",
  "keywords": [
    "rippledb",
    "crud",
    "controllers",
    "batch-loader",
    "local-first",
    "offline-first"
  ],
  "repository": {
    "type": "git",
    "url": "https://github.com/eckerlein/rippledb.git",
    "directory": "packages/client-controllers"
  },
  "exports": {
    ".": {
      "types": "./dist/index.d.ts",
      "import": "./dist/index.js"
    }
  },
  "files": [
    "dist"
  ],
  "dependencies": {
    "@rippledb/core": "0.1.1",
    "@rippledb/client": "0.1.1"
  },
  "devDependencies": {
    "eslint": "^9.37.0",
    "tsup": "^8.5.0",
    "typescript": "^5.9.3",
    "vitest": "^3.2.4",
    "@rippledb/store-memory": "0.1.1"
  },
  "tsup": {
    "entry": [
      "src/index.ts"
    ],
    "format": [
      "esm"
    ],
    "dts": false,
    "sourcemap": true,
    "clean": true
  },
  "scripts": {
    "build": "tsup && tsc -p tsconfig.build.json",
    "test": "vitest run --passWithNoTests",
    "test:watch": "vitest --passWithNoTests",
    "lint": "eslint ."
  }
}
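Because the manifest sets `"type": "module"` and exposes only a `types`/`import` condition, the published build is ESM-only. A brief illustrative note for CommonJS callers (editorial, not from the package docs):

```typescript
// Illustrative: from a CommonJS module, require('@rippledb/client-controllers')
// would fail because only an ESM "import" entry point is published; a dynamic
// import inside an async function is the usual workaround. ESM consumers can
// use a normal static import.
async function loadControllers() {
  const { createEntityController, createBatchLoader } = await import(
    '@rippledb/client-controllers'
  );
  return { createEntityController, createBatchLoader };
}
```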