@f0rbit/corpus 0.1.3 → 0.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/backend/cloudflare.d.ts +41 -1
- package/dist/backend/cloudflare.d.ts.map +1 -1
- package/dist/backend/cloudflare.js +69 -36
- package/dist/backend/file.d.ts +36 -1
- package/dist/backend/file.d.ts.map +1 -1
- package/dist/backend/file.js +55 -21
- package/dist/backend/layered.d.ts +38 -0
- package/dist/backend/layered.d.ts.map +1 -1
- package/dist/backend/layered.js +38 -0
- package/dist/backend/memory.d.ts +31 -1
- package/dist/backend/memory.d.ts.map +1 -1
- package/dist/backend/memory.js +41 -11
- package/dist/backends.d.ts +11 -0
- package/dist/backends.d.ts.map +1 -0
- package/dist/backends.js +9 -0
- package/dist/cloudflare.d.ts +2 -5
- package/dist/cloudflare.d.ts.map +1 -1
- package/dist/cloudflare.js +2 -5
- package/dist/codecs.d.ts +8 -0
- package/dist/codecs.d.ts.map +1 -0
- package/dist/codecs.js +6 -0
- package/dist/core.d.ts +9 -0
- package/dist/core.d.ts.map +1 -0
- package/dist/core.js +7 -0
- package/dist/corpus.d.ts +68 -1
- package/dist/corpus.d.ts.map +1 -1
- package/dist/corpus.js +194 -1
- package/dist/index.d.ts +3 -6
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +2 -5
- package/dist/schema.d.ts +27 -0
- package/dist/schema.d.ts.map +1 -1
- package/dist/schema.js +27 -0
- package/dist/sst.d.ts +38 -0
- package/dist/sst.d.ts.map +1 -1
- package/dist/sst.js +38 -0
- package/dist/types.d.ts +229 -1
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js +91 -2
- package/dist/utils.d.ts +133 -0
- package/dist/utils.d.ts.map +1 -0
- package/dist/utils.js +174 -0
- package/package.json +5 -2
- package/dist/codec.d.ts +0 -9
- package/dist/codec.d.ts.map +0 -1
- package/dist/codec.js +0 -21
- package/dist/hash.d.ts +0 -2
- package/dist/hash.d.ts.map +0 -1
- package/dist/hash.js +0 -5
- package/dist/store.d.ts +0 -3
- package/dist/store.d.ts.map +0 -1
- package/dist/store.js +0 -125
- package/dist/version.d.ts +0 -7
- package/dist/version.d.ts.map +0 -1
- package/dist/version.js +0 -31
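The summary shows the compiled internals consolidated in this release: the old `codec`, `hash`, `store`, and `version` modules are gone, replaced by `backends`, `codecs`, `core`, and `utils` entry points, and most of the added lines below are new JSDoc. For orientation, here is a minimal sketch of the builder flow those new doc comments describe; the root import path and the `CacheSchema` placeholder are assumptions taken from the JSDoc examples in this diff, not verified package exports.

```ts
// Sketch only: import path and schema handling are assumptions based on the
// JSDoc examples visible in this diff, not confirmed package API.
import { create_corpus, define_store, json_codec, create_memory_backend } from "@f0rbit/corpus";

declare const CacheSchema: any; // placeholder for whatever schema json_codec accepts

const corpus = create_corpus()
    .with_backend(create_memory_backend()) // any backend from this diff fits here
    .with_store(define_store("cache", json_codec(CacheSchema)))
    .build();
```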
package/dist/backend/cloudflare.d.ts
CHANGED
@@ -1,4 +1,8 @@
-
+/**
+ * @module Backends
+ * @description Cloudflare Workers storage backend using D1 and R2.
+ */
+import type { Backend, EventHandler } from "../types";
 type D1Database = {
     prepare: (sql: string) => unknown;
 };
@@ -18,6 +22,42 @@ export type CloudflareBackendConfig = {
     r2: R2Bucket;
     on_event?: EventHandler;
 };
+/**
+ * Creates a Cloudflare Workers storage backend using D1 and R2.
+ * @category Backends
+ * @group Storage Backends
+ *
+ * Uses D1 (SQLite) for metadata storage and R2 (object storage) for binary data.
+ * Requires running `CORPUS_MIGRATION_SQL` on the D1 database before first use.
+ *
+ * This backend is designed for production use in Cloudflare Workers environments,
+ * providing durable, globally distributed storage.
+ *
+ * @param config - Configuration with `d1` (D1 database), `r2` (R2 bucket), and optional `on_event` handler
+ * @returns A Backend instance using Cloudflare D1 + R2
+ *
+ * @example
+ * ```ts
+ * // In a Cloudflare Worker
+ * export default {
+ *   async fetch(request: Request, env: Env) {
+ *     const backend = create_cloudflare_backend({
+ *       d1: env.CORPUS_DB,
+ *       r2: env.CORPUS_BUCKET
+ *     })
+ *
+ *     const corpus = create_corpus()
+ *       .with_backend(backend)
+ *       .with_store(define_store('cache', json_codec(CacheSchema)))
+ *       .build()
+ *
+ *     // Use corpus...
+ *   }
+ * }
+ * ```
+ *
+ * @see CORPUS_MIGRATION_SQL for required database setup
+ */
 export declare function create_cloudflare_backend(config: CloudflareBackendConfig): Backend;
 export {};
 //# sourceMappingURL=cloudflare.d.ts.map
package/dist/backend/cloudflare.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"cloudflare.d.ts","sourceRoot":"","sources":["../../backend/cloudflare.ts"],"names":[],"mappings":"
+{"version":3,"file":"cloudflare.d.ts","sourceRoot":"","sources":["../../backend/cloudflare.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAIH,OAAO,KAAK,EAAE,OAAO,EAAwF,YAAY,EAAE,MAAM,UAAU,CAAC;AAI5I,KAAK,UAAU,GAAG;IAAE,OAAO,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,OAAO,CAAA;CAAE,CAAC;AACxD,KAAK,QAAQ,GAAG;IACf,GAAG,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,OAAO,CAAC;QAAE,IAAI,EAAE,cAAc,CAAC,UAAU,CAAC,CAAC;QAAC,WAAW,EAAE,MAAM,OAAO,CAAC,WAAW,CAAC,CAAA;KAAE,GAAG,IAAI,CAAC,CAAC;IACpH,GAAG,EAAE,CAAC,GAAG,EAAE,MAAM,EAAE,IAAI,EAAE,cAAc,CAAC,UAAU,CAAC,GAAG,UAAU,KAAK,OAAO,CAAC,IAAI,CAAC,CAAC;IACnF,MAAM,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,OAAO,CAAC,IAAI,CAAC,CAAC;IACvC,IAAI,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,OAAO,CAAC;QAAE,GAAG,EAAE,MAAM,CAAA;KAAE,GAAG,IAAI,CAAC,CAAC;CACvD,CAAC;AAEF,MAAM,MAAM,uBAAuB,GAAG;IACrC,EAAE,EAAE,UAAU,CAAC;IACf,EAAE,EAAE,QAAQ,CAAC;IACb,QAAQ,CAAC,EAAE,YAAY,CAAC;CACxB,CAAC;AAEF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAmCG;AACH,wBAAgB,yBAAyB,CAAC,MAAM,EAAE,uBAAuB,GAAG,OAAO,CA8OlF"}
package/dist/backend/cloudflare.js
CHANGED
@@ -1,7 +1,47 @@
-
-
-
-
+/**
+ * @module Backends
+ * @description Cloudflare Workers storage backend using D1 and R2.
+ */
+import { eq, and, desc, lt, gt, like, sql } from "drizzle-orm";
+import { drizzle } from "drizzle-orm/d1";
+import { ok, err } from "../types";
+import { corpus_snapshots } from "../schema";
+/**
+ * Creates a Cloudflare Workers storage backend using D1 and R2.
+ * @category Backends
+ * @group Storage Backends
+ *
+ * Uses D1 (SQLite) for metadata storage and R2 (object storage) for binary data.
+ * Requires running `CORPUS_MIGRATION_SQL` on the D1 database before first use.
+ *
+ * This backend is designed for production use in Cloudflare Workers environments,
+ * providing durable, globally distributed storage.
+ *
+ * @param config - Configuration with `d1` (D1 database), `r2` (R2 bucket), and optional `on_event` handler
+ * @returns A Backend instance using Cloudflare D1 + R2
+ *
+ * @example
+ * ```ts
+ * // In a Cloudflare Worker
+ * export default {
+ *   async fetch(request: Request, env: Env) {
+ *     const backend = create_cloudflare_backend({
+ *       d1: env.CORPUS_DB,
+ *       r2: env.CORPUS_BUCKET
+ *     })
+ *
+ *     const corpus = create_corpus()
+ *       .with_backend(backend)
+ *       .with_store(define_store('cache', json_codec(CacheSchema)))
+ *       .build()
+ *
+ *     // Use corpus...
+ *   }
+ * }
+ * ```
+ *
+ * @see CORPUS_MIGRATION_SQL for required database setup
+ */
 export function create_cloudflare_backend(config) {
     const db = drizzle(config.d1);
     const { r2, on_event } = config;
@@ -31,15 +71,15 @@ export function create_cloudflare_backend(config) {
                     .where(and(eq(corpus_snapshots.store_id, store_id), eq(corpus_snapshots.version, version)))
                     .limit(1);
                 const row = rows[0];
-                emit({ type:
+                emit({ type: "meta_get", store_id, version, found: !!row });
                 if (!row) {
-                    return err({ kind:
+                    return err({ kind: "not_found", store_id, version });
                 }
                 return ok(row_to_meta(row));
             }
             catch (cause) {
-                const error = { kind:
-                emit({ type:
+                const error = { kind: "storage_error", cause: cause, operation: "metadata.get" };
+                emit({ type: "error", error });
                 return err(error);
             }
         },
@@ -72,26 +112,24 @@ export function create_cloudflare_backend(config) {
                         tags: meta.tags ? JSON.stringify(meta.tags) : null,
                     },
                 });
-                emit({ type:
+                emit({ type: "meta_put", store_id: meta.store_id, version: meta.version });
                 return ok(undefined);
             }
             catch (cause) {
-                const error = { kind:
-                emit({ type:
+                const error = { kind: "storage_error", cause: cause, operation: "metadata.put" };
+                emit({ type: "error", error });
                 return err(error);
             }
         },
         async delete(store_id, version) {
             try {
-                await db
-
-                    .where(and(eq(corpus_snapshots.store_id, store_id), eq(corpus_snapshots.version, version)));
-                emit({ type: 'meta_delete', store_id, version });
+                await db.delete(corpus_snapshots).where(and(eq(corpus_snapshots.store_id, store_id), eq(corpus_snapshots.version, version)));
+                emit({ type: "meta_delete", store_id, version });
                 return ok(undefined);
             }
             catch (cause) {
-                const error = { kind:
-                emit({ type:
+                const error = { kind: "storage_error", cause: cause, operation: "metadata.delete" };
+                emit({ type: "error", error });
                 return err(error);
             }
         },
@@ -115,31 +153,26 @@ export function create_cloudflare_backend(config) {
             let count = 0;
             for (const row of rows) {
                 const meta = row_to_meta(row);
-                if (opts?.tags?.length && !opts.tags.
+                if (opts?.tags?.length && !opts.tags.every(t => meta.tags?.includes(t))) {
                     continue;
                 }
                 yield meta;
                 count++;
             }
-            emit({ type:
+            emit({ type: "meta_list", store_id, count });
         },
         async get_latest(store_id) {
            try {
-                const rows = await db
-                    .select()
-                    .from(corpus_snapshots)
-                    .where(eq(corpus_snapshots.store_id, store_id))
-                    .orderBy(desc(corpus_snapshots.created_at))
-                    .limit(1);
+                const rows = await db.select().from(corpus_snapshots).where(eq(corpus_snapshots.store_id, store_id)).orderBy(desc(corpus_snapshots.created_at)).limit(1);
                 const row = rows[0];
                 if (!row) {
-                    return err({ kind:
+                    return err({ kind: "not_found", store_id, version: "latest" });
                 }
                 return ok(row_to_meta(row));
             }
             catch (cause) {
-                const error = { kind:
-                emit({ type:
+                const error = { kind: "storage_error", cause: cause, operation: "metadata.get_latest" };
+                emit({ type: "error", error });
                 return err(error);
             }
         },
@@ -175,9 +208,9 @@ export function create_cloudflare_backend(config) {
         async get(data_key) {
             try {
                 const object = await r2.get(data_key);
-                emit({ type:
+                emit({ type: "data_get", store_id: data_key.split("/")[0] ?? data_key, version: data_key, found: !!object });
                 if (!object) {
-                    return err({ kind:
+                    return err({ kind: "not_found", store_id: data_key, version: "" });
                 }
                 return ok({
                     stream: () => object.body,
@@ -185,8 +218,8 @@ export function create_cloudflare_backend(config) {
                 });
             }
             catch (cause) {
-                const error = { kind:
-                emit({ type:
+                const error = { kind: "storage_error", cause: cause, operation: "data.get" };
+                emit({ type: "error", error });
                 return err(error);
             }
         },
@@ -196,8 +229,8 @@ export function create_cloudflare_backend(config) {
                 return ok(undefined);
             }
             catch (cause) {
-                const error = { kind:
-                emit({ type:
+                const error = { kind: "storage_error", cause: cause, operation: "data.put" };
+                emit({ type: "error", error });
                 return err(error);
             }
         },
@@ -207,8 +240,8 @@ export function create_cloudflare_backend(config) {
                 return ok(undefined);
             }
             catch (cause) {
-                const error = { kind:
-                emit({ type:
+                const error = { kind: "storage_error", cause: cause, operation: "data.delete" };
+                emit({ type: "error", error });
                 return err(error);
             }
         },
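Every operation above returns through the `ok`/`err` helpers instead of throwing, with errors tagged by `kind` (`"not_found"`, `"storage_error"`) and an `operation` string. Here is a sketch of consuming that shape, assuming a conventional `Result` union inferred from this diff; the package's published type may differ.

```ts
// Result shape inferred from the ok()/err() calls in this diff (an assumption).
type Result<T, E> = { ok: true; value: T } | { ok: false; error: E };
type BackendError =
    | { kind: "not_found"; store_id: string; version: string }
    | { kind: "storage_error"; cause: unknown; operation: string };

// Hypothetical caller of a metadata.get-style function.
async function read_meta(
    get: (store_id: string, version: string) => Promise<Result<unknown, BackendError>>,
) {
    const result = await get("cache", "v1");
    if (!result.ok) {
        if (result.error.kind === "not_found") return null; // missing snapshot, not a failure
        throw new Error(`storage failed during ${result.error.operation}`);
    }
    return result.value;
}
```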
package/dist/backend/file.d.ts
CHANGED
@@ -1,7 +1,42 @@
-
+/**
+ * @module Backends
+ * @description File-system storage backend for local persistence.
+ */
+import type { Backend, EventHandler } from "../types";
 export type FileBackendConfig = {
     base_path: string;
     on_event?: EventHandler;
 };
+/**
+ * Creates a file-system storage backend for local persistence.
+ * @category Backends
+ * @group Storage Backends
+ *
+ * Uses Bun's file APIs for efficient I/O. Metadata is stored as JSON files
+ * per store, and data is stored as binary files in a shared `_data` directory.
+ *
+ * Directory structure:
+ * ```
+ * base_path/
+ *   <store_id>/_meta.json          # Metadata for each store
+ *   _data/<store_id>_<hash>.bin    # Binary data files
+ * ```
+ *
+ * @param config - Configuration with `base_path` (root directory) and optional `on_event` handler
+ * @returns A Backend instance using file-system storage
+ *
+ * @example
+ * ```ts
+ * const backend = create_file_backend({
+ *   base_path: './data/corpus',
+ *   on_event: (e) => console.log(e.type)
+ * })
+ *
+ * const corpus = create_corpus()
+ *   .with_backend(backend)
+ *   .with_store(define_store('documents', json_codec(DocSchema)))
+ *   .build()
+ * ```
+ */
 export declare function create_file_backend(config: FileBackendConfig): Backend;
 //# sourceMappingURL=file.d.ts.map
package/dist/backend/file.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"file.d.ts","sourceRoot":"","sources":["../../backend/file.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,OAAO,EAAwF,YAAY,EAAE,MAAM,UAAU,
+{"version":3,"file":"file.d.ts","sourceRoot":"","sources":["../../backend/file.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EAAE,OAAO,EAAwF,YAAY,EAAE,MAAM,UAAU,CAAC;AAK5I,MAAM,MAAM,iBAAiB,GAAG;IAC/B,SAAS,EAAE,MAAM,CAAC;IAClB,QAAQ,CAAC,EAAE,YAAY,CAAC;CACxB,CAAC;AAEF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA8BG;AACH,wBAAgB,mBAAmB,CAAC,MAAM,EAAE,iBAAiB,GAAG,OAAO,CAkMtE"}
package/dist/backend/file.js
CHANGED
@@ -1,26 +1,61 @@
-
-
-
+/**
+ * @module Backends
+ * @description File-system storage backend for local persistence.
+ */
+import { ok, err } from "../types";
+import { mkdir, readdir } from "node:fs/promises";
+import { join, dirname } from "node:path";
+/**
+ * Creates a file-system storage backend for local persistence.
+ * @category Backends
+ * @group Storage Backends
+ *
+ * Uses Bun's file APIs for efficient I/O. Metadata is stored as JSON files
+ * per store, and data is stored as binary files in a shared `_data` directory.
+ *
+ * Directory structure:
+ * ```
+ * base_path/
+ *   <store_id>/_meta.json          # Metadata for each store
+ *   _data/<store_id>_<hash>.bin    # Binary data files
+ * ```
+ *
+ * @param config - Configuration with `base_path` (root directory) and optional `on_event` handler
+ * @returns A Backend instance using file-system storage
+ *
+ * @example
+ * ```ts
+ * const backend = create_file_backend({
+ *   base_path: './data/corpus',
+ *   on_event: (e) => console.log(e.type)
+ * })
+ *
+ * const corpus = create_corpus()
+ *   .with_backend(backend)
+ *   .with_store(define_store('documents', json_codec(DocSchema)))
+ *   .build()
+ * ```
+ */
 export function create_file_backend(config) {
     const { base_path, on_event } = config;
     function emit(event) {
         on_event?.(event);
     }
     function meta_path(store_id) {
-        return join(base_path, store_id,
+        return join(base_path, store_id, "_meta.json");
     }
     function data_path(data_key) {
-        return join(base_path,
+        return join(base_path, "_data", `${data_key.replace(/\//g, "_")}.bin`);
     }
     async function read_store_meta(store_id) {
         const path = meta_path(store_id);
         const file = Bun.file(path);
-        if (!await file.exists())
+        if (!(await file.exists()))
             return new Map();
         try {
             const content = await file.text();
             const entries = JSON.parse(content, (key, value) => {
-                if (key ===
+                if (key === "created_at" || key === "invoked_at") {
                     return value ? new Date(value) : value;
                 }
                 return value;
@@ -41,9 +76,9 @@ export function create_file_backend(config) {
         async get(store_id, version) {
             const store_meta = await read_store_meta(store_id);
             const meta = store_meta.get(version);
-            emit({ type:
+            emit({ type: "meta_get", store_id, version, found: !!meta });
             if (!meta) {
-                return err({ kind:
+                return err({ kind: "not_found", store_id, version });
             }
             return ok(meta);
         },
@@ -51,14 +86,14 @@ export function create_file_backend(config) {
             const store_meta = await read_store_meta(meta.store_id);
             store_meta.set(meta.version, meta);
             await write_store_meta(meta.store_id, store_meta);
-            emit({ type:
+            emit({ type: "meta_put", store_id: meta.store_id, version: meta.version });
             return ok(undefined);
         },
         async delete(store_id, version) {
             const store_meta = await read_store_meta(store_id);
             store_meta.delete(version);
             await write_store_meta(store_id, store_meta);
-            emit({ type:
+            emit({ type: "meta_delete", store_id, version });
             return ok(undefined);
         },
         async *list(store_id, opts) {
@@ -69,7 +104,7 @@ export function create_file_backend(config) {
                     return false;
                 if (opts?.after && meta.created_at <= opts.after)
                     return false;
-                if (opts?.tags?.length && !opts.tags.
+                if (opts?.tags?.length && !opts.tags.every(t => meta.tags?.includes(t)))
                     return false;
                 return true;
             })
@@ -80,7 +115,7 @@ export function create_file_backend(config) {
                 yield meta;
                 count++;
             }
-            emit({ type:
+            emit({ type: "meta_list", store_id, count });
         },
         async get_latest(store_id) {
             const store_meta = await read_store_meta(store_id);
@@ -91,7 +126,7 @@ export function create_file_backend(config) {
                 }
             }
             if (!latest) {
-                return err({ kind:
+                return err({ kind: "not_found", store_id, version: "latest" });
             }
             return ok(latest);
         },
@@ -99,7 +134,7 @@ export function create_file_backend(config) {
             try {
                 const entries = await readdir(base_path, { withFileTypes: true });
                 for (const entry of entries) {
-                    if (!entry.isDirectory() || entry.name.startsWith(
+                    if (!entry.isDirectory() || entry.name.startsWith("_"))
                         continue;
                     const store_meta = await read_store_meta(entry.name);
                     for (const meta of store_meta.values()) {
@@ -109,8 +144,7 @@ export function create_file_backend(config) {
                     }
                 }
             }
-            catch {
-            }
+            catch { }
         },
         async find_by_hash(store_id, content_hash) {
             const store_meta = await read_store_meta(store_id);
@@ -127,9 +161,9 @@ export function create_file_backend(config) {
             const path = data_path(data_key);
             const file = Bun.file(path);
             const found = await file.exists();
-            emit({ type:
+            emit({ type: "data_get", store_id: data_key.split("/")[0] ?? data_key, version: data_key, found });
             if (!found) {
-                return err({ kind:
+                return err({ kind: "not_found", store_id: data_key, version: "" });
             }
             return ok({
                 stream: () => file.stream(),
@@ -158,7 +192,7 @@ export function create_file_backend(config) {
                 return ok(undefined);
             }
             catch (cause) {
-                return err({ kind:
+                return err({ kind: "storage_error", cause: cause, operation: "put" });
             }
         },
         async delete(data_key) {
@@ -171,7 +205,7 @@ export function create_file_backend(config) {
                 return ok(undefined);
             }
             catch (cause) {
-                return err({ kind:
+                return err({ kind: "storage_error", cause: cause, operation: "delete" });
             }
         },
         async exists(data_key) {
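The `data_path` change above is where the `_data/<store_id>_<hash>.bin` layout from the JSDoc comes from: slashes in the data key are flattened to underscores so every blob lands in one shared directory. A standalone sketch of that mapping; the example key format is an assumption.

```ts
import { join } from "node:path";

// Mirrors the data_path logic in this diff: slashes in the data key become
// underscores, so all binary blobs share a single _data directory.
function data_path(base_path: string, data_key: string): string {
    return join(base_path, "_data", `${data_key.replace(/\//g, "_")}.bin`);
}

// Hypothetical key of the form "<store_id>/<hash>":
data_path("./data/corpus", "documents/abc123");
// => "data/corpus/_data/documents_abc123.bin"
```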
package/dist/backend/layered.d.ts
CHANGED
@@ -1,8 +1,46 @@
+/**
+ * @module Backends
+ * @description Layered backend for caching and replication strategies.
+ */
 import type { Backend } from '../types';
 export type LayeredBackendOptions = {
     read: Backend[];
     write: Backend[];
     list_strategy?: 'merge' | 'first';
 };
+/**
+ * Creates a layered backend that combines multiple backends with read/write separation.
+ * @category Backends
+ * @group Composite Backends
+ *
+ * Read operations use fallback: tries each read backend in order until one succeeds.
+ * Write operations use fanout: writes to all write backends (fails if any fail).
+ *
+ * Common use cases:
+ * - **Caching**: Memory backend first for reads, file backend for persistence
+ * - **Replication**: Write to multiple backends for redundancy
+ * - **Migration**: Read from old + new backends, write only to new
+ *
+ * @param options - Configuration with `read` backends (tried in order), `write` backends (all receive writes), and optional `list_strategy` ('merge' or 'first')
+ * @returns A Backend that delegates to the configured backends
+ *
+ * @example
+ * ```ts
+ * // Caching layer: memory cache with file persistence
+ * const cache = create_memory_backend()
+ * const storage = create_file_backend({ base_path: './data' })
+ *
+ * const backend = create_layered_backend({
+ *   read: [cache, storage],   // Try cache first, fall back to disk
+ *   write: [cache, storage],  // Write to both
+ * })
+ *
+ * // Migration: read from old and new, write only to new
+ * const backend = create_layered_backend({
+ *   read: [newBackend, oldBackend],
+ *   write: [newBackend],
+ * })
+ * ```
+ */
 export declare function create_layered_backend(options: LayeredBackendOptions): Backend;
 //# sourceMappingURL=layered.d.ts.map
package/dist/backend/layered.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"layered.d.ts","sourceRoot":"","sources":["../../backend/layered.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,OAAO,EAA6E,MAAM,UAAU,CAAA;AAGlH,MAAM,MAAM,qBAAqB,GAAG;IAClC,IAAI,EAAE,OAAO,EAAE,CAAA;IACf,KAAK,EAAE,OAAO,EAAE,CAAA;IAChB,aAAa,CAAC,EAAE,OAAO,GAAG,OAAO,CAAA;CAClC,CAAA;AAED,wBAAgB,sBAAsB,CAAC,OAAO,EAAE,qBAAqB,GAAG,OAAO,CA4I9E"}
+{"version":3,"file":"layered.d.ts","sourceRoot":"","sources":["../../backend/layered.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EAAE,OAAO,EAA6E,MAAM,UAAU,CAAA;AAGlH,MAAM,MAAM,qBAAqB,GAAG;IAClC,IAAI,EAAE,OAAO,EAAE,CAAA;IACf,KAAK,EAAE,OAAO,EAAE,CAAA;IAChB,aAAa,CAAC,EAAE,OAAO,GAAG,OAAO,CAAA;CAClC,CAAA;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAiCG;AACH,wBAAgB,sBAAsB,CAAC,OAAO,EAAE,qBAAqB,GAAG,OAAO,CA4I9E"}
package/dist/backend/layered.js
CHANGED
@@ -1,4 +1,42 @@
+/**
+ * @module Backends
+ * @description Layered backend for caching and replication strategies.
+ */
 import { ok, err } from '../types';
+/**
+ * Creates a layered backend that combines multiple backends with read/write separation.
+ * @category Backends
+ * @group Composite Backends
+ *
+ * Read operations use fallback: tries each read backend in order until one succeeds.
+ * Write operations use fanout: writes to all write backends (fails if any fail).
+ *
+ * Common use cases:
+ * - **Caching**: Memory backend first for reads, file backend for persistence
+ * - **Replication**: Write to multiple backends for redundancy
+ * - **Migration**: Read from old + new backends, write only to new
+ *
+ * @param options - Configuration with `read` backends (tried in order), `write` backends (all receive writes), and optional `list_strategy` ('merge' or 'first')
+ * @returns A Backend that delegates to the configured backends
+ *
+ * @example
+ * ```ts
+ * // Caching layer: memory cache with file persistence
+ * const cache = create_memory_backend()
+ * const storage = create_file_backend({ base_path: './data' })
+ *
+ * const backend = create_layered_backend({
+ *   read: [cache, storage],   // Try cache first, fall back to disk
+ *   write: [cache, storage],  // Write to both
+ * })
+ *
+ * // Migration: read from old and new, write only to new
+ * const backend = create_layered_backend({
+ *   read: [newBackend, oldBackend],
+ *   write: [newBackend],
+ * })
+ * ```
+ */
 export function create_layered_backend(options) {
     const { read, write, list_strategy = 'merge' } = options;
     const metadata = {
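The layered backend's implementation body is elided from this diff, but its JSDoc pins down the semantics: reads fall through the `read` list in order, writes fan out to every `write` backend. Below is a minimal sketch of the read-fallback half, assuming the `ok`/`err` result shape used throughout this diff and at least one configured read backend; it is not the package's actual code.

```ts
type Result<T, E> = { ok: true; value: T } | { ok: false; error: E };

// Sketch of the documented fallback strategy: try each read backend in order
// and return the first success.
async function read_with_fallback<T, E>(
    attempts: Array<() => Promise<Result<T, E>>>,
): Promise<Result<T, E>> {
    let last: Result<T, E> | undefined;
    for (const attempt of attempts) {
        last = await attempt();
        if (last.ok) return last; // first backend that succeeds wins
    }
    return last!; // all failed; non-null because we assume attempts is non-empty
}
```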
package/dist/backend/memory.d.ts
CHANGED
@@ -1,6 +1,36 @@
-
+/**
+ * @module Backends
+ * @description In-memory storage backend for testing and development.
+ */
+import type { Backend, EventHandler } from "../types";
 export type MemoryBackendOptions = {
     on_event?: EventHandler;
 };
+/**
+ * Creates an in-memory storage backend.
+ * @category Backends
+ * @group Storage Backends
+ *
+ * Ideal for testing, development, and ephemeral storage scenarios.
+ * All data is lost when the process ends.
+ *
+ * @param options - Optional configuration with `on_event` handler for observability
+ * @returns A Backend instance using in-memory storage
+ *
+ * @example
+ * ```ts
+ * // Basic usage for testing
+ * const backend = create_memory_backend()
+ * const corpus = create_corpus()
+ *   .with_backend(backend)
+ *   .with_store(define_store('test', text_codec()))
+ *   .build()
+ *
+ * // With event logging
+ * const backend = create_memory_backend({
+ *   on_event: (e) => console.log(`[${e.type}]`, e)
+ * })
+ * ```
+ */
 export declare function create_memory_backend(options?: MemoryBackendOptions): Backend;
 //# sourceMappingURL=memory.d.ts.map
package/dist/backend/memory.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"memory.d.ts","sourceRoot":"","sources":["../../backend/memory.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,OAAO,EAAwF,YAAY,EAAE,MAAM,UAAU,
+{"version":3,"file":"memory.d.ts","sourceRoot":"","sources":["../../backend/memory.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EAAE,OAAO,EAAwF,YAAY,EAAE,MAAM,UAAU,CAAC;AAG5I,MAAM,MAAM,oBAAoB,GAAG;IAClC,QAAQ,CAAC,EAAE,YAAY,CAAC;CACxB,CAAC;AAEF;;;;;;;;;;;;;;;;;;;;;;;;;GAyBG;AACH,wBAAgB,qBAAqB,CAAC,OAAO,CAAC,EAAE,oBAAoB,GAAG,OAAO,CAgJ7E"}
|