@noy-db/to-aws-s3 0.1.0-pre.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 vLannaAi
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,41 @@
1
+ # @noy-db/to-aws-s3
2
+
3
+ > AWS S3 adapter for [noy-db](https://github.com/vLannaAi/noy-db) — encrypted object storage with zero-knowledge cloud sync.
4
+
5
+ [![npm](https://img.shields.io/npm/v/@noy-db/to-aws-s3.svg)](https://www.npmjs.com/package/@noy-db/to-aws-s3)
6
+
7
+ ## Install
8
+
9
+ ```bash
10
+ pnpm add @noy-db/hub @noy-db/to-aws-s3 @aws-sdk/client-s3
11
+ ```
12
+
13
+ `@aws-sdk/client-s3` is a peer dependency — install it in your app.
14
+
15
+ ## Usage
16
+
17
+ ```ts
18
+ import { createNoydb } from '@noy-db/hub'
19
+ import { s3 } from '@noy-db/to-aws-s3'
20
+ import { S3Client } from '@aws-sdk/client-s3'
21
+
22
+ const client = new S3Client({ region: 'ap-southeast-1' })
23
+
24
+ const db = await createNoydb({
25
+ adapter: s3({ client, bucket: 'noydb-prod', prefix: 'tenant-a/' }),
26
+ userId: 'alice',
27
+ passphrase: process.env.NOYDB_PASSPHRASE!,
28
+ })
29
+ ```
30
+
31
+ Each record becomes an S3 object containing only a ciphertext envelope. S3 never sees plaintext — even with full bucket access, an attacker learns nothing without the user's passphrase.
32
+
33
+ Best suited for:
34
+
35
+ - Infrequent-access archival with strong privacy guarantees
36
+ - Cold storage of audit trails and backups
37
+ - Lower-cost alternative to DynamoDB for small teams
38
+
39
+ ## License
40
+
41
+ MIT © vLannaAi — see the [noy-db repo](https://github.com/vLannaAi/noy-db) for full documentation.
package/dist/index.cjs ADDED
@@ -0,0 +1,174 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
+ // src/index.ts
21
+ var index_exports = {};
22
+ __export(index_exports, {
23
+ s3: () => s3
24
+ });
25
+ module.exports = __toCommonJS(index_exports);
26
+ var import_hub = require("@noy-db/hub");
27
+ var import_client_s3 = require("@aws-sdk/client-s3");
28
function s3(options) {
  const { bucket, prefix = "" } = options;

  // Use the caller-supplied client when given; otherwise construct one,
  // forwarding `region` only when set so the SDK's own resolution applies.
  const client = options.client ?? new import_client_s3.S3Client({
    ...options.region ? { region: options.region } : {}
  });

  // Key scheme: {prefix}/{vault}/{collection}/{id}.json
  function objectKey(vault, collection, id) {
    const parts = [vault, collection, `${id}.json`];
    return prefix ? `${prefix}/${parts.join("/")}` : parts.join("/");
  }

  // Prefix covering one collection: {prefix}/{vault}/{collection}/
  function collPrefix(vault, collection) {
    const parts = [vault, collection, ""];
    return prefix ? `${prefix}/${parts.join("/")}` : parts.join("/");
  }

  // Prefix covering a whole vault: {prefix}/{vault}/
  function compPrefix(vault) {
    return prefix ? `${prefix}/${vault}/` : `${vault}/`;
  }

  // Enumerate every key under `pfx`, following S3's ContinuationToken.
  // FIX: the original issued a single ListObjectsV2 call in `list()` and
  // `loadAll()`, which silently truncates results at S3's 1000-keys-per-
  // response cap — contradicting the documented "~10K records" suitability.
  async function listAllKeys(pfx) {
    const keys = [];
    let token;
    do {
      const page = await client.send(new import_client_s3.ListObjectsV2Command({
        Bucket: bucket,
        Prefix: pfx,
        ...token ? { ContinuationToken: token } : {}
      }));
      for (const obj of page.Contents ?? []) {
        if (obj.Key) keys.push(obj.Key);
      }
      token = page.IsTruncated ? page.NextContinuationToken : void 0;
    } while (token);
    return keys;
  }

  return {
    name: "s3",

    /** Fetch one envelope; resolves null when the object does not exist. */
    async get(vault, collection, id) {
      try {
        const result = await client.send(new import_client_s3.GetObjectCommand({
          Bucket: bucket,
          Key: objectKey(vault, collection, id)
        }));
        if (!result.Body) return null;
        const body = await result.Body.transformToString();
        return JSON.parse(body);
      } catch (err) {
        // Narrow catch: only "missing object" maps to null; anything else
        // (auth, network, throttling) propagates to the caller.
        if (err instanceof Error && (err.name === "NoSuchKey" || err.name === "NotFound")) {
          return null;
        }
        throw err;
      }
    },

    /**
     * Write one envelope. When `expectedVersion` is given, performs a
     * best-effort version check first. NOTE(review): the read-then-write is
     * not atomic — S3 has no conditional put on arbitrary metadata, so
     * concurrent writers can still race (matches the package's documented
     * `casAtomic: false` limitation).
     */
    async put(vault, collection, id, envelope, expectedVersion) {
      if (expectedVersion !== void 0) {
        const existing = await this.get(vault, collection, id);
        if (existing && existing._v !== expectedVersion) {
          throw new import_hub.ConflictError(existing._v, `Version conflict: expected ${expectedVersion}, found ${existing._v}`);
        }
      }
      await client.send(new import_client_s3.PutObjectCommand({
        Bucket: bucket,
        Key: objectKey(vault, collection, id),
        Body: JSON.stringify(envelope),
        ContentType: "application/json"
      }));
    },

    /** Delete one record (S3 deletes are idempotent; missing keys no-op). */
    async delete(vault, collection, id) {
      await client.send(new import_client_s3.DeleteObjectCommand({
        Bucket: bucket,
        Key: objectKey(vault, collection, id)
      }));
    },

    /** List every record id in a collection (paginates past 1000 keys). */
    async list(vault, collection) {
      const pfx = collPrefix(vault, collection);
      const keys = await listAllKeys(pfx);
      return keys.filter((k) => k.endsWith(".json")).map((k) => k.slice(pfx.length, -5));
    },

    /**
     * Load every non-internal record in a vault as {collection: {id: envelope}}.
     * FIX: bodies are now fetched with Promise.all — the original awaited each
     * GetObject sequentially inside the loop (N serial round-trips), which
     * contradicted the package docs ("fetches them in parallel") and the
     * parallel pattern already used by `listPage()`.
     */
    async loadAll(vault) {
      const pfx = compPrefix(vault);
      const entries = [];
      for (const key of await listAllKeys(pfx)) {
        if (!key.endsWith(".json")) continue;
        const parts = key.slice(pfx.length).split("/");
        if (parts.length !== 2) continue;
        const collection = parts[0];
        if (collection.startsWith("_")) continue;
        entries.push({ key, collection, id: parts[1].slice(0, -5) });
      }
      const bodies = await Promise.all(entries.map(async (entry) => {
        const result = await client.send(new import_client_s3.GetObjectCommand({
          Bucket: bucket,
          Key: entry.key
        }));
        return result.Body ? result.Body.transformToString() : null;
      }));
      const snapshot = {};
      entries.forEach((entry, i) => {
        if (bodies[i] === null) return;
        if (!snapshot[entry.collection]) snapshot[entry.collection] = {};
        snapshot[entry.collection][entry.id] = JSON.parse(bodies[i]);
      });
      return snapshot;
    },

    /** Persist a full snapshot by writing each record via `put()`. */
    async saveAll(vault, data) {
      for (const [collection, records] of Object.entries(data)) {
        for (const [id, envelope] of Object.entries(records)) {
          await this.put(vault, collection, id, envelope);
        }
      }
    },

    /** Health check: resolves true when the bucket is reachable. */
    async ping() {
      try {
        await client.send(new import_client_s3.HeadBucketCommand({ Bucket: bucket }));
        return true;
      } catch {
        return false;
      }
    },

    /**
     * Paginate over a collection using S3's native `ContinuationToken`.
     *
     * Each page does:
     *  1. ListObjectsV2 with MaxKeys = limit and the previous token
     *  2. GetObject for every key on the page (in parallel)
     *
     * The 2-step pattern is necessary because S3 list responses don't
     * include object bodies. For very large collections this is N+1 — but
     * the parallel GETs amortize well, and consumers willing to pay for
     * stronger pagination should use a different adapter (Dynamo).
     */
    async listPage(vault, collection, cursor, limit = 100) {
      const pfx = collPrefix(vault, collection);
      const listResult = await client.send(new import_client_s3.ListObjectsV2Command({
        Bucket: bucket,
        Prefix: pfx,
        MaxKeys: limit,
        ...cursor ? { ContinuationToken: cursor } : {}
      }));
      const keys = (listResult.Contents ?? []).map((obj) => obj.Key ?? "").filter((k) => k.endsWith(".json"));
      const items = await Promise.all(keys.map(async (key) => {
        const id = key.slice(pfx.length, -5);
        const getResult = await client.send(new import_client_s3.GetObjectCommand({
          Bucket: bucket,
          Key: key
        }));
        if (!getResult.Body) return null;
        const body = await getResult.Body.transformToString();
        return { id, envelope: JSON.parse(body) };
      }));
      return {
        items: items.filter((x) => x !== null),
        nextCursor: listResult.IsTruncated && listResult.NextContinuationToken ? listResult.NextContinuationToken : null
      };
    }
  };
}
170
+ // Annotate the CommonJS export names for ESM import in node:
171
+ 0 && (module.exports = {
172
+ s3
173
+ });
174
+ //# sourceMappingURL=index.cjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/index.ts"],"sourcesContent":["/**\n * **@noy-db/to-aws-s3** — S3 object store for NOYDB.\n *\n * Each record is stored as a JSON object at\n * `{prefix}/{vault}/{collection}/{id}.json`. The `loadAll()` method uses\n * `ListObjectsV2` to enumerate keys then fetches them in parallel.\n *\n * ## When to use\n *\n * - **Blob / attachment storage** — pair with `@noy-db/to-aws-dynamo` via\n * `routeStore({ default: dynamo(...), blobs: s3(...) })` to route\n * encrypted binary chunks to S3.\n * - **Archive tier** — configure `routeStore` age-based tiering so old\n * records migrate to S3 while hot records stay in DynamoDB.\n * - **Large vaults** — S3 has no item size limit, unlike DynamoDB's 400 KB cap.\n *\n * ## Limitations\n *\n * - **`casAtomic: false`** — S3 has no server-side conditional write on\n * arbitrary metadata. Concurrent puts may result in last-write-wins.\n * Use DynamoDB for records that need conflict-safe writes.\n * - **`loadAll()` is O(N) requests** — listing + fetching every object in a\n * vault. Suitable for vaults up to ~10K records; beyond that, prefer\n * DynamoDB for indexed stores and S3 only for append-heavy blob storage.\n *\n * ## IAM minimum permissions\n *\n * ```json\n * { \"Action\": [\"s3:GetObject\", \"s3:PutObject\", \"s3:DeleteObject\",\n * \"s3:ListBucket\"] }\n * ```\n *\n * @packageDocumentation\n */\n\nimport type { NoydbStore, EncryptedEnvelope, VaultSnapshot } from '@noy-db/hub'\nimport { ConflictError } from '@noy-db/hub'\nimport {\n S3Client,\n GetObjectCommand,\n PutObjectCommand,\n DeleteObjectCommand,\n ListObjectsV2Command,\n HeadBucketCommand,\n} from '@aws-sdk/client-s3'\n\n/**\n * Options for `s3()`.\n *\n * Objects are stored at `{prefix}/{vault}/{collection}/{id}.json`.\n * `loadAll()` uses `ListObjectsV2` over the vault prefix followed by parallel\n * `GetObject` calls — suitable for vaults with up to ~10K records. 
For larger\n * vaults, use DynamoDB or pair with `routeStore` age-tiering so S3 only\n * holds archived records.\n *\n * Note: S3 does not support atomic CAS (`casAtomic: false`). Last-write-wins\n * on concurrent puts.\n */\nexport interface S3Options {\n /** S3 bucket name. */\n bucket: string\n /** Key prefix within the bucket. Default: ''. */\n prefix?: string\n /** AWS region. Used only when `client` is not provided. Default: 'us-east-1'. */\n region?: string\n /**\n * Pre-built S3Client from `@aws-sdk/client-s3`. If provided, the adapter\n * uses this client directly and ignores `region`. Useful for apps that want\n * to share a client across adapters or supply custom middleware.\n */\n client?: S3Client\n}\n\n/**\n * Create an S3 adapter.\n * Key scheme: `{prefix}/{vault}/{collection}/{id}.json`\n */\nexport function s3(options: S3Options): NoydbStore {\n const { bucket, prefix = '' } = options\n\n const client = options.client ?? new S3Client({\n ...(options.region ? { region: options.region } : {}),\n })\n\n function objectKey(vault: string, collection: string, id: string): string {\n const parts = [vault, collection, `${id}.json`]\n return prefix ? `${prefix}/${parts.join('/')}` : parts.join('/')\n }\n\n function collPrefix(vault: string, collection: string): string {\n const parts = [vault, collection, '']\n return prefix ? `${prefix}/${parts.join('/')}` : parts.join('/')\n }\n\n function compPrefix(vault: string): string {\n return prefix ? 
`${prefix}/${vault}/` : `${vault}/`\n }\n\n return {\n name: 's3',\n\n async get(vault, collection, id) {\n try {\n const result = await client.send(new GetObjectCommand({\n Bucket: bucket,\n Key: objectKey(vault, collection, id),\n }))\n\n if (!result.Body) return null\n const body = await result.Body.transformToString()\n return JSON.parse(body) as EncryptedEnvelope\n } catch (err: unknown) {\n if (err instanceof Error && (err.name === 'NoSuchKey' || err.name === 'NotFound')) {\n return null\n }\n throw err\n }\n },\n\n async put(vault, collection, id, envelope, expectedVersion) {\n if (expectedVersion !== undefined) {\n const existing = await this.get(vault, collection, id)\n if (existing && existing._v !== expectedVersion) {\n throw new ConflictError(existing._v, `Version conflict: expected ${expectedVersion}, found ${existing._v}`)\n }\n }\n\n await client.send(new PutObjectCommand({\n Bucket: bucket,\n Key: objectKey(vault, collection, id),\n Body: JSON.stringify(envelope),\n ContentType: 'application/json',\n }))\n },\n\n async delete(vault, collection, id) {\n await client.send(new DeleteObjectCommand({\n Bucket: bucket,\n Key: objectKey(vault, collection, id),\n }))\n },\n\n async list(vault, collection) {\n const pfx = collPrefix(vault, collection)\n const result = await client.send(new ListObjectsV2Command({\n Bucket: bucket,\n Prefix: pfx,\n }))\n\n return (result.Contents ?? [])\n .map(obj => obj.Key ?? '')\n .filter(k => k.endsWith('.json'))\n .map(k => k.slice(pfx.length, -5))\n },\n\n async loadAll(vault) {\n const pfx = compPrefix(vault)\n const listResult = await client.send(new ListObjectsV2Command({\n Bucket: bucket,\n Prefix: pfx,\n }))\n\n const snapshot: VaultSnapshot = {}\n\n for (const obj of listResult.Contents ?? []) {\n const key = obj.Key ?? 
''\n if (!key.endsWith('.json')) continue\n\n const relativePath = key.slice(pfx.length)\n const parts = relativePath.split('/')\n if (parts.length !== 2) continue\n\n const collection = parts[0]!\n const id = parts[1]!.slice(0, -5)\n if (collection.startsWith('_')) continue\n\n const getResult = await client.send(new GetObjectCommand({\n Bucket: bucket,\n Key: key,\n }))\n\n if (!getResult.Body) continue\n const body = await getResult.Body.transformToString()\n\n if (!snapshot[collection]) snapshot[collection] = {}\n snapshot[collection][id] = JSON.parse(body) as EncryptedEnvelope\n }\n\n return snapshot\n },\n\n async saveAll(vault, data) {\n for (const [collection, records] of Object.entries(data)) {\n for (const [id, envelope] of Object.entries(records)) {\n await this.put(vault, collection, id, envelope)\n }\n }\n },\n\n async ping() {\n try {\n await client.send(new HeadBucketCommand({ Bucket: bucket }))\n return true\n } catch {\n return false\n }\n },\n\n /**\n * Paginate over a collection using S3's native `ContinuationToken`.\n *\n * Each page does:\n * 1. ListObjectsV2 with MaxKeys = limit and the previous token\n * 2. GetObject for every key on the page (in parallel)\n *\n * The 2-step pattern is necessary because S3 list responses don't\n * include object bodies. For very large collections this is N+1 — but\n * the parallel GETs amortize well, and consumers willing to pay for\n * stronger pagination should use a different adapter (Dynamo).\n */\n async listPage(vault, collection, cursor, limit = 100) {\n const pfx = collPrefix(vault, collection)\n const listResult = await client.send(new ListObjectsV2Command({\n Bucket: bucket,\n Prefix: pfx,\n MaxKeys: limit,\n ...(cursor ? { ContinuationToken: cursor } : {}),\n }))\n\n const keys = (listResult.Contents ?? [])\n .map(obj => obj.Key ?? 
'')\n .filter(k => k.endsWith('.json'))\n\n // Fetch every body in parallel — bounded by `limit` so we never\n // fan out beyond the page size.\n const items = await Promise.all(keys.map(async (key) => {\n const id = key.slice(pfx.length, -5)\n const getResult = await client.send(new GetObjectCommand({\n Bucket: bucket,\n Key: key,\n }))\n if (!getResult.Body) return null\n const body = await getResult.Body.transformToString()\n return { id, envelope: JSON.parse(body) as EncryptedEnvelope }\n }))\n\n return {\n items: items.filter((x): x is { id: string; envelope: EncryptedEnvelope } => x !== null),\n nextCursor: listResult.IsTruncated && listResult.NextContinuationToken\n ? listResult.NextContinuationToken\n : null,\n }\n },\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAoCA,iBAA8B;AAC9B,uBAOO;AAiCA,SAAS,GAAG,SAAgC;AACjD,QAAM,EAAE,QAAQ,SAAS,GAAG,IAAI;AAEhC,QAAM,SAAS,QAAQ,UAAU,IAAI,0BAAS;AAAA,IAC5C,GAAI,QAAQ,SAAS,EAAE,QAAQ,QAAQ,OAAO,IAAI,CAAC;AAAA,EACrD,CAAC;AAED,WAAS,UAAU,OAAe,YAAoB,IAAoB;AACxE,UAAM,QAAQ,CAAC,OAAO,YAAY,GAAG,EAAE,OAAO;AAC9C,WAAO,SAAS,GAAG,MAAM,IAAI,MAAM,KAAK,GAAG,CAAC,KAAK,MAAM,KAAK,GAAG;AAAA,EACjE;AAEA,WAAS,WAAW,OAAe,YAA4B;AAC7D,UAAM,QAAQ,CAAC,OAAO,YAAY,EAAE;AACpC,WAAO,SAAS,GAAG,MAAM,IAAI,MAAM,KAAK,GAAG,CAAC,KAAK,MAAM,KAAK,GAAG;AAAA,EACjE;AAEA,WAAS,WAAW,OAAuB;AACzC,WAAO,SAAS,GAAG,MAAM,IAAI,KAAK,MAAM,GAAG,KAAK;AAAA,EAClD;AAEA,SAAO;AAAA,IACL,MAAM;AAAA,IAEN,MAAM,IAAI,OAAO,YAAY,IAAI;AAC/B,UAAI;AACF,cAAM,SAAS,MAAM,OAAO,KAAK,IAAI,kCAAiB;AAAA,UACpD,QAAQ;AAAA,UACR,KAAK,UAAU,OAAO,YAAY,EAAE;AAAA,QACtC,CAAC,CAAC;AAEF,YAAI,CAAC,OAAO,KAAM,QAAO;AACzB,cAAM,OAAO,MAAM,OAAO,KAAK,kBAAkB;AACjD,eAAO,KAAK,MAAM,IAAI;AAAA,MACxB,SAAS,KAAc;AACrB,YAAI,eAAe,UAAU,IAAI,SAAS,eAAe,IAAI,SAAS,aAAa;AACjF,iBAAO;AAAA,QACT;AACA,cAAM;AAAA,MACR;AAAA,IACF;AAAA,IAEA,MAAM,IAAI,OAAO,YAAY,IAAI,UAAU,iBAAiB;AAC1D,UAAI,oBAAoB,QAAW;AACjC,cAAM,WAAW,MAAM,KAAK,IAAI,OAAO,YAAY,EAAE;AACrD,YAAI,YAAY,SAAS,OAAO,iBAAiB;AAC/C,gBAAM,IAAI,yBAAc,SAAS,IAAI,8BAA8B,eAAe,WAAW,SAAS,EAAE,EAAE;
AAAA,QAC5G;AAAA,MACF;AAEA,YAAM,OAAO,KAAK,IAAI,kCAAiB;AAAA,QACrC,QAAQ;AAAA,QACR,KAAK,UAAU,OAAO,YAAY,EAAE;AAAA,QACpC,MAAM,KAAK,UAAU,QAAQ;AAAA,QAC7B,aAAa;AAAA,MACf,CAAC,CAAC;AAAA,IACJ;AAAA,IAEA,MAAM,OAAO,OAAO,YAAY,IAAI;AAClC,YAAM,OAAO,KAAK,IAAI,qCAAoB;AAAA,QACxC,QAAQ;AAAA,QACR,KAAK,UAAU,OAAO,YAAY,EAAE;AAAA,MACtC,CAAC,CAAC;AAAA,IACJ;AAAA,IAEA,MAAM,KAAK,OAAO,YAAY;AAC5B,YAAM,MAAM,WAAW,OAAO,UAAU;AACxC,YAAM,SAAS,MAAM,OAAO,KAAK,IAAI,sCAAqB;AAAA,QACxD,QAAQ;AAAA,QACR,QAAQ;AAAA,MACV,CAAC,CAAC;AAEF,cAAQ,OAAO,YAAY,CAAC,GACzB,IAAI,SAAO,IAAI,OAAO,EAAE,EACxB,OAAO,OAAK,EAAE,SAAS,OAAO,CAAC,EAC/B,IAAI,OAAK,EAAE,MAAM,IAAI,QAAQ,EAAE,CAAC;AAAA,IACrC;AAAA,IAEA,MAAM,QAAQ,OAAO;AACnB,YAAM,MAAM,WAAW,KAAK;AAC5B,YAAM,aAAa,MAAM,OAAO,KAAK,IAAI,sCAAqB;AAAA,QAC5D,QAAQ;AAAA,QACR,QAAQ;AAAA,MACV,CAAC,CAAC;AAEF,YAAM,WAA0B,CAAC;AAEjC,iBAAW,OAAO,WAAW,YAAY,CAAC,GAAG;AAC3C,cAAM,MAAM,IAAI,OAAO;AACvB,YAAI,CAAC,IAAI,SAAS,OAAO,EAAG;AAE5B,cAAM,eAAe,IAAI,MAAM,IAAI,MAAM;AACzC,cAAM,QAAQ,aAAa,MAAM,GAAG;AACpC,YAAI,MAAM,WAAW,EAAG;AAExB,cAAM,aAAa,MAAM,CAAC;AAC1B,cAAM,KAAK,MAAM,CAAC,EAAG,MAAM,GAAG,EAAE;AAChC,YAAI,WAAW,WAAW,GAAG,EAAG;AAEhC,cAAM,YAAY,MAAM,OAAO,KAAK,IAAI,kCAAiB;AAAA,UACvD,QAAQ;AAAA,UACR,KAAK;AAAA,QACP,CAAC,CAAC;AAEF,YAAI,CAAC,UAAU,KAAM;AACrB,cAAM,OAAO,MAAM,UAAU,KAAK,kBAAkB;AAEpD,YAAI,CAAC,SAAS,UAAU,EAAG,UAAS,UAAU,IAAI,CAAC;AACnD,iBAAS,UAAU,EAAE,EAAE,IAAI,KAAK,MAAM,IAAI;AAAA,MAC5C;AAEA,aAAO;AAAA,IACT;AAAA,IAEA,MAAM,QAAQ,OAAO,MAAM;AACzB,iBAAW,CAAC,YAAY,OAAO,KAAK,OAAO,QAAQ,IAAI,GAAG;AACxD,mBAAW,CAAC,IAAI,QAAQ,KAAK,OAAO,QAAQ,OAAO,GAAG;AACpD,gBAAM,KAAK,IAAI,OAAO,YAAY,IAAI,QAAQ;AAAA,QAChD;AAAA,MACF;AAAA,IACF;AAAA,IAEA,MAAM,OAAO;AACX,UAAI;AACF,cAAM,OAAO,KAAK,IAAI,mCAAkB,EAAE,QAAQ,OAAO,CAAC,CAAC;AAC3D,eAAO;AAAA,MACT,QAAQ;AACN,eAAO;AAAA,MACT;AAAA,IACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAcA,MAAM,SAAS,OAAO,YAAY,QAAQ,QAAQ,KAAK;AACrD,YAAM,MAAM,WAAW,OAAO,UAAU;AACxC,YAAM,aAAa,MAAM,OAAO,KAAK,IAAI,sCAAqB;AAAA,QAC5D,QAAQ;AAAA,QACR,QAAQ;AAAA,QACR,SAAS;AAAA,QACT,GAAI,SAAS,EAAE,mBAAmB,OAAO,IAAI,C
AAC;AAAA,MAChD,CAAC,CAAC;AAEF,YAAM,QAAQ,WAAW,YAAY,CAAC,GACnC,IAAI,SAAO,IAAI,OAAO,EAAE,EACxB,OAAO,OAAK,EAAE,SAAS,OAAO,CAAC;AAIlC,YAAM,QAAQ,MAAM,QAAQ,IAAI,KAAK,IAAI,OAAO,QAAQ;AACtD,cAAM,KAAK,IAAI,MAAM,IAAI,QAAQ,EAAE;AACnC,cAAM,YAAY,MAAM,OAAO,KAAK,IAAI,kCAAiB;AAAA,UACvD,QAAQ;AAAA,UACR,KAAK;AAAA,QACP,CAAC,CAAC;AACF,YAAI,CAAC,UAAU,KAAM,QAAO;AAC5B,cAAM,OAAO,MAAM,UAAU,KAAK,kBAAkB;AACpD,eAAO,EAAE,IAAI,UAAU,KAAK,MAAM,IAAI,EAAuB;AAAA,MAC/D,CAAC,CAAC;AAEF,aAAO;AAAA,QACL,OAAO,MAAM,OAAO,CAAC,MAAwD,MAAM,IAAI;AAAA,QACvF,YAAY,WAAW,eAAe,WAAW,wBAC7C,WAAW,wBACX;AAAA,MACN;AAAA,IACF;AAAA,EACF;AACF;","names":[]}
@@ -0,0 +1,71 @@
1
+ import { NoydbStore } from '@noy-db/hub';
2
+ import { S3Client } from '@aws-sdk/client-s3';
3
+
4
+ /**
5
+ * **@noy-db/to-aws-s3** — S3 object store for NOYDB.
6
+ *
7
+ * Each record is stored as a JSON object at
8
+ * `{prefix}/{vault}/{collection}/{id}.json`. The `loadAll()` method uses
9
+ * `ListObjectsV2` to enumerate keys then fetches them in parallel.
10
+ *
11
+ * ## When to use
12
+ *
13
+ * - **Blob / attachment storage** — pair with `@noy-db/to-aws-dynamo` via
14
+ * `routeStore({ default: dynamo(...), blobs: s3(...) })` to route
15
+ * encrypted binary chunks to S3.
16
+ * - **Archive tier** — configure `routeStore` age-based tiering so old
17
+ * records migrate to S3 while hot records stay in DynamoDB.
18
+ * - **Large vaults** — S3 has no item size limit, unlike DynamoDB's 400 KB cap.
19
+ *
20
+ * ## Limitations
21
+ *
22
+ * - **`casAtomic: false`** — S3 has no server-side conditional write on
23
+ * arbitrary metadata. Concurrent puts may result in last-write-wins.
24
+ * Use DynamoDB for records that need conflict-safe writes.
25
+ * - **`loadAll()` is O(N) requests** — listing + fetching every object in a
26
+ * vault. Suitable for vaults up to ~10K records; beyond that, prefer
27
+ * DynamoDB for indexed stores and S3 only for append-heavy blob storage.
28
+ *
29
+ * ## IAM minimum permissions
30
+ *
31
+ * ```json
32
+ * { "Action": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject",
33
+ * "s3:ListBucket"] }
34
+ * ```
35
+ *
36
+ * @packageDocumentation
37
+ */
38
+
39
+ /**
40
+ * Options for `s3()`.
41
+ *
42
+ * Objects are stored at `{prefix}/{vault}/{collection}/{id}.json`.
43
+ * `loadAll()` uses `ListObjectsV2` over the vault prefix followed by parallel
44
+ * `GetObject` calls — suitable for vaults with up to ~10K records. For larger
45
+ * vaults, use DynamoDB or pair with `routeStore` age-tiering so S3 only
46
+ * holds archived records.
47
+ *
48
+ * Note: S3 does not support atomic CAS (`casAtomic: false`). Last-write-wins
49
+ * on concurrent puts.
50
+ */
51
+ interface S3Options {
52
+ /** S3 bucket name. */
53
+ bucket: string;
54
+ /** Key prefix within the bucket. Default: ''. */
55
+ prefix?: string;
56
+ /** AWS region. Used only when `client` is not provided. Default: 'us-east-1'. */
57
+ region?: string;
58
+ /**
59
+ * Pre-built S3Client from `@aws-sdk/client-s3`. If provided, the adapter
60
+ * uses this client directly and ignores `region`. Useful for apps that want
61
+ * to share a client across adapters or supply custom middleware.
62
+ */
63
+ client?: S3Client;
64
+ }
65
+ /**
66
+ * Create an S3 adapter.
67
+ * Key scheme: `{prefix}/{vault}/{collection}/{id}.json`
68
+ */
69
+ declare function s3(options: S3Options): NoydbStore;
70
+
71
+ export { type S3Options, s3 };
@@ -0,0 +1,71 @@
1
+ import { NoydbStore } from '@noy-db/hub';
2
+ import { S3Client } from '@aws-sdk/client-s3';
3
+
4
+ /**
5
+ * **@noy-db/to-aws-s3** — S3 object store for NOYDB.
6
+ *
7
+ * Each record is stored as a JSON object at
8
+ * `{prefix}/{vault}/{collection}/{id}.json`. The `loadAll()` method uses
9
+ * `ListObjectsV2` to enumerate keys then fetches them in parallel.
10
+ *
11
+ * ## When to use
12
+ *
13
+ * - **Blob / attachment storage** — pair with `@noy-db/to-aws-dynamo` via
14
+ * `routeStore({ default: dynamo(...), blobs: s3(...) })` to route
15
+ * encrypted binary chunks to S3.
16
+ * - **Archive tier** — configure `routeStore` age-based tiering so old
17
+ * records migrate to S3 while hot records stay in DynamoDB.
18
+ * - **Large vaults** — S3 has no item size limit, unlike DynamoDB's 400 KB cap.
19
+ *
20
+ * ## Limitations
21
+ *
22
+ * - **`casAtomic: false`** — S3 has no server-side conditional write on
23
+ * arbitrary metadata. Concurrent puts may result in last-write-wins.
24
+ * Use DynamoDB for records that need conflict-safe writes.
25
+ * - **`loadAll()` is O(N) requests** — listing + fetching every object in a
26
+ * vault. Suitable for vaults up to ~10K records; beyond that, prefer
27
+ * DynamoDB for indexed stores and S3 only for append-heavy blob storage.
28
+ *
29
+ * ## IAM minimum permissions
30
+ *
31
+ * ```json
32
+ * { "Action": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject",
33
+ * "s3:ListBucket"] }
34
+ * ```
35
+ *
36
+ * @packageDocumentation
37
+ */
38
+
39
+ /**
40
+ * Options for `s3()`.
41
+ *
42
+ * Objects are stored at `{prefix}/{vault}/{collection}/{id}.json`.
43
+ * `loadAll()` uses `ListObjectsV2` over the vault prefix followed by parallel
44
+ * `GetObject` calls — suitable for vaults with up to ~10K records. For larger
45
+ * vaults, use DynamoDB or pair with `routeStore` age-tiering so S3 only
46
+ * holds archived records.
47
+ *
48
+ * Note: S3 does not support atomic CAS (`casAtomic: false`). Last-write-wins
49
+ * on concurrent puts.
50
+ */
51
+ interface S3Options {
52
+ /** S3 bucket name. */
53
+ bucket: string;
54
+ /** Key prefix within the bucket. Default: ''. */
55
+ prefix?: string;
56
+ /** AWS region. Used only when `client` is not provided. Default: 'us-east-1'. */
57
+ region?: string;
58
+ /**
59
+ * Pre-built S3Client from `@aws-sdk/client-s3`. If provided, the adapter
60
+ * uses this client directly and ignores `region`. Useful for apps that want
61
+ * to share a client across adapters or supply custom middleware.
62
+ */
63
+ client?: S3Client;
64
+ }
65
+ /**
66
+ * Create an S3 adapter.
67
+ * Key scheme: `{prefix}/{vault}/{collection}/{id}.json`
68
+ */
69
+ declare function s3(options: S3Options): NoydbStore;
70
+
71
+ export { type S3Options, s3 };
package/dist/index.js ADDED
@@ -0,0 +1,156 @@
1
+ // src/index.ts
2
+ import { ConflictError } from "@noy-db/hub";
3
+ import {
4
+ S3Client,
5
+ GetObjectCommand,
6
+ PutObjectCommand,
7
+ DeleteObjectCommand,
8
+ ListObjectsV2Command,
9
+ HeadBucketCommand
10
+ } from "@aws-sdk/client-s3";
11
function s3(options) {
  const { bucket, prefix = "" } = options;

  // Use the caller-supplied client when given; otherwise construct one,
  // forwarding `region` only when set so the SDK's own resolution applies.
  const client = options.client ?? new S3Client({
    ...options.region ? { region: options.region } : {}
  });

  // Key scheme: {prefix}/{vault}/{collection}/{id}.json
  function objectKey(vault, collection, id) {
    const parts = [vault, collection, `${id}.json`];
    return prefix ? `${prefix}/${parts.join("/")}` : parts.join("/");
  }

  // Prefix covering one collection: {prefix}/{vault}/{collection}/
  function collPrefix(vault, collection) {
    const parts = [vault, collection, ""];
    return prefix ? `${prefix}/${parts.join("/")}` : parts.join("/");
  }

  // Prefix covering a whole vault: {prefix}/{vault}/
  function compPrefix(vault) {
    return prefix ? `${prefix}/${vault}/` : `${vault}/`;
  }

  // Enumerate every key under `pfx`, following S3's ContinuationToken.
  // FIX: the original issued a single ListObjectsV2 call in `list()` and
  // `loadAll()`, which silently truncates results at S3's 1000-keys-per-
  // response cap — contradicting the documented "~10K records" suitability.
  async function listAllKeys(pfx) {
    const keys = [];
    let token;
    do {
      const page = await client.send(new ListObjectsV2Command({
        Bucket: bucket,
        Prefix: pfx,
        ...token ? { ContinuationToken: token } : {}
      }));
      for (const obj of page.Contents ?? []) {
        if (obj.Key) keys.push(obj.Key);
      }
      token = page.IsTruncated ? page.NextContinuationToken : void 0;
    } while (token);
    return keys;
  }

  return {
    name: "s3",

    /** Fetch one envelope; resolves null when the object does not exist. */
    async get(vault, collection, id) {
      try {
        const result = await client.send(new GetObjectCommand({
          Bucket: bucket,
          Key: objectKey(vault, collection, id)
        }));
        if (!result.Body) return null;
        const body = await result.Body.transformToString();
        return JSON.parse(body);
      } catch (err) {
        // Narrow catch: only "missing object" maps to null; anything else
        // (auth, network, throttling) propagates to the caller.
        if (err instanceof Error && (err.name === "NoSuchKey" || err.name === "NotFound")) {
          return null;
        }
        throw err;
      }
    },

    /**
     * Write one envelope. When `expectedVersion` is given, performs a
     * best-effort version check first. NOTE(review): the read-then-write is
     * not atomic — S3 has no conditional put on arbitrary metadata, so
     * concurrent writers can still race (matches the package's documented
     * `casAtomic: false` limitation).
     */
    async put(vault, collection, id, envelope, expectedVersion) {
      if (expectedVersion !== void 0) {
        const existing = await this.get(vault, collection, id);
        if (existing && existing._v !== expectedVersion) {
          throw new ConflictError(existing._v, `Version conflict: expected ${expectedVersion}, found ${existing._v}`);
        }
      }
      await client.send(new PutObjectCommand({
        Bucket: bucket,
        Key: objectKey(vault, collection, id),
        Body: JSON.stringify(envelope),
        ContentType: "application/json"
      }));
    },

    /** Delete one record (S3 deletes are idempotent; missing keys no-op). */
    async delete(vault, collection, id) {
      await client.send(new DeleteObjectCommand({
        Bucket: bucket,
        Key: objectKey(vault, collection, id)
      }));
    },

    /** List every record id in a collection (paginates past 1000 keys). */
    async list(vault, collection) {
      const pfx = collPrefix(vault, collection);
      const keys = await listAllKeys(pfx);
      return keys.filter((k) => k.endsWith(".json")).map((k) => k.slice(pfx.length, -5));
    },

    /**
     * Load every non-internal record in a vault as {collection: {id: envelope}}.
     * FIX: bodies are now fetched with Promise.all — the original awaited each
     * GetObject sequentially inside the loop (N serial round-trips), which
     * contradicted the package docs ("fetches them in parallel") and the
     * parallel pattern already used by `listPage()`.
     */
    async loadAll(vault) {
      const pfx = compPrefix(vault);
      const entries = [];
      for (const key of await listAllKeys(pfx)) {
        if (!key.endsWith(".json")) continue;
        const parts = key.slice(pfx.length).split("/");
        if (parts.length !== 2) continue;
        const collection = parts[0];
        if (collection.startsWith("_")) continue;
        entries.push({ key, collection, id: parts[1].slice(0, -5) });
      }
      const bodies = await Promise.all(entries.map(async (entry) => {
        const result = await client.send(new GetObjectCommand({
          Bucket: bucket,
          Key: entry.key
        }));
        return result.Body ? result.Body.transformToString() : null;
      }));
      const snapshot = {};
      entries.forEach((entry, i) => {
        if (bodies[i] === null) return;
        if (!snapshot[entry.collection]) snapshot[entry.collection] = {};
        snapshot[entry.collection][entry.id] = JSON.parse(bodies[i]);
      });
      return snapshot;
    },

    /** Persist a full snapshot by writing each record via `put()`. */
    async saveAll(vault, data) {
      for (const [collection, records] of Object.entries(data)) {
        for (const [id, envelope] of Object.entries(records)) {
          await this.put(vault, collection, id, envelope);
        }
      }
    },

    /** Health check: resolves true when the bucket is reachable. */
    async ping() {
      try {
        await client.send(new HeadBucketCommand({ Bucket: bucket }));
        return true;
      } catch {
        return false;
      }
    },

    /**
     * Paginate over a collection using S3's native `ContinuationToken`.
     *
     * Each page does:
     *  1. ListObjectsV2 with MaxKeys = limit and the previous token
     *  2. GetObject for every key on the page (in parallel)
     *
     * The 2-step pattern is necessary because S3 list responses don't
     * include object bodies. For very large collections this is N+1 — but
     * the parallel GETs amortize well, and consumers willing to pay for
     * stronger pagination should use a different adapter (Dynamo).
     */
    async listPage(vault, collection, cursor, limit = 100) {
      const pfx = collPrefix(vault, collection);
      const listResult = await client.send(new ListObjectsV2Command({
        Bucket: bucket,
        Prefix: pfx,
        MaxKeys: limit,
        ...cursor ? { ContinuationToken: cursor } : {}
      }));
      const keys = (listResult.Contents ?? []).map((obj) => obj.Key ?? "").filter((k) => k.endsWith(".json"));
      const items = await Promise.all(keys.map(async (key) => {
        const id = key.slice(pfx.length, -5);
        const getResult = await client.send(new GetObjectCommand({
          Bucket: bucket,
          Key: key
        }));
        if (!getResult.Body) return null;
        const body = await getResult.Body.transformToString();
        return { id, envelope: JSON.parse(body) };
      }));
      return {
        items: items.filter((x) => x !== null),
        nextCursor: listResult.IsTruncated && listResult.NextContinuationToken ? listResult.NextContinuationToken : null
      };
    }
  };
}
153
+ export {
154
+ s3
155
+ };
156
+ //# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/index.ts"],"sourcesContent":["/**\n * **@noy-db/to-aws-s3** — S3 object store for NOYDB.\n *\n * Each record is stored as a JSON object at\n * `{prefix}/{vault}/{collection}/{id}.json`. The `loadAll()` method uses\n * `ListObjectsV2` to enumerate keys then fetches them in parallel.\n *\n * ## When to use\n *\n * - **Blob / attachment storage** — pair with `@noy-db/to-aws-dynamo` via\n * `routeStore({ default: dynamo(...), blobs: s3(...) })` to route\n * encrypted binary chunks to S3.\n * - **Archive tier** — configure `routeStore` age-based tiering so old\n * records migrate to S3 while hot records stay in DynamoDB.\n * - **Large vaults** — S3 has no item size limit, unlike DynamoDB's 400 KB cap.\n *\n * ## Limitations\n *\n * - **`casAtomic: false`** — S3 has no server-side conditional write on\n * arbitrary metadata. Concurrent puts may result in last-write-wins.\n * Use DynamoDB for records that need conflict-safe writes.\n * - **`loadAll()` is O(N) requests** — listing + fetching every object in a\n * vault. Suitable for vaults up to ~10K records; beyond that, prefer\n * DynamoDB for indexed stores and S3 only for append-heavy blob storage.\n *\n * ## IAM minimum permissions\n *\n * ```json\n * { \"Action\": [\"s3:GetObject\", \"s3:PutObject\", \"s3:DeleteObject\",\n * \"s3:ListBucket\"] }\n * ```\n *\n * @packageDocumentation\n */\n\nimport type { NoydbStore, EncryptedEnvelope, VaultSnapshot } from '@noy-db/hub'\nimport { ConflictError } from '@noy-db/hub'\nimport {\n S3Client,\n GetObjectCommand,\n PutObjectCommand,\n DeleteObjectCommand,\n ListObjectsV2Command,\n HeadBucketCommand,\n} from '@aws-sdk/client-s3'\n\n/**\n * Options for `s3()`.\n *\n * Objects are stored at `{prefix}/{vault}/{collection}/{id}.json`.\n * `loadAll()` uses `ListObjectsV2` over the vault prefix followed by parallel\n * `GetObject` calls — suitable for vaults with up to ~10K records. 
For larger\n * vaults, use DynamoDB or pair with `routeStore` age-tiering so S3 only\n * holds archived records.\n *\n * Note: S3 does not support atomic CAS (`casAtomic: false`). Last-write-wins\n * on concurrent puts.\n */\nexport interface S3Options {\n /** S3 bucket name. */\n bucket: string\n /** Key prefix within the bucket. Default: ''. */\n prefix?: string\n /** AWS region. Used only when `client` is not provided. Default: 'us-east-1'. */\n region?: string\n /**\n * Pre-built S3Client from `@aws-sdk/client-s3`. If provided, the adapter\n * uses this client directly and ignores `region`. Useful for apps that want\n * to share a client across adapters or supply custom middleware.\n */\n client?: S3Client\n}\n\n/**\n * Create an S3 adapter.\n * Key scheme: `{prefix}/{vault}/{collection}/{id}.json`\n */\nexport function s3(options: S3Options): NoydbStore {\n const { bucket, prefix = '' } = options\n\n const client = options.client ?? new S3Client({\n ...(options.region ? { region: options.region } : {}),\n })\n\n function objectKey(vault: string, collection: string, id: string): string {\n const parts = [vault, collection, `${id}.json`]\n return prefix ? `${prefix}/${parts.join('/')}` : parts.join('/')\n }\n\n function collPrefix(vault: string, collection: string): string {\n const parts = [vault, collection, '']\n return prefix ? `${prefix}/${parts.join('/')}` : parts.join('/')\n }\n\n function compPrefix(vault: string): string {\n return prefix ? 
`${prefix}/${vault}/` : `${vault}/`\n }\n\n return {\n name: 's3',\n\n async get(vault, collection, id) {\n try {\n const result = await client.send(new GetObjectCommand({\n Bucket: bucket,\n Key: objectKey(vault, collection, id),\n }))\n\n if (!result.Body) return null\n const body = await result.Body.transformToString()\n return JSON.parse(body) as EncryptedEnvelope\n } catch (err: unknown) {\n if (err instanceof Error && (err.name === 'NoSuchKey' || err.name === 'NotFound')) {\n return null\n }\n throw err\n }\n },\n\n async put(vault, collection, id, envelope, expectedVersion) {\n if (expectedVersion !== undefined) {\n const existing = await this.get(vault, collection, id)\n if (existing && existing._v !== expectedVersion) {\n throw new ConflictError(existing._v, `Version conflict: expected ${expectedVersion}, found ${existing._v}`)\n }\n }\n\n await client.send(new PutObjectCommand({\n Bucket: bucket,\n Key: objectKey(vault, collection, id),\n Body: JSON.stringify(envelope),\n ContentType: 'application/json',\n }))\n },\n\n async delete(vault, collection, id) {\n await client.send(new DeleteObjectCommand({\n Bucket: bucket,\n Key: objectKey(vault, collection, id),\n }))\n },\n\n async list(vault, collection) {\n const pfx = collPrefix(vault, collection)\n const result = await client.send(new ListObjectsV2Command({\n Bucket: bucket,\n Prefix: pfx,\n }))\n\n return (result.Contents ?? [])\n .map(obj => obj.Key ?? '')\n .filter(k => k.endsWith('.json'))\n .map(k => k.slice(pfx.length, -5))\n },\n\n async loadAll(vault) {\n const pfx = compPrefix(vault)\n const listResult = await client.send(new ListObjectsV2Command({\n Bucket: bucket,\n Prefix: pfx,\n }))\n\n const snapshot: VaultSnapshot = {}\n\n for (const obj of listResult.Contents ?? []) {\n const key = obj.Key ?? 
''\n if (!key.endsWith('.json')) continue\n\n const relativePath = key.slice(pfx.length)\n const parts = relativePath.split('/')\n if (parts.length !== 2) continue\n\n const collection = parts[0]!\n const id = parts[1]!.slice(0, -5)\n if (collection.startsWith('_')) continue\n\n const getResult = await client.send(new GetObjectCommand({\n Bucket: bucket,\n Key: key,\n }))\n\n if (!getResult.Body) continue\n const body = await getResult.Body.transformToString()\n\n if (!snapshot[collection]) snapshot[collection] = {}\n snapshot[collection][id] = JSON.parse(body) as EncryptedEnvelope\n }\n\n return snapshot\n },\n\n async saveAll(vault, data) {\n for (const [collection, records] of Object.entries(data)) {\n for (const [id, envelope] of Object.entries(records)) {\n await this.put(vault, collection, id, envelope)\n }\n }\n },\n\n async ping() {\n try {\n await client.send(new HeadBucketCommand({ Bucket: bucket }))\n return true\n } catch {\n return false\n }\n },\n\n /**\n * Paginate over a collection using S3's native `ContinuationToken`.\n *\n * Each page does:\n * 1. ListObjectsV2 with MaxKeys = limit and the previous token\n * 2. GetObject for every key on the page (in parallel)\n *\n * The 2-step pattern is necessary because S3 list responses don't\n * include object bodies. For very large collections this is N+1 — but\n * the parallel GETs amortize well, and consumers willing to pay for\n * stronger pagination should use a different adapter (Dynamo).\n */\n async listPage(vault, collection, cursor, limit = 100) {\n const pfx = collPrefix(vault, collection)\n const listResult = await client.send(new ListObjectsV2Command({\n Bucket: bucket,\n Prefix: pfx,\n MaxKeys: limit,\n ...(cursor ? { ContinuationToken: cursor } : {}),\n }))\n\n const keys = (listResult.Contents ?? [])\n .map(obj => obj.Key ?? 
'')\n .filter(k => k.endsWith('.json'))\n\n // Fetch every body in parallel — bounded by `limit` so we never\n // fan out beyond the page size.\n const items = await Promise.all(keys.map(async (key) => {\n const id = key.slice(pfx.length, -5)\n const getResult = await client.send(new GetObjectCommand({\n Bucket: bucket,\n Key: key,\n }))\n if (!getResult.Body) return null\n const body = await getResult.Body.transformToString()\n return { id, envelope: JSON.parse(body) as EncryptedEnvelope }\n }))\n\n return {\n items: items.filter((x): x is { id: string; envelope: EncryptedEnvelope } => x !== null),\n nextCursor: listResult.IsTruncated && listResult.NextContinuationToken\n ? listResult.NextContinuationToken\n : null,\n }\n },\n }\n}\n"],"mappings":";AAoCA,SAAS,qBAAqB;AAC9B;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AAiCA,SAAS,GAAG,SAAgC;AACjD,QAAM,EAAE,QAAQ,SAAS,GAAG,IAAI;AAEhC,QAAM,SAAS,QAAQ,UAAU,IAAI,SAAS;AAAA,IAC5C,GAAI,QAAQ,SAAS,EAAE,QAAQ,QAAQ,OAAO,IAAI,CAAC;AAAA,EACrD,CAAC;AAED,WAAS,UAAU,OAAe,YAAoB,IAAoB;AACxE,UAAM,QAAQ,CAAC,OAAO,YAAY,GAAG,EAAE,OAAO;AAC9C,WAAO,SAAS,GAAG,MAAM,IAAI,MAAM,KAAK,GAAG,CAAC,KAAK,MAAM,KAAK,GAAG;AAAA,EACjE;AAEA,WAAS,WAAW,OAAe,YAA4B;AAC7D,UAAM,QAAQ,CAAC,OAAO,YAAY,EAAE;AACpC,WAAO,SAAS,GAAG,MAAM,IAAI,MAAM,KAAK,GAAG,CAAC,KAAK,MAAM,KAAK,GAAG;AAAA,EACjE;AAEA,WAAS,WAAW,OAAuB;AACzC,WAAO,SAAS,GAAG,MAAM,IAAI,KAAK,MAAM,GAAG,KAAK;AAAA,EAClD;AAEA,SAAO;AAAA,IACL,MAAM;AAAA,IAEN,MAAM,IAAI,OAAO,YAAY,IAAI;AAC/B,UAAI;AACF,cAAM,SAAS,MAAM,OAAO,KAAK,IAAI,iBAAiB;AAAA,UACpD,QAAQ;AAAA,UACR,KAAK,UAAU,OAAO,YAAY,EAAE;AAAA,QACtC,CAAC,CAAC;AAEF,YAAI,CAAC,OAAO,KAAM,QAAO;AACzB,cAAM,OAAO,MAAM,OAAO,KAAK,kBAAkB;AACjD,eAAO,KAAK,MAAM,IAAI;AAAA,MACxB,SAAS,KAAc;AACrB,YAAI,eAAe,UAAU,IAAI,SAAS,eAAe,IAAI,SAAS,aAAa;AACjF,iBAAO;AAAA,QACT;AACA,cAAM;AAAA,MACR;AAAA,IACF;AAAA,IAEA,MAAM,IAAI,OAAO,YAAY,IAAI,UAAU,iBAAiB;AAC1D,UAAI,oBAAoB,QAAW;AACjC,cAAM,WAAW,MAAM,KAAK,IAAI,OAAO,YAAY,EAAE;AACrD,YAAI,YAAY,SAAS,OAAO,iBAAiB;AAC/C,gBAAM,IAAI,cAAc,SAAS,IAAI,8BAA8B,eA
Ae,WAAW,SAAS,EAAE,EAAE;AAAA,QAC5G;AAAA,MACF;AAEA,YAAM,OAAO,KAAK,IAAI,iBAAiB;AAAA,QACrC,QAAQ;AAAA,QACR,KAAK,UAAU,OAAO,YAAY,EAAE;AAAA,QACpC,MAAM,KAAK,UAAU,QAAQ;AAAA,QAC7B,aAAa;AAAA,MACf,CAAC,CAAC;AAAA,IACJ;AAAA,IAEA,MAAM,OAAO,OAAO,YAAY,IAAI;AAClC,YAAM,OAAO,KAAK,IAAI,oBAAoB;AAAA,QACxC,QAAQ;AAAA,QACR,KAAK,UAAU,OAAO,YAAY,EAAE;AAAA,MACtC,CAAC,CAAC;AAAA,IACJ;AAAA,IAEA,MAAM,KAAK,OAAO,YAAY;AAC5B,YAAM,MAAM,WAAW,OAAO,UAAU;AACxC,YAAM,SAAS,MAAM,OAAO,KAAK,IAAI,qBAAqB;AAAA,QACxD,QAAQ;AAAA,QACR,QAAQ;AAAA,MACV,CAAC,CAAC;AAEF,cAAQ,OAAO,YAAY,CAAC,GACzB,IAAI,SAAO,IAAI,OAAO,EAAE,EACxB,OAAO,OAAK,EAAE,SAAS,OAAO,CAAC,EAC/B,IAAI,OAAK,EAAE,MAAM,IAAI,QAAQ,EAAE,CAAC;AAAA,IACrC;AAAA,IAEA,MAAM,QAAQ,OAAO;AACnB,YAAM,MAAM,WAAW,KAAK;AAC5B,YAAM,aAAa,MAAM,OAAO,KAAK,IAAI,qBAAqB;AAAA,QAC5D,QAAQ;AAAA,QACR,QAAQ;AAAA,MACV,CAAC,CAAC;AAEF,YAAM,WAA0B,CAAC;AAEjC,iBAAW,OAAO,WAAW,YAAY,CAAC,GAAG;AAC3C,cAAM,MAAM,IAAI,OAAO;AACvB,YAAI,CAAC,IAAI,SAAS,OAAO,EAAG;AAE5B,cAAM,eAAe,IAAI,MAAM,IAAI,MAAM;AACzC,cAAM,QAAQ,aAAa,MAAM,GAAG;AACpC,YAAI,MAAM,WAAW,EAAG;AAExB,cAAM,aAAa,MAAM,CAAC;AAC1B,cAAM,KAAK,MAAM,CAAC,EAAG,MAAM,GAAG,EAAE;AAChC,YAAI,WAAW,WAAW,GAAG,EAAG;AAEhC,cAAM,YAAY,MAAM,OAAO,KAAK,IAAI,iBAAiB;AAAA,UACvD,QAAQ;AAAA,UACR,KAAK;AAAA,QACP,CAAC,CAAC;AAEF,YAAI,CAAC,UAAU,KAAM;AACrB,cAAM,OAAO,MAAM,UAAU,KAAK,kBAAkB;AAEpD,YAAI,CAAC,SAAS,UAAU,EAAG,UAAS,UAAU,IAAI,CAAC;AACnD,iBAAS,UAAU,EAAE,EAAE,IAAI,KAAK,MAAM,IAAI;AAAA,MAC5C;AAEA,aAAO;AAAA,IACT;AAAA,IAEA,MAAM,QAAQ,OAAO,MAAM;AACzB,iBAAW,CAAC,YAAY,OAAO,KAAK,OAAO,QAAQ,IAAI,GAAG;AACxD,mBAAW,CAAC,IAAI,QAAQ,KAAK,OAAO,QAAQ,OAAO,GAAG;AACpD,gBAAM,KAAK,IAAI,OAAO,YAAY,IAAI,QAAQ;AAAA,QAChD;AAAA,MACF;AAAA,IACF;AAAA,IAEA,MAAM,OAAO;AACX,UAAI;AACF,cAAM,OAAO,KAAK,IAAI,kBAAkB,EAAE,QAAQ,OAAO,CAAC,CAAC;AAC3D,eAAO;AAAA,MACT,QAAQ;AACN,eAAO;AAAA,MACT;AAAA,IACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAcA,MAAM,SAAS,OAAO,YAAY,QAAQ,QAAQ,KAAK;AACrD,YAAM,MAAM,WAAW,OAAO,UAAU;AACxC,YAAM,aAAa,MAAM,OAAO,KAAK,IAAI,qBAAqB;AAAA,QAC5D,QAAQ;AAAA,QACR,QAAQ;AAAA,QACR,SAAS;AAAA,QACT,GAAI,SAAS,
EAAE,mBAAmB,OAAO,IAAI,CAAC;AAAA,MAChD,CAAC,CAAC;AAEF,YAAM,QAAQ,WAAW,YAAY,CAAC,GACnC,IAAI,SAAO,IAAI,OAAO,EAAE,EACxB,OAAO,OAAK,EAAE,SAAS,OAAO,CAAC;AAIlC,YAAM,QAAQ,MAAM,QAAQ,IAAI,KAAK,IAAI,OAAO,QAAQ;AACtD,cAAM,KAAK,IAAI,MAAM,IAAI,QAAQ,EAAE;AACnC,cAAM,YAAY,MAAM,OAAO,KAAK,IAAI,iBAAiB;AAAA,UACvD,QAAQ;AAAA,UACR,KAAK;AAAA,QACP,CAAC,CAAC;AACF,YAAI,CAAC,UAAU,KAAM,QAAO;AAC5B,cAAM,OAAO,MAAM,UAAU,KAAK,kBAAkB;AACpD,eAAO,EAAE,IAAI,UAAU,KAAK,MAAM,IAAI,EAAuB;AAAA,MAC/D,CAAC,CAAC;AAEF,aAAO;AAAA,QACL,OAAO,MAAM,OAAO,CAAC,MAAwD,MAAM,IAAI;AAAA,QACvF,YAAY,WAAW,eAAe,WAAW,wBAC7C,WAAW,wBACX;AAAA,MACN;AAAA,IACF;AAAA,EACF;AACF;","names":[]}
package/package.json ADDED
@@ -0,0 +1,69 @@
1
+ {
2
+ "name": "@noy-db/to-aws-s3",
3
+ "version": "0.1.0-pre.3",
4
+ "description": "AWS S3 adapter for noy-db — encrypted object storage with zero-knowledge cloud sync",
5
+ "license": "MIT",
6
+ "author": "vLannaAi <vicio@lanna.ai>",
7
+ "homepage": "https://github.com/vLannaAi/noy-db/tree/main/packages/to-aws-s3#readme",
8
+ "repository": {
9
+ "type": "git",
10
+ "url": "git+https://github.com/vLannaAi/noy-db.git",
11
+ "directory": "packages/to-aws-s3"
12
+ },
13
+ "bugs": {
14
+ "url": "https://github.com/vLannaAi/noy-db/issues"
15
+ },
16
+ "type": "module",
17
+ "sideEffects": false,
18
+ "exports": {
19
+ ".": {
20
+ "import": {
21
+ "types": "./dist/index.d.ts",
22
+ "default": "./dist/index.js"
23
+ },
24
+ "require": {
25
+ "types": "./dist/index.d.cts",
26
+ "default": "./dist/index.cjs"
27
+ }
28
+ }
29
+ },
30
+ "main": "./dist/index.cjs",
31
+ "module": "./dist/index.js",
32
+ "types": "./dist/index.d.ts",
33
+ "files": [
34
+ "dist",
35
+ "README.md",
36
+ "LICENSE"
37
+ ],
38
+ "engines": {
39
+ "node": ">=18.0.0"
40
+ },
41
+ "peerDependencies": {
42
+ "@aws-sdk/client-s3": "^3.0.0",
43
+ "@noy-db/hub": "0.1.0-pre.3"
44
+ },
45
+ "devDependencies": {
46
+ "@aws-sdk/client-s3": "^3.0.0",
47
+ "@noy-db/hub": "0.1.0-pre.3"
48
+ },
49
+ "keywords": [
50
+ "noy-db",
51
+ "adapter",
52
+ "s3",
53
+ "aws",
54
+ "object-storage",
55
+ "cloud",
56
+ "encryption",
57
+ "zero-knowledge"
58
+ ],
59
+ "publishConfig": {
60
+ "access": "public",
61
+ "tag": "latest"
62
+ },
63
+ "scripts": {
64
+ "build": "tsup",
65
+ "test": "vitest run",
66
+ "lint": "eslint src/",
67
+ "typecheck": "tsc --noEmit"
68
+ }
69
+ }