@casfa/storage-s3 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,145 @@
1
+ # @casfa/storage-s3
2
+
3
+ S3 storage provider for CAS.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ bun add @casfa/storage-s3
9
+ ```
10
+
11
+ ## Overview
12
+
13
+ An S3-backed storage provider for CAS (Content-Addressable Storage). Suitable for cloud deployments with high availability and durability requirements.
14
+
15
+ ## Usage
16
+
17
+ ### Basic Usage
18
+
19
+ ```typescript
20
+ import { createS3Storage } from '@casfa/storage-s3';
21
+
22
+ const storage = createS3Storage({
23
+ bucket: 'my-cas-bucket',
24
+ region: 'us-east-1',
25
+ prefix: 'cas/', // Optional key prefix
26
+ });
27
+
28
+ // Store data
29
+ await storage.put('node:abcd1234...', data);
30
+
31
+ // Retrieve data
32
+ const data = await storage.get('node:abcd1234...');
33
+ ```
34
+
35
+ ### With Custom S3 Client
36
+
37
+ ```typescript
38
+ import { S3Client } from '@aws-sdk/client-s3';
39
+ import { createS3Storage } from '@casfa/storage-s3';
40
+
41
+ const s3Client = new S3Client({
42
+ region: 'us-east-1',
43
+ credentials: {
44
+ accessKeyId: 'AKIA...',
45
+ secretAccessKey: '...',
46
+ },
47
+ });
48
+
49
+ const storage = createS3Storage({
50
+ bucket: 'my-cas-bucket',
51
+ client: s3Client,
52
+ });
53
+ ```
54
+
55
+ ## Configuration
56
+
57
+ ```typescript
58
+ interface S3StorageConfig {
59
+ // Required: S3 bucket name
60
+ bucket: string;
61
+
62
+ // AWS region (required if not using custom client)
63
+ region?: string;
64
+
65
+ // Optional: Key prefix for all objects
66
+ prefix?: string;
67
+
68
+ // Optional: Custom S3 client
69
+ client?: S3Client;
70
+
71
+ // Optional: Storage class for new objects — NOTE(review): not present in the shipped S3StorageConfig (see dist/index.d.ts in this release); confirm before relying on it
72
+ storageClass?: 'STANDARD' | 'STANDARD_IA' | 'GLACIER' | 'DEEP_ARCHIVE';
73
+ }
74
+ ```
75
+
76
+ ## S3 Key Structure
77
+
78
+ Objects are stored with sharded prefixes for better S3 performance:
79
+
80
+ ```
81
+ {prefix}ab/abcd1234...
82
+ ```
83
+
84
+ Where `ab` is the first 2 characters of the hash, providing good distribution across S3 partitions.
85
+
86
+ ## API Reference
87
+
88
+ ### Functions
89
+
90
+ - `createS3Storage(config)` - Create S3 storage
91
+
92
+ ### StorageProvider Interface
93
+
94
+ ```typescript
95
+ interface StorageProvider {
96
+ get(key: string): Promise<Uint8Array | null>;
97
+ put(key: string, data: Uint8Array): Promise<void>;
98
+ }
99
+ ```
100
+
101
+ ## AWS Permissions
102
+
103
+ Required IAM permissions for the S3 bucket:
104
+
105
+ ```json
106
+ {
107
+ "Version": "2012-10-17",
108
+ "Statement": [
109
+ {
110
+ "Effect": "Allow",
111
+ "Action": [
112
+ "s3:GetObject",
113
+ "s3:PutObject",
114
+ "s3:DeleteObject",
115
+ "s3:HeadObject"
116
+ ],
117
+ "Resource": "arn:aws:s3:::my-cas-bucket/*"
118
+ }
119
+ ]
120
+ }
121
+ ```
122
+
123
+ ## Performance Tips
124
+
125
+ 1. **Use caching** to reduce S3 API calls and latency
126
+ 2. **Use S3 Transfer Acceleration** for global access
127
+ 3. **Choose appropriate storage class** based on access patterns
128
+ 4. **Enable S3 Intelligent-Tiering** for cost optimization
129
+ 5. **Use regional endpoints** to minimize latency
130
+
131
+ ## Cost Considerations
132
+
133
+ - CAS data is immutable, so versioning is not needed
134
+ - Consider S3 Intelligent-Tiering for infrequently accessed data
135
+ - Monitor PUT/GET request costs for high-throughput workloads
136
+
137
+ ## Related Packages
138
+
139
+ - `@casfa/storage-core` - Core types and utilities
140
+ - `@casfa/storage-fs` - File system storage (for local deployment)
141
+ - `@casfa/storage-memory` - In-memory storage (for testing)
142
+
143
+ ## License
144
+
145
+ MIT
@@ -0,0 +1,157 @@
1
+ # @casfa/storage-s3
2
+
3
+ 基于 S3 的 CAS 存储提供者。
4
+
5
+ ## 安装
6
+
7
+ ```bash
8
+ bun add @casfa/storage-s3
9
+ ```
10
+
11
+ ## 概述
12
+
13
+ 基于 S3 的 CAS(内容寻址存储)存储提供者。适用于对高可用性和数据持久性有要求的云端部署。
14
+
15
+ ## 使用方法
16
+
17
+ ### 基本用法
18
+
19
+ ```typescript
20
+ import { createS3Storage } from '@casfa/storage-s3';
21
+
22
+ const storage = createS3Storage({
23
+ bucket: 'my-cas-bucket',
24
+ region: 'us-east-1',
25
+ prefix: 'cas/', // 可选的键前缀
26
+ });
27
+
28
+ // 存储数据
29
+ await storage.put('node:abcd1234...', data);
30
+
31
+ // 检索数据
32
+ const data = await storage.get('node:abcd1234...');
33
+ ```
34
+
35
+ ### 使用自定义 S3 客户端
36
+
37
+ ```typescript
38
+ import { S3Client } from '@aws-sdk/client-s3';
39
+ import { createS3Storage } from '@casfa/storage-s3';
40
+
41
+ const s3Client = new S3Client({
42
+ region: 'us-east-1',
43
+ credentials: {
44
+ accessKeyId: 'AKIA...',
45
+ secretAccessKey: '...',
46
+ },
47
+ });
48
+
49
+ const storage = createS3Storage({
50
+ bucket: 'my-cas-bucket',
51
+ client: s3Client,
52
+ });
53
+ ```
54
+
55
+ ## 配置
56
+
57
+ ```typescript
58
+ interface S3StorageConfig {
59
+ // 必需:S3 桶名称
60
+ bucket: string;
61
+
62
+ // AWS 区域(不使用自定义客户端时必需)
63
+ region?: string;
64
+
65
+ // 可选:所有对象的键前缀
66
+ prefix?: string;
67
+
68
+ // 可选:自定义 S3 客户端
69
+ client?: S3Client;
70
+
71
+ // 可选:新对象的存储类别——注意(评审):实际发布的 S3StorageConfig 中不存在该字段(见本版本 dist/index.d.ts),使用前请确认
72
+ storageClass?: 'STANDARD' | 'STANDARD_IA' | 'GLACIER' | 'DEEP_ARCHIVE';
73
+ }
74
+ ```
75
+
76
+ ### 缓存选项
77
+ > 注意:0.3.0 的发布代码已移除 `createS3StorageWithCache` 与内置 LRU 缓存,下述 `S3StorageWithCacheConfig` 在当前版本中并不存在,内容仅供参考,使用前请核对实际 API。
78
+ ```typescript
79
+ interface S3StorageWithCacheConfig extends S3StorageConfig {
80
+ // LRU 缓存最大条目数(默认: 1000)
81
+ cacheSize?: number;
82
+
83
+ // 缓存 TTL,单位毫秒(默认: 60000)
84
+ cacheMaxAge?: number;
85
+ }
86
+ ```
87
+
88
+ ## S3 键结构
89
+
90
+ 对象使用分片前缀存储,以获得更好的 S3 性能:
91
+
92
+ ```
93
+ {prefix}ab/abcd1234...
94
+ ```
95
+
96
+ 其中 `ab` 是哈希的前 2 个字符,可在 S3 分区间提供良好的分布。
97
+
98
+ ## API 参考
99
+
100
+ ### 函数
101
+
102
+ - `createS3Storage(config)` - 创建 S3 存储
103
+
104
+ ### StorageProvider 接口
105
+
106
+ ```typescript
107
+ interface StorageProvider {
108
+ get(key: string): Promise<Uint8Array | null>;
109
+ put(key: string, data: Uint8Array): Promise<void>;
110
+ }
111
+ ```
112
+
113
+ ## AWS 权限
114
+
115
+ S3 桶所需的 IAM 权限:
116
+
117
+ ```json
118
+ {
119
+ "Version": "2012-10-17",
120
+ "Statement": [
121
+ {
122
+ "Effect": "Allow",
123
+ "Action": [
124
+ "s3:GetObject",
125
+ "s3:PutObject",
126
+ "s3:DeleteObject",
127
+ "s3:HeadObject"
128
+ ],
129
+ "Resource": "arn:aws:s3:::my-cas-bucket/*"
130
+ }
131
+ ]
132
+ }
133
+ ```
134
+
135
+ ## 性能建议
136
+
137
+ 1. **使用缓存** 以减少 S3 API 调用和延迟
138
+ 2. **使用 S3 Transfer Acceleration** 以实现全球访问
139
+ 3. **根据访问模式选择合适的存储类别**
140
+ 4. **启用 S3 Intelligent-Tiering** 以优化成本
141
+ 5. **使用区域端点** 以最小化延迟
142
+
143
+ ## 成本考虑
144
+
145
+ - CAS 数据是不可变的,因此不需要版本控制
146
+ - 对于不常访问的数据,考虑使用 S3 Intelligent-Tiering
147
+ - 对于高吞吐量工作负载,注意监控 PUT/GET 请求费用
148
+
149
+ ## 相关包
150
+
151
+ - `@casfa/storage-core` - 核心类型与工具
152
+ - `@casfa/storage-fs` - 文件系统存储(用于本地部署)
153
+ - `@casfa/storage-memory` - 内存存储(用于测试)
154
+
155
+ ## 许可证
156
+
157
+ MIT
package/dist/index.d.ts CHANGED
@@ -1,42 +1,7 @@
1
- import { S3Client } from '@aws-sdk/client-s3';
2
- import { StorageProvider } from '@casfa/storage-core';
3
-
4
1
  /**
5
- * S3 Storage Provider for CAS
2
+ * CAS Storage S3
6
3
  *
7
- * Implements StorageProvider with:
8
- * - LRU cache for key existence checks
9
- * - S3 backend storage
4
+ * S3 storage provider for CAS.
10
5
  */
11
-
12
- /**
13
- * S3 Storage configuration
14
- */
15
- type S3StorageConfig = {
16
- /** S3 bucket name */
17
- bucket: string;
18
- /** Optional S3 client (for testing or custom config) */
19
- client?: S3Client;
20
- /** LRU cache size for key existence (default: 10000) */
21
- cacheSize?: number;
22
- /** Key prefix in S3 (default: "cas/sha256/") */
23
- prefix?: string;
24
- };
25
- /**
26
- * Create an S3-backed storage provider
27
- */
28
- declare const createS3Storage: (config: S3StorageConfig) => StorageProvider;
29
- /**
30
- * Create S3 storage with cache control methods (for testing)
31
- */
32
- declare const createS3StorageWithCache: (config: S3StorageConfig) => {
33
- clearCache: () => void;
34
- getCacheStats: () => {
35
- size: number;
36
- };
37
- has: (key: string) => Promise<boolean>;
38
- get: (key: string) => Promise<Uint8Array | null>;
39
- put: (key: string, value: Uint8Array) => Promise<void>;
40
- };
41
-
42
- export { type S3StorageConfig, createS3Storage, createS3StorageWithCache };
6
+ export { createS3Storage, type S3StorageConfig } from "./s3-storage.ts";
7
+ //# sourceMappingURL=index.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,EAAE,eAAe,EAAE,KAAK,eAAe,EAAE,MAAM,iBAAiB,CAAC"}
package/dist/index.js CHANGED
@@ -5,49 +5,22 @@ import {
5
5
  PutObjectCommand,
6
6
  S3Client
7
7
  } from "@aws-sdk/client-s3";
8
- import {
9
- createLRUCache,
10
- DEFAULT_CACHE_SIZE,
11
- toStoragePath
12
- } from "@casfa/storage-core";
8
+ var toStoragePath = (key, prefix) => {
9
+ const subdir = key.slice(0, 2);
10
+ return `${prefix}${subdir}/${key}`;
11
+ };
13
12
  var createS3Storage = (config) => {
14
- const client = config.client ?? new S3Client({});
13
+ const client = config.client ?? new S3Client(config.region ? { region: config.region } : {});
15
14
  const bucket = config.bucket;
16
- const prefix = config.prefix ?? "cas/sha256/";
17
- const existsCache = createLRUCache(config.cacheSize ?? DEFAULT_CACHE_SIZE);
15
+ const prefix = config.prefix ?? "cas/v1/";
18
16
  const toS3Key = (casKey) => toStoragePath(casKey, prefix);
19
- const has = async (key) => {
20
- const cached = existsCache.get(key);
21
- if (cached !== void 0) {
22
- return cached;
23
- }
24
- try {
25
- await client.send(
26
- new HeadObjectCommand({
27
- Bucket: bucket,
28
- Key: toS3Key(key)
29
- })
30
- );
31
- existsCache.set(key, true);
32
- return true;
33
- } catch (error) {
34
- const err = error;
35
- if (err.name === "NotFound" || err.$metadata?.httpStatusCode === 404) {
36
- return false;
37
- }
38
- throw error;
39
- }
40
- };
41
17
  const get = async (key) => {
42
18
  try {
43
- const result = await client.send(
44
- new GetObjectCommand({
45
- Bucket: bucket,
46
- Key: toS3Key(key)
47
- })
48
- );
19
+ const result = await client.send(new GetObjectCommand({
20
+ Bucket: bucket,
21
+ Key: toS3Key(key)
22
+ }));
49
23
  const bytes = await result.Body.transformToByteArray();
50
- existsCache.set(key, true);
51
24
  return new Uint8Array(bytes);
52
25
  } catch (error) {
53
26
  const err = error;
@@ -58,40 +31,30 @@ var createS3Storage = (config) => {
58
31
  }
59
32
  };
60
33
  const put = async (key, value) => {
61
- if (existsCache.get(key)) {
62
- return;
63
- }
64
- const exists = await has(key);
65
- if (exists) {
34
+ const s3Key = toS3Key(key);
35
+ try {
36
+ await client.send(new HeadObjectCommand({
37
+ Bucket: bucket,
38
+ Key: s3Key
39
+ }));
66
40
  return;
41
+ } catch (error) {
42
+ const err = error;
43
+ if (err.name !== "NotFound" && err.$metadata?.httpStatusCode !== 404) {
44
+ throw error;
45
+ }
67
46
  }
68
- await client.send(
69
- new PutObjectCommand({
70
- Bucket: bucket,
71
- Key: toS3Key(key),
72
- Body: value,
73
- ContentType: "application/octet-stream"
74
- })
75
- );
76
- existsCache.set(key, true);
77
- };
78
- return { has, get, put };
79
- };
80
- var createS3StorageWithCache = (config) => {
81
- const client = config.client ?? new S3Client({});
82
- const _bucket = config.bucket;
83
- const prefix = config.prefix ?? "cas/sha256/";
84
- const existsCache = createLRUCache(config.cacheSize ?? DEFAULT_CACHE_SIZE);
85
- const _toS3Key = (casKey) => toStoragePath(casKey, prefix);
86
- const storage = createS3Storage({ ...config, client });
87
- return {
88
- ...storage,
89
- clearCache: () => existsCache.clear(),
90
- getCacheStats: () => ({ size: existsCache.size() })
47
+ await client.send(new PutObjectCommand({
48
+ Bucket: bucket,
49
+ Key: s3Key,
50
+ Body: value,
51
+ ContentType: "application/octet-stream"
52
+ }));
91
53
  };
54
+ return { get, put };
92
55
  };
93
56
  export {
94
- createS3Storage,
95
- createS3StorageWithCache
57
+ createS3Storage
96
58
  };
97
- //# sourceMappingURL=index.js.map
59
+
60
+ //# debugId=FBB9BF9E1762965D64756E2164756E21
package/dist/index.js.map CHANGED
@@ -1 +1,10 @@
1
- {"version":3,"sources":["../src/s3-storage.ts"],"sourcesContent":["/**\n * S3 Storage Provider for CAS\n *\n * Implements StorageProvider with:\n * - LRU cache for key existence checks\n * - S3 backend storage\n */\n\nimport {\n GetObjectCommand,\n HeadObjectCommand,\n PutObjectCommand,\n S3Client,\n} from \"@aws-sdk/client-s3\";\nimport {\n createLRUCache,\n DEFAULT_CACHE_SIZE,\n type StorageProvider,\n toStoragePath,\n} from \"@casfa/storage-core\";\n\n/**\n * S3 Storage configuration\n */\nexport type S3StorageConfig = {\n /** S3 bucket name */\n bucket: string;\n /** Optional S3 client (for testing or custom config) */\n client?: S3Client;\n /** LRU cache size for key existence (default: 10000) */\n cacheSize?: number;\n /** Key prefix in S3 (default: \"cas/sha256/\") */\n prefix?: string;\n};\n\n/**\n * Create an S3-backed storage provider\n */\nexport const createS3Storage = (config: S3StorageConfig): StorageProvider => {\n const client = config.client ?? new S3Client({});\n const bucket = config.bucket;\n const prefix = config.prefix ?? \"cas/sha256/\";\n const existsCache = createLRUCache<string, boolean>(config.cacheSize ?? 
DEFAULT_CACHE_SIZE);\n\n const toS3Key = (casKey: string): string => toStoragePath(casKey, prefix);\n\n const has = async (key: string): Promise<boolean> => {\n // Check cache first\n const cached = existsCache.get(key);\n if (cached !== undefined) {\n return cached;\n }\n\n // Check S3\n try {\n await client.send(\n new HeadObjectCommand({\n Bucket: bucket,\n Key: toS3Key(key),\n })\n );\n existsCache.set(key, true);\n return true;\n } catch (error: unknown) {\n const err = error as { name?: string; $metadata?: { httpStatusCode?: number } };\n if (err.name === \"NotFound\" || err.$metadata?.httpStatusCode === 404) {\n // Don't cache non-existence (it might be uploaded later)\n return false;\n }\n throw error;\n }\n };\n\n const get = async (key: string): Promise<Uint8Array | null> => {\n try {\n const result = await client.send(\n new GetObjectCommand({\n Bucket: bucket,\n Key: toS3Key(key),\n })\n );\n\n const bytes = await result.Body!.transformToByteArray();\n\n // Mark as existing in cache\n existsCache.set(key, true);\n\n return new Uint8Array(bytes);\n } catch (error: unknown) {\n const err = error as { name?: string; $metadata?: { httpStatusCode?: number } };\n if (err.name === \"NoSuchKey\" || err.$metadata?.httpStatusCode === 404) {\n return null;\n }\n throw error;\n }\n };\n\n const put = async (key: string, value: Uint8Array): Promise<void> => {\n // Check cache first (avoid redundant writes)\n if (existsCache.get(key)) {\n return;\n }\n\n // Check if already exists in S3\n const exists = await has(key);\n if (exists) {\n return;\n }\n\n // Upload to S3\n await client.send(\n new PutObjectCommand({\n Bucket: bucket,\n Key: toS3Key(key),\n Body: value,\n ContentType: \"application/octet-stream\",\n })\n );\n\n // Mark as existing\n existsCache.set(key, true);\n };\n\n return { has, get, put };\n};\n\n/**\n * Create S3 storage with cache control methods (for testing)\n */\nexport const createS3StorageWithCache = (config: S3StorageConfig) => {\n const 
client = config.client ?? new S3Client({});\n const _bucket = config.bucket;\n const prefix = config.prefix ?? \"cas/sha256/\";\n const existsCache = createLRUCache<string, boolean>(config.cacheSize ?? DEFAULT_CACHE_SIZE);\n\n const _toS3Key = (casKey: string): string => toStoragePath(casKey, prefix);\n\n const storage = createS3Storage({ ...config, client });\n\n return {\n ...storage,\n clearCache: () => existsCache.clear(),\n getCacheStats: () => ({ size: existsCache.size() }),\n };\n};\n"],"mappings":";AAQA;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP;AAAA,EACE;AAAA,EACA;AAAA,EAEA;AAAA,OACK;AAmBA,IAAM,kBAAkB,CAAC,WAA6C;AAC3E,QAAM,SAAS,OAAO,UAAU,IAAI,SAAS,CAAC,CAAC;AAC/C,QAAM,SAAS,OAAO;AACtB,QAAM,SAAS,OAAO,UAAU;AAChC,QAAM,cAAc,eAAgC,OAAO,aAAa,kBAAkB;AAE1F,QAAM,UAAU,CAAC,WAA2B,cAAc,QAAQ,MAAM;AAExE,QAAM,MAAM,OAAO,QAAkC;AAEnD,UAAM,SAAS,YAAY,IAAI,GAAG;AAClC,QAAI,WAAW,QAAW;AACxB,aAAO;AAAA,IACT;AAGA,QAAI;AACF,YAAM,OAAO;AAAA,QACX,IAAI,kBAAkB;AAAA,UACpB,QAAQ;AAAA,UACR,KAAK,QAAQ,GAAG;AAAA,QAClB,CAAC;AAAA,MACH;AACA,kBAAY,IAAI,KAAK,IAAI;AACzB,aAAO;AAAA,IACT,SAAS,OAAgB;AACvB,YAAM,MAAM;AACZ,UAAI,IAAI,SAAS,cAAc,IAAI,WAAW,mBAAmB,KAAK;AAEpE,eAAO;AAAA,MACT;AACA,YAAM;AAAA,IACR;AAAA,EACF;AAEA,QAAM,MAAM,OAAO,QAA4C;AAC7D,QAAI;AACF,YAAM,SAAS,MAAM,OAAO;AAAA,QAC1B,IAAI,iBAAiB;AAAA,UACnB,QAAQ;AAAA,UACR,KAAK,QAAQ,GAAG;AAAA,QAClB,CAAC;AAAA,MACH;AAEA,YAAM,QAAQ,MAAM,OAAO,KAAM,qBAAqB;AAGtD,kBAAY,IAAI,KAAK,IAAI;AAEzB,aAAO,IAAI,WAAW,KAAK;AAAA,IAC7B,SAAS,OAAgB;AACvB,YAAM,MAAM;AACZ,UAAI,IAAI,SAAS,eAAe,IAAI,WAAW,mBAAmB,KAAK;AACrE,eAAO;AAAA,MACT;AACA,YAAM;AAAA,IACR;AAAA,EACF;AAEA,QAAM,MAAM,OAAO,KAAa,UAAqC;AAEnE,QAAI,YAAY,IAAI,GAAG,GAAG;AACxB;AAAA,IACF;AAGA,UAAM,SAAS,MAAM,IAAI,GAAG;AAC5B,QAAI,QAAQ;AACV;AAAA,IACF;AAGA,UAAM,OAAO;AAAA,MACX,IAAI,iBAAiB;AAAA,QACnB,QAAQ;AAAA,QACR,KAAK,QAAQ,GAAG;AAAA,QAChB,MAAM;AAAA,QACN,aAAa;AAAA,MACf,CAAC;AAAA,IACH;AAGA,gBAAY,IAAI,KAAK,IAAI;AAAA,EAC3B;AAEA,SAAO,EAAE,KAAK,KAAK,IAAI;AACzB;AAKO,IAAM,2BAA2B,CAAC,WAA4B;AACnE,QAAM,SAAS,OAAO,UAAU,IAAI,SAAS,CAAC,CAAC;A
AC/C,QAAM,UAAU,OAAO;AACvB,QAAM,SAAS,OAAO,UAAU;AAChC,QAAM,cAAc,eAAgC,OAAO,aAAa,kBAAkB;AAE1F,QAAM,WAAW,CAAC,WAA2B,cAAc,QAAQ,MAAM;AAEzE,QAAM,UAAU,gBAAgB,EAAE,GAAG,QAAQ,OAAO,CAAC;AAErD,SAAO;AAAA,IACL,GAAG;AAAA,IACH,YAAY,MAAM,YAAY,MAAM;AAAA,IACpC,eAAe,OAAO,EAAE,MAAM,YAAY,KAAK,EAAE;AAAA,EACnD;AACF;","names":[]}
1
+ {
2
+ "version": 3,
3
+ "sources": ["../src/s3-storage.ts"],
4
+ "sourcesContent": [
5
+ "/**\n * S3 Storage Provider for CAS\n *\n * Implements StorageProvider with:\n * - S3 backend storage\n * - Internal HeadObject check in put() to avoid redundant uploads\n */\n\nimport {\n GetObjectCommand,\n HeadObjectCommand,\n PutObjectCommand,\n S3Client,\n} from \"@aws-sdk/client-s3\";\nimport type { StorageProvider } from \"@casfa/storage-core\";\n\n/**\n * Create storage path from a CB32 storage key.\n * Uses first 2 chars as subdirectory for better distribution.\n *\n * Example: 240B5PHBGEC2A705WTKKMVRS30 -> cas/v1/24/240B5PHBGEC2A705WTKKMVRS30\n */\nconst toStoragePath = (key: string, prefix: string): string => {\n const subdir = key.slice(0, 2);\n return `${prefix}${subdir}/${key}`;\n};\n\n/**\n * S3 Storage configuration\n */\nexport type S3StorageConfig = {\n /** S3 bucket name */\n bucket: string;\n /** AWS region for the S3 bucket (e.g. \"us-west-2\") */\n region?: string;\n /** Optional S3 client (for testing or custom config) */\n client?: S3Client;\n /** Key prefix in S3 (default: \"cas/v1/\") */\n prefix?: string;\n};\n\n/**\n * Create an S3-backed storage provider\n */\nexport const createS3Storage = (config: S3StorageConfig): StorageProvider => {\n const client = config.client ?? new S3Client(config.region ? { region: config.region } : {});\n const bucket = config.bucket;\n const prefix = config.prefix ?? 
\"cas/v1/\";\n\n const toS3Key = (casKey: string): string => toStoragePath(casKey, prefix);\n\n const get = async (key: string): Promise<Uint8Array | null> => {\n try {\n const result = await client.send(\n new GetObjectCommand({\n Bucket: bucket,\n Key: toS3Key(key),\n })\n );\n\n const bytes = await result.Body!.transformToByteArray();\n return new Uint8Array(bytes);\n } catch (error: unknown) {\n const err = error as { name?: string; $metadata?: { httpStatusCode?: number } };\n if (err.name === \"NoSuchKey\" || err.$metadata?.httpStatusCode === 404) {\n return null;\n }\n throw error;\n }\n };\n\n const put = async (key: string, value: Uint8Array): Promise<void> => {\n const s3Key = toS3Key(key);\n\n // Internal optimization: HeadObject is cheaper than PutObject\n try {\n await client.send(\n new HeadObjectCommand({\n Bucket: bucket,\n Key: s3Key,\n })\n );\n return; // already exists\n } catch (error: unknown) {\n const err = error as { name?: string; $metadata?: { httpStatusCode?: number } };\n if (err.name !== \"NotFound\" && err.$metadata?.httpStatusCode !== 404) {\n throw error; // unexpected error\n }\n // not found — proceed to upload\n }\n\n // Upload to S3\n await client.send(\n new PutObjectCommand({\n Bucket: bucket,\n Key: s3Key,\n Body: value,\n ContentType: \"application/octet-stream\",\n })\n );\n };\n\n return { get, put };\n};\n"
6
+ ],
7
+ "mappings": ";AAQA;AAAA;AAAA;AAAA;AAAA;AAAA;AAcA,IAAM,gBAAgB,CAAC,KAAa,WAA2B;AAAA,EAC7D,MAAM,SAAS,IAAI,MAAM,GAAG,CAAC;AAAA,EAC7B,OAAO,GAAG,SAAS,UAAU;AAAA;AAoBxB,IAAM,kBAAkB,CAAC,WAA6C;AAAA,EAC3E,MAAM,SAAS,OAAO,UAAU,IAAI,SAAS,OAAO,SAAS,EAAE,QAAQ,OAAO,OAAO,IAAI,CAAC,CAAC;AAAA,EAC3F,MAAM,SAAS,OAAO;AAAA,EACtB,MAAM,SAAS,OAAO,UAAU;AAAA,EAEhC,MAAM,UAAU,CAAC,WAA2B,cAAc,QAAQ,MAAM;AAAA,EAExE,MAAM,MAAM,OAAO,QAA4C;AAAA,IAC7D,IAAI;AAAA,MACF,MAAM,SAAS,MAAM,OAAO,KAC1B,IAAI,iBAAiB;AAAA,QACnB,QAAQ;AAAA,QACR,KAAK,QAAQ,GAAG;AAAA,MAClB,CAAC,CACH;AAAA,MAEA,MAAM,QAAQ,MAAM,OAAO,KAAM,qBAAqB;AAAA,MACtD,OAAO,IAAI,WAAW,KAAK;AAAA,MAC3B,OAAO,OAAgB;AAAA,MACvB,MAAM,MAAM;AAAA,MACZ,IAAI,IAAI,SAAS,eAAe,IAAI,WAAW,mBAAmB,KAAK;AAAA,QACrE,OAAO;AAAA,MACT;AAAA,MACA,MAAM;AAAA;AAAA;AAAA,EAIV,MAAM,MAAM,OAAO,KAAa,UAAqC;AAAA,IACnE,MAAM,QAAQ,QAAQ,GAAG;AAAA,IAGzB,IAAI;AAAA,MACF,MAAM,OAAO,KACX,IAAI,kBAAkB;AAAA,QACpB,QAAQ;AAAA,QACR,KAAK;AAAA,MACP,CAAC,CACH;AAAA,MACA;AAAA,MACA,OAAO,OAAgB;AAAA,MACvB,MAAM,MAAM;AAAA,MACZ,IAAI,IAAI,SAAS,cAAc,IAAI,WAAW,mBAAmB,KAAK;AAAA,QACpE,MAAM;AAAA,MACR;AAAA;AAAA,IAKF,MAAM,OAAO,KACX,IAAI,iBAAiB;AAAA,MACnB,QAAQ;AAAA,MACR,KAAK;AAAA,MACL,MAAM;AAAA,MACN,aAAa;AAAA,IACf,CAAC,CACH;AAAA;AAAA,EAGF,OAAO,EAAE,KAAK,IAAI;AAAA;",
8
+ "debugId": "FBB9BF9E1762965D64756E2164756E21",
9
+ "names": []
10
+ }
@@ -0,0 +1,27 @@
1
+ /**
2
+ * S3 Storage Provider for CAS
3
+ *
4
+ * Implements StorageProvider with:
5
+ * - S3 backend storage
6
+ * - Internal HeadObject check in put() to avoid redundant uploads
7
+ */
8
+ import { S3Client } from "@aws-sdk/client-s3";
9
+ import type { StorageProvider } from "@casfa/storage-core";
10
+ /**
11
+ * S3 Storage configuration
12
+ */
13
+ export type S3StorageConfig = {
14
+ /** S3 bucket name */
15
+ bucket: string;
16
+ /** AWS region for the S3 bucket (e.g. "us-west-2") */
17
+ region?: string;
18
+ /** Optional S3 client (for testing or custom config) */
19
+ client?: S3Client;
20
+ /** Key prefix in S3 (default: "cas/v1/") */
21
+ prefix?: string;
22
+ };
23
+ /**
24
+ * Create an S3-backed storage provider
25
+ */
26
+ export declare const createS3Storage: (config: S3StorageConfig) => StorageProvider;
27
+ //# sourceMappingURL=s3-storage.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"s3-storage.d.ts","sourceRoot":"","sources":["../src/s3-storage.ts"],"names":[],"mappings":"AAAA;;;;;;GAMG;AAEH,OAAO,EAIL,QAAQ,EACT,MAAM,oBAAoB,CAAC;AAC5B,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,qBAAqB,CAAC;AAa3D;;GAEG;AACH,MAAM,MAAM,eAAe,GAAG;IAC5B,qBAAqB;IACrB,MAAM,EAAE,MAAM,CAAC;IACf,sDAAsD;IACtD,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,wDAAwD;IACxD,MAAM,CAAC,EAAE,QAAQ,CAAC;IAClB,4CAA4C;IAC5C,MAAM,CAAC,EAAE,MAAM,CAAC;CACjB,CAAC;AAEF;;GAEG;AACH,eAAO,MAAM,eAAe,GAAI,QAAQ,eAAe,KAAG,eA2DzD,CAAC"}
package/package.json CHANGED
@@ -1,19 +1,19 @@
1
1
  {
2
2
  "name": "@casfa/storage-s3",
3
- "version": "0.2.0",
3
+ "version": "0.3.0",
4
4
  "description": "S3 storage provider for CAS",
5
5
  "type": "module",
6
6
  "main": "./dist/index.js",
7
7
  "types": "./dist/index.d.ts",
8
8
  "exports": {
9
9
  ".": {
10
+ "bun": "./src/index.ts",
10
11
  "types": "./dist/index.d.ts",
11
12
  "import": "./dist/index.js"
12
13
  }
13
14
  },
14
15
  "scripts": {
15
- "build": "tsup",
16
- "dev": "tsup --watch",
16
+ "build": "bun ../../scripts/build-pkg.ts",
17
17
  "typecheck": "tsc --noEmit",
18
18
  "lint": "biome check .",
19
19
  "lint:fix": "biome check --write .",