@snaha/swarm-id 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +431 -0
- package/dist/chunk/bmt.d.ts +17 -0
- package/dist/chunk/bmt.d.ts.map +1 -0
- package/dist/chunk/cac.d.ts +18 -0
- package/dist/chunk/cac.d.ts.map +1 -0
- package/dist/chunk/constants.d.ts +10 -0
- package/dist/chunk/constants.d.ts.map +1 -0
- package/dist/chunk/encrypted-cac.d.ts +48 -0
- package/dist/chunk/encrypted-cac.d.ts.map +1 -0
- package/dist/chunk/encryption.d.ts +86 -0
- package/dist/chunk/encryption.d.ts.map +1 -0
- package/dist/chunk/index.d.ts +6 -0
- package/dist/chunk/index.d.ts.map +1 -0
- package/dist/index.d.ts +46 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/proxy/act/act.d.ts +78 -0
- package/dist/proxy/act/act.d.ts.map +1 -0
- package/dist/proxy/act/crypto.d.ts +44 -0
- package/dist/proxy/act/crypto.d.ts.map +1 -0
- package/dist/proxy/act/grantee-list.d.ts +82 -0
- package/dist/proxy/act/grantee-list.d.ts.map +1 -0
- package/dist/proxy/act/history.d.ts +183 -0
- package/dist/proxy/act/history.d.ts.map +1 -0
- package/dist/proxy/act/index.d.ts +104 -0
- package/dist/proxy/act/index.d.ts.map +1 -0
- package/dist/proxy/chunking-encrypted.d.ts +14 -0
- package/dist/proxy/chunking-encrypted.d.ts.map +1 -0
- package/dist/proxy/chunking.d.ts +15 -0
- package/dist/proxy/chunking.d.ts.map +1 -0
- package/dist/proxy/download-data.d.ts +16 -0
- package/dist/proxy/download-data.d.ts.map +1 -0
- package/dist/proxy/feed-manifest.d.ts +62 -0
- package/dist/proxy/feed-manifest.d.ts.map +1 -0
- package/dist/proxy/feeds/epochs/async-finder.d.ts +77 -0
- package/dist/proxy/feeds/epochs/async-finder.d.ts.map +1 -0
- package/dist/proxy/feeds/epochs/epoch.d.ts +88 -0
- package/dist/proxy/feeds/epochs/epoch.d.ts.map +1 -0
- package/dist/proxy/feeds/epochs/finder.d.ts +67 -0
- package/dist/proxy/feeds/epochs/finder.d.ts.map +1 -0
- package/dist/proxy/feeds/epochs/index.d.ts +35 -0
- package/dist/proxy/feeds/epochs/index.d.ts.map +1 -0
- package/dist/proxy/feeds/epochs/test-utils.d.ts +93 -0
- package/dist/proxy/feeds/epochs/test-utils.d.ts.map +1 -0
- package/dist/proxy/feeds/epochs/types.d.ts +109 -0
- package/dist/proxy/feeds/epochs/types.d.ts.map +1 -0
- package/dist/proxy/feeds/epochs/updater.d.ts +68 -0
- package/dist/proxy/feeds/epochs/updater.d.ts.map +1 -0
- package/dist/proxy/feeds/epochs/utils.d.ts +22 -0
- package/dist/proxy/feeds/epochs/utils.d.ts.map +1 -0
- package/dist/proxy/feeds/index.d.ts +5 -0
- package/dist/proxy/feeds/index.d.ts.map +1 -0
- package/dist/proxy/feeds/sequence/async-finder.d.ts +14 -0
- package/dist/proxy/feeds/sequence/async-finder.d.ts.map +1 -0
- package/dist/proxy/feeds/sequence/finder.d.ts +17 -0
- package/dist/proxy/feeds/sequence/finder.d.ts.map +1 -0
- package/dist/proxy/feeds/sequence/index.d.ts +23 -0
- package/dist/proxy/feeds/sequence/index.d.ts.map +1 -0
- package/dist/proxy/feeds/sequence/types.d.ts +80 -0
- package/dist/proxy/feeds/sequence/types.d.ts.map +1 -0
- package/dist/proxy/feeds/sequence/updater.d.ts +26 -0
- package/dist/proxy/feeds/sequence/updater.d.ts.map +1 -0
- package/dist/proxy/index.d.ts +6 -0
- package/dist/proxy/index.d.ts.map +1 -0
- package/dist/proxy/manifest-builder.d.ts +183 -0
- package/dist/proxy/manifest-builder.d.ts.map +1 -0
- package/dist/proxy/mantaray-encrypted.d.ts +27 -0
- package/dist/proxy/mantaray-encrypted.d.ts.map +1 -0
- package/dist/proxy/mantaray.d.ts +26 -0
- package/dist/proxy/mantaray.d.ts.map +1 -0
- package/dist/proxy/types.d.ts +29 -0
- package/dist/proxy/types.d.ts.map +1 -0
- package/dist/proxy/upload-data.d.ts +17 -0
- package/dist/proxy/upload-data.d.ts.map +1 -0
- package/dist/proxy/upload-encrypted-data.d.ts +103 -0
- package/dist/proxy/upload-encrypted-data.d.ts.map +1 -0
- package/dist/schemas.d.ts +240 -0
- package/dist/schemas.d.ts.map +1 -0
- package/dist/storage/debounced-uploader.d.ts +62 -0
- package/dist/storage/debounced-uploader.d.ts.map +1 -0
- package/dist/storage/utilization-store.d.ts +108 -0
- package/dist/storage/utilization-store.d.ts.map +1 -0
- package/dist/swarm-id-auth.d.ts +74 -0
- package/dist/swarm-id-auth.d.ts.map +1 -0
- package/dist/swarm-id-auth.js +2 -0
- package/dist/swarm-id-auth.js.map +1 -0
- package/dist/swarm-id-client.d.ts +878 -0
- package/dist/swarm-id-client.d.ts.map +1 -0
- package/dist/swarm-id-client.js +2 -0
- package/dist/swarm-id-client.js.map +1 -0
- package/dist/swarm-id-proxy.d.ts +236 -0
- package/dist/swarm-id-proxy.d.ts.map +1 -0
- package/dist/swarm-id-proxy.js +2 -0
- package/dist/swarm-id-proxy.js.map +1 -0
- package/dist/swarm-id.esm.js +2 -0
- package/dist/swarm-id.esm.js.map +1 -0
- package/dist/swarm-id.umd.js +2 -0
- package/dist/swarm-id.umd.js.map +1 -0
- package/dist/sync/index.d.ts +9 -0
- package/dist/sync/index.d.ts.map +1 -0
- package/dist/sync/key-derivation.d.ts +25 -0
- package/dist/sync/key-derivation.d.ts.map +1 -0
- package/dist/sync/restore-account.d.ts +28 -0
- package/dist/sync/restore-account.d.ts.map +1 -0
- package/dist/sync/serialization.d.ts +16 -0
- package/dist/sync/serialization.d.ts.map +1 -0
- package/dist/sync/store-interfaces.d.ts +53 -0
- package/dist/sync/store-interfaces.d.ts.map +1 -0
- package/dist/sync/sync-account.d.ts +44 -0
- package/dist/sync/sync-account.d.ts.map +1 -0
- package/dist/sync/types.d.ts +13 -0
- package/dist/sync/types.d.ts.map +1 -0
- package/dist/test-fixtures.d.ts +17 -0
- package/dist/test-fixtures.d.ts.map +1 -0
- package/dist/types-BD_VkNn0.js +2 -0
- package/dist/types-BD_VkNn0.js.map +1 -0
- package/dist/types-lJCaT-50.js +2 -0
- package/dist/types-lJCaT-50.js.map +1 -0
- package/dist/types.d.ts +2157 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/utils/account-payload.d.ts +94 -0
- package/dist/utils/account-payload.d.ts.map +1 -0
- package/dist/utils/account-state-snapshot.d.ts +38 -0
- package/dist/utils/account-state-snapshot.d.ts.map +1 -0
- package/dist/utils/backup-encryption.d.ts +127 -0
- package/dist/utils/backup-encryption.d.ts.map +1 -0
- package/dist/utils/batch-utilization.d.ts +432 -0
- package/dist/utils/batch-utilization.d.ts.map +1 -0
- package/dist/utils/constants.d.ts +11 -0
- package/dist/utils/constants.d.ts.map +1 -0
- package/dist/utils/hex.d.ts +17 -0
- package/dist/utils/hex.d.ts.map +1 -0
- package/dist/utils/key-derivation.d.ts +92 -0
- package/dist/utils/key-derivation.d.ts.map +1 -0
- package/dist/utils/storage-managers.d.ts +65 -0
- package/dist/utils/storage-managers.d.ts.map +1 -0
- package/dist/utils/swarm-id-export.d.ts +24 -0
- package/dist/utils/swarm-id-export.d.ts.map +1 -0
- package/dist/utils/ttl.d.ts +49 -0
- package/dist/utils/ttl.d.ts.map +1 -0
- package/dist/utils/url.d.ts +41 -0
- package/dist/utils/url.d.ts.map +1 -0
- package/dist/utils/versioned-storage.d.ts +131 -0
- package/dist/utils/versioned-storage.d.ts.map +1 -0
- package/package.json +78 -0
- package/src/chunk/bmt.test.ts +217 -0
- package/src/chunk/bmt.ts +57 -0
- package/src/chunk/cac.test.ts +214 -0
- package/src/chunk/cac.ts +65 -0
- package/src/chunk/constants.ts +18 -0
- package/src/chunk/encrypted-cac.test.ts +385 -0
- package/src/chunk/encrypted-cac.ts +131 -0
- package/src/chunk/encryption.test.ts +352 -0
- package/src/chunk/encryption.ts +300 -0
- package/src/chunk/index.ts +47 -0
- package/src/index.ts +430 -0
- package/src/proxy/act/act.test.ts +278 -0
- package/src/proxy/act/act.ts +158 -0
- package/src/proxy/act/bee-compat.test.ts +948 -0
- package/src/proxy/act/crypto.test.ts +436 -0
- package/src/proxy/act/crypto.ts +376 -0
- package/src/proxy/act/grantee-list.test.ts +393 -0
- package/src/proxy/act/grantee-list.ts +239 -0
- package/src/proxy/act/history.test.ts +360 -0
- package/src/proxy/act/history.ts +413 -0
- package/src/proxy/act/index.test.ts +748 -0
- package/src/proxy/act/index.ts +853 -0
- package/src/proxy/chunking-encrypted.ts +95 -0
- package/src/proxy/chunking.ts +65 -0
- package/src/proxy/download-data.ts +448 -0
- package/src/proxy/feed-manifest.ts +174 -0
- package/src/proxy/feeds/epochs/async-finder.ts +372 -0
- package/src/proxy/feeds/epochs/epoch.test.ts +249 -0
- package/src/proxy/feeds/epochs/epoch.ts +181 -0
- package/src/proxy/feeds/epochs/finder.ts +282 -0
- package/src/proxy/feeds/epochs/index.ts +73 -0
- package/src/proxy/feeds/epochs/integration.test.ts +1336 -0
- package/src/proxy/feeds/epochs/test-utils.ts +274 -0
- package/src/proxy/feeds/epochs/types.ts +128 -0
- package/src/proxy/feeds/epochs/updater.ts +192 -0
- package/src/proxy/feeds/epochs/utils.ts +62 -0
- package/src/proxy/feeds/index.ts +5 -0
- package/src/proxy/feeds/sequence/async-finder.ts +31 -0
- package/src/proxy/feeds/sequence/finder.ts +73 -0
- package/src/proxy/feeds/sequence/index.ts +54 -0
- package/src/proxy/feeds/sequence/integration.test.ts +966 -0
- package/src/proxy/feeds/sequence/types.ts +103 -0
- package/src/proxy/feeds/sequence/updater.ts +71 -0
- package/src/proxy/index.ts +5 -0
- package/src/proxy/manifest-builder.test.ts +427 -0
- package/src/proxy/manifest-builder.ts +679 -0
- package/src/proxy/mantaray-encrypted.ts +78 -0
- package/src/proxy/mantaray.ts +104 -0
- package/src/proxy/types.ts +32 -0
- package/src/proxy/upload-data.ts +189 -0
- package/src/proxy/upload-encrypted-data.ts +658 -0
- package/src/schemas.ts +299 -0
- package/src/storage/debounced-uploader.ts +192 -0
- package/src/storage/utilization-store.ts +397 -0
- package/src/swarm-id-client.test.ts +99 -0
- package/src/swarm-id-client.ts +3095 -0
- package/src/swarm-id-proxy.ts +3891 -0
- package/src/sync/index.ts +28 -0
- package/src/sync/restore-account.ts +90 -0
- package/src/sync/serialization.ts +39 -0
- package/src/sync/store-interfaces.ts +62 -0
- package/src/sync/sync-account.test.ts +302 -0
- package/src/sync/sync-account.ts +396 -0
- package/src/sync/types.ts +11 -0
- package/src/test-fixtures.ts +109 -0
- package/src/types.ts +1651 -0
- package/src/utils/account-state-snapshot.test.ts +595 -0
- package/src/utils/account-state-snapshot.ts +94 -0
- package/src/utils/backup-encryption.test.ts +442 -0
- package/src/utils/backup-encryption.ts +352 -0
- package/src/utils/batch-utilization.ts +1309 -0
- package/src/utils/constants.ts +20 -0
- package/src/utils/hex.ts +27 -0
- package/src/utils/key-derivation.ts +197 -0
- package/src/utils/storage-managers.ts +365 -0
- package/src/utils/ttl.ts +129 -0
- package/src/utils/url.test.ts +136 -0
- package/src/utils/url.ts +71 -0
- package/src/utils/versioned-storage.ts +323 -0
|
@@ -0,0 +1,1309 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Batch Utilization Tracking for Swarm Storage
|
|
3
|
+
*
|
|
4
|
+
* This module implements utilization tracking for mutable postage batches.
|
|
5
|
+
* It manages two counter arrays:
|
|
6
|
+
* - Utilization counters (local, uint8): Track slots 0-255 per bucket for utilization chunks
|
|
7
|
+
* - Data counters (on-chain, uint32): Track slots 256+ per bucket for data chunks
|
|
8
|
+
*
|
|
9
|
+
* The system uses pre-calculation to handle the circular dependency of storing
|
|
10
|
+
* utilization data that tracks the storage of itself.
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import {
|
|
14
|
+
Stamper,
|
|
15
|
+
BatchId,
|
|
16
|
+
Topic,
|
|
17
|
+
Identifier,
|
|
18
|
+
type Bee,
|
|
19
|
+
EthAddress,
|
|
20
|
+
type EnvelopeWithBatchId,
|
|
21
|
+
} from "@ethersphere/bee-js"
|
|
22
|
+
import {
|
|
23
|
+
makeEncryptedContentAddressedChunk,
|
|
24
|
+
makeContentAddressedChunk,
|
|
25
|
+
type ContentAddressedChunk,
|
|
26
|
+
} from "../chunk"
|
|
27
|
+
import { Binary, type Chunk as CafeChunk } from "cafe-utility"
|
|
28
|
+
import type { UtilizationStoreDB } from "../storage/utilization-store"
|
|
29
|
+
import { calculateContentHash } from "../storage/utilization-store"
|
|
30
|
+
import { uploadSingleChunkWithEncryption } from "../proxy/upload-encrypted-data"
|
|
31
|
+
|
|
32
|
+
// ============================================================================
// Constants
// ============================================================================

/** Number of buckets in a postage batch (2^16 = 2^BUCKET_DEPTH) */
export const NUM_BUCKETS = 65536

/** Bucket depth parameter (determines bucket count: 2^16 buckets) */
export const BUCKET_DEPTH = 16

/** Number of slots reserved per bucket for utilization chunks (slots 0-3) */
export const UTILIZATION_SLOTS_PER_BUCKET = 4

/** Starting slot index for data chunks (first slot after the reserved ones) */
export const DATA_COUNTER_START = 4

/** Size of each chunk in bytes (Swarm's fixed 4KB chunk payload) */
export const CHUNK_SIZE = 4096

/** Batch depth for N=256 slots per bucket with 65536 buckets (2^24 total slots) */
export const DEFAULT_BATCH_DEPTH = 24
|
|
53
|
+
|
|
54
|
+
// ============================================================================
|
|
55
|
+
// Types
|
|
56
|
+
// ============================================================================
|
|
57
|
+
|
|
58
|
+
/**
 * Metadata for a single utilization chunk
 */
export interface ChunkMetadata {
  /** Chunk index (0-63) */
  index: number

  /**
   * Content hash / CAC reference (same thing for content-addressed chunks).
   * Empty string means never uploaded.
   */
  contentHash: string

  /** Last upload timestamp (0 when never uploaded) */
  lastUpload: number

  /** Whether this chunk has local changes that still need uploading */
  dirty: boolean
}
|
|
77
|
+
|
|
78
|
+
/**
 * Utilization state for a postage batch
 *
 * This new version stores utilization data as 64 chunks on Swarm
 * with IndexedDB caching for performance.
 */
export interface BatchUtilizationState {
  /** Batch ID this state belongs to */
  batchId: BatchId

  /** Data counters, one uint32 per bucket (length 65,536 = NUM_BUCKETS) */
  dataCounters: Uint32Array // [65536]

  /** Metadata for each of the 64 utilization chunks */
  chunks: ChunkMetadata[] // [64]

  /** Topic for SOC storage */
  topic: Topic

  /** Last sync timestamp */
  lastSync: number
}
|
|
100
|
+
|
|
101
|
+
/**
 * Chunk with bucket assignment
 */
export interface ChunkWithBucket {
  /** The content-addressed chunk itself */
  chunk: ContentAddressedChunk
  /** Bucket index (0-65535) derived from the chunk's address */
  bucket: number
  /** Slot within the bucket (0 until a slot is actually allocated) */
  slot: number
}
|
|
109
|
+
|
|
110
|
+
/**
 * Result of calculating utilization update
 */
export interface UtilizationUpdate {
  /** Updated data counters (one uint32 per bucket) */
  dataCounters: Uint32Array

  /** Utilization chunks to upload, with their bucket/slot assignments */
  utilizationChunks: ChunkWithBucket[]
}
|
|
120
|
+
|
|
121
|
+
// ============================================================================
|
|
122
|
+
// Bucket Mapping
|
|
123
|
+
// ============================================================================
|
|
124
|
+
|
|
125
|
+
/**
|
|
126
|
+
* Calculate which bucket a chunk belongs to based on its address.
|
|
127
|
+
* Uses the first 2 bytes of the chunk address as a big-endian uint16.
|
|
128
|
+
*
|
|
129
|
+
* This matches bee-js Stamper implementation.
|
|
130
|
+
*
|
|
131
|
+
* @param chunkAddress - The chunk's content address (32 bytes)
|
|
132
|
+
* @returns Bucket index (0-65535)
|
|
133
|
+
*/
|
|
134
|
+
export function toBucket(chunkAddress: Uint8Array): number {
|
|
135
|
+
if (chunkAddress.length < 2) {
|
|
136
|
+
throw new Error("Chunk address must be at least 2 bytes")
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
// First 2 bytes as big-endian uint16
|
|
140
|
+
return (chunkAddress[0] << 8) | chunkAddress[1]
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
/**
|
|
144
|
+
* Calculate bucket assignments for multiple chunks
|
|
145
|
+
*/
|
|
146
|
+
export function assignChunksToBuckets(
|
|
147
|
+
chunks: ContentAddressedChunk[],
|
|
148
|
+
): ChunkWithBucket[] {
|
|
149
|
+
return chunks.map((chunk) => {
|
|
150
|
+
const address = chunk.address.toUint8Array()
|
|
151
|
+
const bucket = toBucket(address)
|
|
152
|
+
|
|
153
|
+
return {
|
|
154
|
+
chunk,
|
|
155
|
+
bucket,
|
|
156
|
+
slot: 0, // Will be assigned later
|
|
157
|
+
}
|
|
158
|
+
})
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
// ============================================================================
// Chunk Mapping for Swarm Storage
// ============================================================================

/**
 * Number of utilization chunks (64 chunks of 4KB each = 262KB total).
 * Each chunk contains 1,024 buckets (1,024 × 4 bytes = 4,096 bytes).
 */
export const NUM_UTILIZATION_CHUNKS = 64
/** Number of bucket counters serialized into each utilization chunk */
export const BUCKETS_PER_CHUNK = 1024
|
|
171
|
+
|
|
172
|
+
/**
|
|
173
|
+
* Calculate which utilization chunk a bucket belongs to
|
|
174
|
+
* @param bucketIndex - Bucket index (0-65535)
|
|
175
|
+
* @returns Chunk index (0-63)
|
|
176
|
+
*/
|
|
177
|
+
export function getChunkIndexForBucket(bucketIndex: number): number {
|
|
178
|
+
if (bucketIndex < 0 || bucketIndex >= NUM_BUCKETS) {
|
|
179
|
+
throw new Error(
|
|
180
|
+
`Invalid bucket index: ${bucketIndex} (must be 0-${NUM_BUCKETS - 1})`,
|
|
181
|
+
)
|
|
182
|
+
}
|
|
183
|
+
return Math.floor(bucketIndex / BUCKETS_PER_CHUNK)
|
|
184
|
+
}
|
|
185
|
+
|
|
186
|
+
/**
|
|
187
|
+
* Calculate the offset of a bucket within its chunk
|
|
188
|
+
* @param bucketIndex - Bucket index (0-65535)
|
|
189
|
+
* @returns Offset within chunk (0-1023)
|
|
190
|
+
*/
|
|
191
|
+
export function getBucketOffsetInChunk(bucketIndex: number): number {
|
|
192
|
+
if (bucketIndex < 0 || bucketIndex >= NUM_BUCKETS) {
|
|
193
|
+
throw new Error(
|
|
194
|
+
`Invalid bucket index: ${bucketIndex} (must be 0-${NUM_BUCKETS - 1})`,
|
|
195
|
+
)
|
|
196
|
+
}
|
|
197
|
+
return bucketIndex % BUCKETS_PER_CHUNK
|
|
198
|
+
}
|
|
199
|
+
|
|
200
|
+
/**
|
|
201
|
+
* Extract a 4KB chunk from the dataCounters array
|
|
202
|
+
* @param dataCounters - Full array of 65,536 counters
|
|
203
|
+
* @param chunkIndex - Index of chunk to extract (0-63)
|
|
204
|
+
* @returns 4KB Uint8Array containing serialized counters for this chunk
|
|
205
|
+
*/
|
|
206
|
+
export function extractChunk(
|
|
207
|
+
dataCounters: Uint32Array,
|
|
208
|
+
chunkIndex: number,
|
|
209
|
+
): Uint8Array {
|
|
210
|
+
if (chunkIndex < 0 || chunkIndex >= NUM_UTILIZATION_CHUNKS) {
|
|
211
|
+
throw new Error(
|
|
212
|
+
`Invalid chunk index: ${chunkIndex} (must be 0-${NUM_UTILIZATION_CHUNKS - 1})`,
|
|
213
|
+
)
|
|
214
|
+
}
|
|
215
|
+
|
|
216
|
+
const startBucket = chunkIndex * BUCKETS_PER_CHUNK
|
|
217
|
+
const endBucket = startBucket + BUCKETS_PER_CHUNK
|
|
218
|
+
|
|
219
|
+
// Extract the slice of counters for this chunk
|
|
220
|
+
const chunkCounters = dataCounters.slice(startBucket, endBucket)
|
|
221
|
+
|
|
222
|
+
// Serialize to little-endian bytes
|
|
223
|
+
return serializeUint32Array(chunkCounters)
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
/**
|
|
227
|
+
* Merge a 4KB chunk back into the dataCounters array
|
|
228
|
+
* @param dataCounters - Full array of 65,536 counters (modified in place)
|
|
229
|
+
* @param chunkIndex - Index of chunk to merge (0-63)
|
|
230
|
+
* @param chunkData - 4KB Uint8Array containing serialized counters
|
|
231
|
+
*/
|
|
232
|
+
export function mergeChunk(
|
|
233
|
+
dataCounters: Uint32Array,
|
|
234
|
+
chunkIndex: number,
|
|
235
|
+
chunkData: Uint8Array,
|
|
236
|
+
): void {
|
|
237
|
+
if (chunkIndex < 0 || chunkIndex >= NUM_UTILIZATION_CHUNKS) {
|
|
238
|
+
throw new Error(
|
|
239
|
+
`Invalid chunk index: ${chunkIndex} (must be 0-${NUM_UTILIZATION_CHUNKS - 1})`,
|
|
240
|
+
)
|
|
241
|
+
}
|
|
242
|
+
|
|
243
|
+
if (chunkData.length !== CHUNK_SIZE) {
|
|
244
|
+
throw new Error(
|
|
245
|
+
`Invalid chunk data length: ${chunkData.length} (expected ${CHUNK_SIZE})`,
|
|
246
|
+
)
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
// Deserialize the chunk data
|
|
250
|
+
const chunkCounters = deserializeUint32Array(chunkData)
|
|
251
|
+
|
|
252
|
+
if (chunkCounters.length !== BUCKETS_PER_CHUNK) {
|
|
253
|
+
throw new Error(
|
|
254
|
+
`Invalid chunk counters length: ${chunkCounters.length} (expected ${BUCKETS_PER_CHUNK})`,
|
|
255
|
+
)
|
|
256
|
+
}
|
|
257
|
+
|
|
258
|
+
// Merge back into dataCounters
|
|
259
|
+
const startBucket = chunkIndex * BUCKETS_PER_CHUNK
|
|
260
|
+
dataCounters.set(chunkCounters, startBucket)
|
|
261
|
+
}
|
|
262
|
+
|
|
263
|
+
// ============================================================================
|
|
264
|
+
// Dirty Chunk Tracking
|
|
265
|
+
// ============================================================================
|
|
266
|
+
|
|
267
|
+
/**
|
|
268
|
+
* Tracks which utilization chunks have been modified and need uploading
|
|
269
|
+
*/
|
|
270
|
+
export class DirtyChunkTracker {
|
|
271
|
+
private dirtyChunks: Set<number>
|
|
272
|
+
|
|
273
|
+
constructor() {
|
|
274
|
+
this.dirtyChunks = new Set()
|
|
275
|
+
}
|
|
276
|
+
|
|
277
|
+
/**
|
|
278
|
+
* Mark a bucket as dirty (marks its containing chunk)
|
|
279
|
+
* @param bucketIndex - Bucket index (0-65535)
|
|
280
|
+
*/
|
|
281
|
+
markDirty(bucketIndex: number): void {
|
|
282
|
+
const chunkIndex = getChunkIndexForBucket(bucketIndex)
|
|
283
|
+
this.dirtyChunks.add(chunkIndex)
|
|
284
|
+
}
|
|
285
|
+
|
|
286
|
+
/**
|
|
287
|
+
* Mark a chunk as clean (uploaded successfully)
|
|
288
|
+
* @param chunkIndex - Chunk index (0-63)
|
|
289
|
+
*/
|
|
290
|
+
markClean(chunkIndex: number): void {
|
|
291
|
+
this.dirtyChunks.delete(chunkIndex)
|
|
292
|
+
}
|
|
293
|
+
|
|
294
|
+
/**
|
|
295
|
+
* Get array of dirty chunk indices
|
|
296
|
+
* @returns Sorted array of chunk indices that need uploading
|
|
297
|
+
*/
|
|
298
|
+
getDirtyChunks(): number[] {
|
|
299
|
+
return Array.from(this.dirtyChunks).sort((a, b) => a - b)
|
|
300
|
+
}
|
|
301
|
+
|
|
302
|
+
/**
|
|
303
|
+
* Check if there are any dirty chunks
|
|
304
|
+
* @returns true if there are chunks waiting to be uploaded
|
|
305
|
+
*/
|
|
306
|
+
hasDirtyChunks(): boolean {
|
|
307
|
+
return this.dirtyChunks.size > 0
|
|
308
|
+
}
|
|
309
|
+
|
|
310
|
+
/**
|
|
311
|
+
* Clear all dirty markers
|
|
312
|
+
*/
|
|
313
|
+
clear(): void {
|
|
314
|
+
this.dirtyChunks.clear()
|
|
315
|
+
}
|
|
316
|
+
|
|
317
|
+
/**
|
|
318
|
+
* Get number of dirty chunks
|
|
319
|
+
* @returns Count of chunks waiting to be uploaded
|
|
320
|
+
*/
|
|
321
|
+
get count(): number {
|
|
322
|
+
return this.dirtyChunks.size
|
|
323
|
+
}
|
|
324
|
+
}
|
|
325
|
+
|
|
326
|
+
// ============================================================================
|
|
327
|
+
// SOC Identifier Generation for Swarm Storage
|
|
328
|
+
// ============================================================================
|
|
329
|
+
|
|
330
|
+
/**
|
|
331
|
+
* Create a topic for batch utilization storage
|
|
332
|
+
* Topic format: `batch-utilization:{batchId}`
|
|
333
|
+
*
|
|
334
|
+
* @param batchId - Batch ID
|
|
335
|
+
* @returns Topic for this batch's utilization data
|
|
336
|
+
*/
|
|
337
|
+
export function makeBatchUtilizationTopic(batchId: BatchId): Topic {
|
|
338
|
+
const topicString = `batch-utilization:${batchId.toHex()}`
|
|
339
|
+
const encoder = new TextEncoder()
|
|
340
|
+
const hash = Binary.keccak256(encoder.encode(topicString))
|
|
341
|
+
return new Topic(hash)
|
|
342
|
+
}
|
|
343
|
+
|
|
344
|
+
/**
|
|
345
|
+
* Create an identifier for a specific utilization chunk
|
|
346
|
+
* Identifier: Keccak256(topic || chunkIndex)
|
|
347
|
+
*
|
|
348
|
+
* @param topic - Batch utilization topic
|
|
349
|
+
* @param chunkIndex - Chunk index (0-63)
|
|
350
|
+
* @returns Identifier for this chunk
|
|
351
|
+
*/
|
|
352
|
+
export function makeChunkIdentifier(
|
|
353
|
+
topic: Topic,
|
|
354
|
+
chunkIndex: number,
|
|
355
|
+
): Identifier {
|
|
356
|
+
if (chunkIndex < 0 || chunkIndex >= NUM_UTILIZATION_CHUNKS) {
|
|
357
|
+
throw new Error(
|
|
358
|
+
`Invalid chunk index: ${chunkIndex} (must be 0-${NUM_UTILIZATION_CHUNKS - 1})`,
|
|
359
|
+
)
|
|
360
|
+
}
|
|
361
|
+
|
|
362
|
+
// Encode chunk index as 32-bit big-endian
|
|
363
|
+
const chunkIndexBytes = new Uint8Array(4)
|
|
364
|
+
const view = new DataView(chunkIndexBytes.buffer)
|
|
365
|
+
view.setUint32(0, chunkIndex, false) // false = big-endian
|
|
366
|
+
|
|
367
|
+
// Hash: topic || chunkIndex
|
|
368
|
+
const hash = Binary.keccak256(
|
|
369
|
+
Binary.concatBytes(topic.toUint8Array(), chunkIndexBytes),
|
|
370
|
+
)
|
|
371
|
+
|
|
372
|
+
return new Identifier(hash)
|
|
373
|
+
}
|
|
374
|
+
|
|
375
|
+
// ============================================================================
// Chunk Upload/Download for Swarm Storage
// ============================================================================

/**
 * Upload an encrypted utilization chunk to Swarm as a CAC (immutable,
 * content-addressed).
 *
 * The CAC reference is derived locally from the encrypted chunk before the
 * upload is issued, so the caller gets the reference without waiting for
 * network confirmation.
 *
 * @param bee - Bee client instance
 * @param stamper - Stamper used to sign postage stamps for the upload
 * @param chunkIndex - Chunk index (0-63); currently unused, kept for API compatibility
 * @param data - Chunk data to upload (4KB)
 * @param encryptionKey - Encryption key (32 bytes)
 * @returns CAC reference (32 bytes) of the encrypted chunk
 */
export async function uploadUtilizationChunk(
  bee: Bee,
  stamper: Stamper,
  chunkIndex: number,
  data: Uint8Array,
  encryptionKey: Uint8Array,
): Promise<Uint8Array> {
  void chunkIndex // Parameter kept for API compatibility
  // Calculate CAC reference first (before upload)
  const encryptedChunk = makeEncryptedContentAddressedChunk(data, encryptionKey)
  const cacReference = encryptedChunk.address.toUint8Array()

  // Create a tag so the upload can be tracked by the Bee node
  const tagResponse = await bee.createTag()
  const tag = tagResponse.uid

  // Upload using unified interface (with deferred: false for fast return)
  await uploadSingleChunkWithEncryption(
    bee,
    stamper,
    data,
    encryptionKey,
    { deferred: false, tag }, // fast, non-blocking upload
  )

  return cacReference
}
|
|
418
|
+
|
|
419
|
+
/**
 * Download and decrypt a utilization chunk from Swarm by CAC reference
 *
 * NOTE(review): decryption is not yet implemented — after a successful
 * download this function always throws the placeholder error below. All
 * other failures (404, network errors, non-OK responses) resolve to
 * `undefined`, i.e. they are indistinguishable from "chunk not found".
 *
 * @param bee - Bee client instance
 * @param cacReference - CAC reference (32 bytes)
 * @param chunkIndex - Chunk index (for logging)
 * @param encryptionKey - Encryption key (32 bytes)
 * @returns Decrypted chunk data (4KB) or undefined if not found
 */
export async function downloadUtilizationChunk(
  bee: Bee,
  cacReference: Uint8Array,
  chunkIndex: number,
  encryptionKey: Uint8Array,
): Promise<Uint8Array | undefined> {
  // Validate inputs before the try block so bad arguments fail loudly
  // instead of being swallowed by the catch below
  if (encryptionKey.length !== 32) {
    throw new Error(
      `Invalid encryption key length: ${encryptionKey.length} (expected 32)`,
    )
  }

  if (cacReference.length !== 32) {
    throw new Error(
      `Invalid CAC reference length: ${cacReference.length} (expected 32)`,
    )
  }

  try {
    // Download encrypted CAC from Swarm via the node's /chunks endpoint
    const cacUrl = `${bee.url}/chunks/${Binary.uint8ArrayToHex(cacReference)}`

    const cacResponse = await fetch(cacUrl, {
      method: "GET",
    })

    // A missing chunk is an expected condition, not an error
    if (cacResponse.status === 404) {
      console.warn(
        `[UtilChunk] CAC not found for chunk ${chunkIndex} (reference: ${Binary.uint8ArrayToHex(cacReference).substring(0, 16)}...)`,
      )
      return undefined
    }

    if (!cacResponse.ok) {
      const text = await cacResponse.text()
      throw new Error(
        `Failed to download CAC: ${cacResponse.status} ${cacResponse.statusText}: ${text}`,
      )
    }

    // Get the encrypted CAC data (discarded until decryption is implemented)
    void (await cacResponse.arrayBuffer())

    // Decrypt the CAC data
    // TODO: Implement decryption
    // For now, this is a placeholder
    throw new Error(
      "Decryption not yet implemented - need to add decryptChunk function",
    )
  } catch (error) {
    // Re-throw only the placeholder error so callers notice the missing
    // implementation; every other failure is reported as "not found"
    if (
      error instanceof Error &&
      error.message.includes("Decryption not yet implemented")
    ) {
      throw error
    }
    return undefined
  }
}
|
|
487
|
+
|
|
488
|
+
// ============================================================================
|
|
489
|
+
// Serialization
|
|
490
|
+
// ============================================================================
|
|
491
|
+
|
|
492
|
+
/**
|
|
493
|
+
* Serialize Uint32Array to bytes (little-endian)
|
|
494
|
+
*/
|
|
495
|
+
export function serializeUint32Array(arr: Uint32Array): Uint8Array {
|
|
496
|
+
const buffer = new ArrayBuffer(arr.length * 4)
|
|
497
|
+
const view = new DataView(buffer)
|
|
498
|
+
|
|
499
|
+
for (let i = 0; i < arr.length; i++) {
|
|
500
|
+
view.setUint32(i * 4, arr[i], true) // true = little-endian
|
|
501
|
+
}
|
|
502
|
+
|
|
503
|
+
return new Uint8Array(buffer)
|
|
504
|
+
}
|
|
505
|
+
|
|
506
|
+
/**
|
|
507
|
+
* Deserialize bytes to Uint32Array (little-endian)
|
|
508
|
+
*/
|
|
509
|
+
export function deserializeUint32Array(bytes: Uint8Array): Uint32Array {
|
|
510
|
+
if (bytes.length % 4 !== 0) {
|
|
511
|
+
throw new Error("Byte array length must be a multiple of 4")
|
|
512
|
+
}
|
|
513
|
+
|
|
514
|
+
const arr = new Uint32Array(bytes.length / 4)
|
|
515
|
+
const view = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength)
|
|
516
|
+
|
|
517
|
+
for (let i = 0; i < arr.length; i++) {
|
|
518
|
+
arr[i] = view.getUint32(i * 4, true) // true = little-endian
|
|
519
|
+
}
|
|
520
|
+
|
|
521
|
+
return arr
|
|
522
|
+
}
|
|
523
|
+
|
|
524
|
+
/**
|
|
525
|
+
* Split data into 4KB chunks
|
|
526
|
+
*/
|
|
527
|
+
export function splitIntoChunks(data: Uint8Array): ContentAddressedChunk[] {
|
|
528
|
+
const chunks: ContentAddressedChunk[] = []
|
|
529
|
+
|
|
530
|
+
for (let i = 0; i < data.length; i += CHUNK_SIZE) {
|
|
531
|
+
const end = Math.min(i + CHUNK_SIZE, data.length)
|
|
532
|
+
const chunkData = data.slice(i, end)
|
|
533
|
+
|
|
534
|
+
// Pad last chunk if needed
|
|
535
|
+
const paddedData = new Uint8Array(CHUNK_SIZE)
|
|
536
|
+
paddedData.set(chunkData)
|
|
537
|
+
|
|
538
|
+
chunks.push(makeContentAddressedChunk(paddedData))
|
|
539
|
+
}
|
|
540
|
+
|
|
541
|
+
return chunks
|
|
542
|
+
}
|
|
543
|
+
|
|
544
|
+
/**
|
|
545
|
+
* Reconstruct data from chunks
|
|
546
|
+
*/
|
|
547
|
+
export function reconstructFromChunks(
|
|
548
|
+
chunks: ContentAddressedChunk[],
|
|
549
|
+
originalLength: number,
|
|
550
|
+
): Uint8Array {
|
|
551
|
+
const result = new Uint8Array(originalLength)
|
|
552
|
+
let offset = 0
|
|
553
|
+
|
|
554
|
+
for (const chunk of chunks) {
|
|
555
|
+
const data = chunk.data
|
|
556
|
+
const copyLength = Math.min(data.length, originalLength - offset)
|
|
557
|
+
result.set(data.slice(0, copyLength), offset)
|
|
558
|
+
offset += copyLength
|
|
559
|
+
|
|
560
|
+
if (offset >= originalLength) break
|
|
561
|
+
}
|
|
562
|
+
|
|
563
|
+
return result
|
|
564
|
+
}
|
|
565
|
+
|
|
566
|
+
// ============================================================================
|
|
567
|
+
// Utilization State Management
|
|
568
|
+
// ============================================================================
|
|
569
|
+
|
|
570
|
+
/**
|
|
571
|
+
* Initialize a new batch utilization state
|
|
572
|
+
*
|
|
573
|
+
* Reserves slots 0-3 per bucket for utilization metadata chunks,
|
|
574
|
+
* and starts data chunks at slot 4 (DATA_COUNTER_START).
|
|
575
|
+
*
|
|
576
|
+
* With 65,536 buckets and ~64 utilization chunks, the probability
|
|
577
|
+
* of any bucket getting 4+ utilization chunks is negligible (< 0.0000001%).
|
|
578
|
+
*/
|
|
579
|
+
export function initializeBatchUtilization(
|
|
580
|
+
batchId: BatchId,
|
|
581
|
+
): BatchUtilizationState {
|
|
582
|
+
const dataCounters = new Uint32Array(NUM_BUCKETS)
|
|
583
|
+
|
|
584
|
+
// Initialize data counters to start at slot 4
|
|
585
|
+
// Slots 0-3 are reserved for utilization metadata chunks
|
|
586
|
+
dataCounters.fill(DATA_COUNTER_START)
|
|
587
|
+
|
|
588
|
+
// Initialize metadata for all 64 chunks
|
|
589
|
+
const chunks: ChunkMetadata[] = []
|
|
590
|
+
for (let i = 0; i < NUM_UTILIZATION_CHUNKS; i++) {
|
|
591
|
+
chunks.push({
|
|
592
|
+
index: i,
|
|
593
|
+
contentHash: "", // Will be set on first upload
|
|
594
|
+
lastUpload: 0, // Never uploaded
|
|
595
|
+
dirty: true, // Mark as dirty for initial upload
|
|
596
|
+
})
|
|
597
|
+
}
|
|
598
|
+
|
|
599
|
+
// Create topic for this batch
|
|
600
|
+
const topic = makeBatchUtilizationTopic(batchId)
|
|
601
|
+
|
|
602
|
+
return {
|
|
603
|
+
batchId,
|
|
604
|
+
dataCounters,
|
|
605
|
+
chunks,
|
|
606
|
+
topic,
|
|
607
|
+
lastSync: Date.now(),
|
|
608
|
+
}
|
|
609
|
+
}
|
|
610
|
+
|
|
611
|
+
/**
|
|
612
|
+
* Calculate max slots per bucket based on batch depth
|
|
613
|
+
*/
|
|
614
|
+
export function calculateMaxSlotsPerBucket(batchDepth: number): number {
|
|
615
|
+
return Math.pow(2, batchDepth - BUCKET_DEPTH)
|
|
616
|
+
}
|
|
617
|
+
|
|
618
|
+
/**
|
|
619
|
+
* Check if a bucket has capacity for more chunks
|
|
620
|
+
*/
|
|
621
|
+
export function hasBucketCapacity(
|
|
622
|
+
dataCounter: number,
|
|
623
|
+
batchDepth: number,
|
|
624
|
+
): boolean {
|
|
625
|
+
const maxSlots = calculateMaxSlotsPerBucket(batchDepth)
|
|
626
|
+
return dataCounter < maxSlots
|
|
627
|
+
}
|
|
628
|
+
|
|
629
|
+
// ============================================================================
|
|
630
|
+
// Pre-calculation Algorithm
|
|
631
|
+
// ============================================================================
|
|
632
|
+
|
|
633
|
+
/**
 * Pre-calculate utilization update after writing data chunks.
 *
 * This solves the circular dependency problem:
 * 1. Assign buckets/slots to data chunks
 * 2. Update data counters
 * 3. Serialize data counters into utilization chunks
 * 4. Calculate where utilization chunks will land
 * 5. Assign slots 0-N to utilization chunks per bucket
 *
 * Note: Utilization chunks always start from slot 0 since mutable stamps
 * allow overwriting. No need to track previous positions.
 *
 * @param state - Current utilization state
 * @param dataChunks - Data chunks to be written
 * @param batchDepth - Batch depth parameter
 * @returns Updated state and utilization chunks to upload
 * @throws Error when any data chunk's bucket has no free slot left
 */
export function calculateUtilizationUpdate(
  state: BatchUtilizationState,
  dataChunks: ContentAddressedChunk[],
  batchDepth: number,
): UtilizationUpdate {
  // Step 1: Copy current data counters (immutable update)
  // The caller's state.dataCounters is never mutated here.
  const newDataCounters = new Uint32Array(state.dataCounters)

  // Step 2: Assign buckets and slots to data chunks
  // NOTE(review): dataChunksWithBuckets is populated below but never
  // returned — only the capacity check and counter increments have an
  // effect. Confirm whether UtilizationUpdate was meant to carry it.
  const dataChunksWithBuckets: ChunkWithBucket[] = []

  for (const chunk of dataChunks) {
    const bucket = toBucket(chunk.address.toUint8Array())
    const slot = newDataCounters[bucket]

    // Check capacity
    if (!hasBucketCapacity(slot, batchDepth)) {
      throw new Error(`Bucket ${bucket} is full (slot ${slot})`)
    }

    dataChunksWithBuckets.push({ chunk, bucket, slot })
    newDataCounters[bucket]++
  }

  // Step 3: Serialize updated data counters
  const serialized = serializeUint32Array(newDataCounters)
  const utilizationChunksRaw = splitIntoChunks(serialized)

  // Step 4: Calculate bucket assignments for utilization chunks
  // Count chunks per bucket for THIS update only (start from 0)
  // NOTE(review): no capacity check here — relies on the documented
  // assumption that 4+ utilization chunks per bucket is negligible.
  const bucketChunkCount = new Uint32Array(NUM_BUCKETS)
  const utilizationChunks: ChunkWithBucket[] = []

  for (const chunk of utilizationChunksRaw) {
    const bucket = toBucket(chunk.address.toUint8Array())
    const slot = bucketChunkCount[bucket] // Start from 0 each time

    utilizationChunks.push({ chunk, bucket, slot })
    bucketChunkCount[bucket]++
  }

  return {
    dataCounters: newDataCounters,
    utilizationChunks,
  }
}
|
|
697
|
+
|
|
698
|
+
// ============================================================================
|
|
699
|
+
// Stamper Integration
|
|
700
|
+
// ============================================================================
|
|
701
|
+
|
|
702
|
+
/**
 * Create a Stamper with custom bucket state for mutable stamping
 *
 * Thin delegation to Stamper.fromState; exists so callers in this module
 * do not depend on the Stamper factory directly.
 *
 * @param privateKey - Private key for signing
 * @param batchId - Batch ID
 * @param bucketState - Custom bucket heights (for resuming or mutable overwrites)
 * @param batchDepth - Batch depth parameter
 * @returns Stamper seeded with the given bucket heights
 */
export function createStamper(
  privateKey: Uint8Array | string,
  batchId: BatchId,
  bucketState: Uint32Array,
  batchDepth: number,
): Stamper {
  return Stamper.fromState(privateKey, batchId, bucketState, batchDepth)
}
|
|
718
|
+
|
|
719
|
+
/**
|
|
720
|
+
* Prepare bucket state for stamping chunks with specific slots
|
|
721
|
+
*
|
|
722
|
+
* @param chunksWithBuckets - Chunks with assigned buckets and slots
|
|
723
|
+
* @returns Bucket state array for Stamper
|
|
724
|
+
*/
|
|
725
|
+
export function prepareBucketState(
|
|
726
|
+
chunksWithBuckets: ChunkWithBucket[],
|
|
727
|
+
): Uint32Array {
|
|
728
|
+
const bucketState = new Uint32Array(NUM_BUCKETS)
|
|
729
|
+
|
|
730
|
+
// Set each bucket height to the slot we want to write to
|
|
731
|
+
for (const { bucket, slot } of chunksWithBuckets) {
|
|
732
|
+
// Use the highest slot we need for this bucket
|
|
733
|
+
bucketState[bucket] = Math.max(bucketState[bucket], slot)
|
|
734
|
+
}
|
|
735
|
+
|
|
736
|
+
return bucketState
|
|
737
|
+
}
|
|
738
|
+
|
|
739
|
+
/**
|
|
740
|
+
* Convert utilization data counters to Stamper bucket state
|
|
741
|
+
*
|
|
742
|
+
* Each dataCounter[bucket] represents the number of slots used in that bucket.
|
|
743
|
+
* The Stamper's bucket state should start at the next available slot.
|
|
744
|
+
*
|
|
745
|
+
* @param dataCounters - Current utilization counters (65536 buckets)
|
|
746
|
+
* @returns Bucket state array for Stamper (65536 entries)
|
|
747
|
+
*/
|
|
748
|
+
export function utilizationToBucketState(
|
|
749
|
+
dataCounters: Uint32Array,
|
|
750
|
+
): Uint32Array {
|
|
751
|
+
const bucketState = new Uint32Array(NUM_BUCKETS)
|
|
752
|
+
|
|
753
|
+
for (let bucket = 0; bucket < NUM_BUCKETS; bucket++) {
|
|
754
|
+
// Each counter represents slots used
|
|
755
|
+
// Stamper should start at the next slot
|
|
756
|
+
bucketState[bucket] = dataCounters[bucket]
|
|
757
|
+
}
|
|
758
|
+
|
|
759
|
+
return bucketState
|
|
760
|
+
}
|
|
761
|
+
|
|
762
|
+
// ============================================================================
|
|
763
|
+
// Storage Operations (Async with Cache Hierarchy)
|
|
764
|
+
// ============================================================================
|
|
765
|
+
|
|
766
|
+
/**
 * Load utilization state with cache hierarchy
 *
 * Load order:
 * 1. Try IndexedDB cache (all 64 chunks)
 * 2. If incomplete, download missing chunks from Swarm
 * 3. If not found, initialize new state
 * 4. Cache downloaded chunks in IndexedDB
 *
 * NOTE(review): step 2's Swarm download is still a TODO in the body —
 * missing chunks are currently filled with default counters instead.
 *
 * @param batchId - Batch ID
 * @param options - Load options with bee, owner, encryption key, and cache
 * @returns Utilization state
 */
export async function loadUtilizationState(
  batchId: BatchId,
  options: {
    bee: Bee
    owner: EthAddress
    encryptionKey: Uint8Array
    cache: UtilizationStoreDB
  },
): Promise<BatchUtilizationState> {
  const { cache } = options
  // TODO: Use bee, owner, encryptionKey when state feed is implemented
  const { bee: _bee, owner: _owner, encryptionKey: _encryptionKey } = options

  // Step 1: Try loading from IndexedDB cache
  const cachedChunks = await cache.getAllChunks(batchId.toHex())

  // Step 2: If we have all chunks in cache, reconstruct state
  if (cachedChunks.length === NUM_UTILIZATION_CHUNKS) {
    try {
      const dataCounters = new Uint32Array(NUM_BUCKETS)
      const chunks: ChunkMetadata[] = []

      // Reconstruct dataCounters from cached chunks
      for (const cached of cachedChunks) {
        mergeChunk(dataCounters, cached.chunkIndex, cached.data)

        chunks.push({
          index: cached.chunkIndex,
          contentHash: cached.contentHash,
          lastUpload: cached.lastAccess, // Use lastAccess as lastUpload
          dirty: false, // Not dirty if loaded from cache
        })
      }

      const topic = makeBatchUtilizationTopic(batchId)

      return {
        batchId,
        dataCounters,
        chunks,
        topic,
        lastSync: Date.now(),
      }
    } catch (error) {
      console.warn(`[BatchUtil] Failed to reconstruct from cache:`, error)
      // Fall through to Swarm download
    }
  }

  // Step 3: Download missing chunks from Swarm
  // (Currently: reuse whatever partial cache exists and default the rest.)

  const dataCounters = new Uint32Array(NUM_BUCKETS)
  const chunks: ChunkMetadata[] = []

  for (let i = 0; i < NUM_UTILIZATION_CHUNKS; i++) {
    // Check if we have this chunk in cache
    // (linear scan is fine: at most NUM_UTILIZATION_CHUNKS entries)
    const cached = cachedChunks.find((c) => c.chunkIndex === i)

    if (cached) {
      // Use cached chunk
      mergeChunk(dataCounters, i, cached.data)

      chunks.push({
        index: i,
        contentHash: cached.contentHash,
        lastUpload: cached.lastAccess,
        dirty: false,
      })
      continue
    }

    // TODO: Download from Swarm using state feed (not yet implemented)
    // For now, initialize with defaults
    const defaultCounters = new Uint32Array(BUCKETS_PER_CHUNK)
    defaultCounters.fill(DATA_COUNTER_START)

    const chunkData = serializeUint32Array(defaultCounters)
    mergeChunk(dataCounters, i, chunkData)

    chunks.push({
      index: i,
      contentHash: "", // Will be set on first upload
      lastUpload: 0,
      dirty: true, // Mark as dirty for upload
    })
  }

  const topic = makeBatchUtilizationTopic(batchId)

  return {
    batchId,
    dataCounters,
    chunks,
    topic,
    lastSync: Date.now(),
  }
}
|
|
876
|
+
|
|
877
|
+
/**
 * Save utilization state with incremental upload
 *
 * Only uploads dirty chunks to minimize network traffic.
 * Updates IndexedDB cache with new chunk data.
 *
 * The first failed upload aborts the loop and rethrows; chunks not yet
 * processed stay dirty in the tracker and will be retried next save.
 *
 * @param state - Current utilization state (modified in place)
 * @param options - Save options
 * @throws Rethrows the first upload/cache error encountered
 */
export async function saveUtilizationState(
  state: BatchUtilizationState,
  options: {
    bee: Bee
    stamper: Stamper
    encryptionKey: Uint8Array
    cache: UtilizationStoreDB
    tracker: DirtyChunkTracker
  },
): Promise<void> {
  const { bee, stamper, encryptionKey, cache, tracker } = options

  // Get dirty chunks from tracker
  const dirtyChunkIndices = tracker.getDirtyChunks()

  if (dirtyChunkIndices.length === 0) {
    return
  }

  for (const chunkIndex of dirtyChunkIndices) {
    const chunkMetadata = state.chunks[chunkIndex]

    // Extract chunk data from dataCounters
    const chunkData = extractChunk(state.dataCounters, chunkIndex)

    try {
      // Upload to Swarm as encrypted CAC
      const cacReference = await uploadUtilizationChunk(
        bee,
        stamper,
        chunkIndex,
        chunkData,
        encryptionKey,
      )

      const cacReferenceHex = Binary.uint8ArrayToHex(cacReference)

      // Skip if reference unchanged (deduplication)
      // NOTE(review): this check runs AFTER the upload call, so it saves
      // metadata/cache writes, not network traffic — presumably relying
      // on Swarm-side dedup of identical content; confirm.
      if (chunkMetadata.contentHash === cacReferenceHex) {
        // NOTE(review): chunkMetadata.dirty is left true on this path
        // while the tracker is marked clean — confirm this is intended.
        tracker.markClean(chunkIndex)
        continue
      }

      // Update metadata
      chunkMetadata.contentHash = cacReferenceHex
      chunkMetadata.lastUpload = Date.now()
      chunkMetadata.dirty = false

      // Update IndexedDB cache
      await cache.putChunk({
        batchId: state.batchId.toHex(),
        chunkIndex,
        data: chunkData,
        contentHash: cacReferenceHex,
        lastAccess: Date.now(),
      })

      // Mark chunk as clean
      tracker.markClean(chunkIndex)
    } catch (error) {
      console.error(`[BatchUtil] Failed to upload chunk ${chunkIndex}:`, error)
      // Keep it marked as dirty for retry
      throw error
    }
  }

  // Update lastSync timestamp
  state.lastSync = Date.now()
}
|
|
955
|
+
|
|
956
|
+
// ============================================================================
|
|
957
|
+
// High-level API
|
|
958
|
+
// ============================================================================
|
|
959
|
+
|
|
960
|
+
/**
|
|
961
|
+
* Update utilization state after writing data chunks
|
|
962
|
+
*
|
|
963
|
+
* This function:
|
|
964
|
+
* 1. Loads current state (from cache or Swarm)
|
|
965
|
+
* 2. Updates bucket counters for new data chunks
|
|
966
|
+
* 3. Marks affected utilization chunks as dirty
|
|
967
|
+
* 4. Returns state and tracker for later upload
|
|
968
|
+
*
|
|
969
|
+
* @param batchId - Batch ID
|
|
970
|
+
* @param dataChunks - Data chunks that were written
|
|
971
|
+
* @param batchDepth - Batch depth parameter
|
|
972
|
+
* @param options - Load options for state retrieval
|
|
973
|
+
* @returns Updated state and dirty chunk tracker
|
|
974
|
+
*/
|
|
975
|
+
export async function updateAfterWrite(
|
|
976
|
+
batchId: BatchId,
|
|
977
|
+
dataChunks: ContentAddressedChunk[],
|
|
978
|
+
batchDepth: number,
|
|
979
|
+
options: {
|
|
980
|
+
bee: Bee
|
|
981
|
+
owner: EthAddress
|
|
982
|
+
encryptionKey: Uint8Array
|
|
983
|
+
cache: UtilizationStoreDB
|
|
984
|
+
},
|
|
985
|
+
): Promise<{
|
|
986
|
+
state: BatchUtilizationState
|
|
987
|
+
tracker: DirtyChunkTracker
|
|
988
|
+
}> {
|
|
989
|
+
// Load current state
|
|
990
|
+
const state = await loadUtilizationState(batchId, options)
|
|
991
|
+
|
|
992
|
+
// Create tracker for dirty chunks
|
|
993
|
+
const tracker = new DirtyChunkTracker()
|
|
994
|
+
|
|
995
|
+
// Assign buckets and slots to data chunks
|
|
996
|
+
for (const chunk of dataChunks) {
|
|
997
|
+
const bucket = toBucket(chunk.address.toUint8Array())
|
|
998
|
+
const slot = state.dataCounters[bucket]
|
|
999
|
+
|
|
1000
|
+
// Check capacity
|
|
1001
|
+
if (!hasBucketCapacity(slot, batchDepth)) {
|
|
1002
|
+
throw new Error(`Bucket ${bucket} is full (slot ${slot})`)
|
|
1003
|
+
}
|
|
1004
|
+
|
|
1005
|
+
// Increment counter
|
|
1006
|
+
state.dataCounters[bucket]++
|
|
1007
|
+
|
|
1008
|
+
// Mark the utilization chunk containing this bucket as dirty
|
|
1009
|
+
tracker.markDirty(bucket)
|
|
1010
|
+
}
|
|
1011
|
+
|
|
1012
|
+
// Log dirty chunks
|
|
1013
|
+
const dirtyChunks = tracker.getDirtyChunks()
|
|
1014
|
+
|
|
1015
|
+
// Mark chunks as dirty in state metadata
|
|
1016
|
+
for (const chunkIndex of dirtyChunks) {
|
|
1017
|
+
state.chunks[chunkIndex].dirty = true
|
|
1018
|
+
}
|
|
1019
|
+
|
|
1020
|
+
return {
|
|
1021
|
+
state,
|
|
1022
|
+
tracker,
|
|
1023
|
+
}
|
|
1024
|
+
}
|
|
1025
|
+
|
|
1026
|
+
/**
|
|
1027
|
+
* Calculate current utilization fraction for a batch
|
|
1028
|
+
*
|
|
1029
|
+
* @param state - Current utilization state
|
|
1030
|
+
* @param batchDepth - Batch depth parameter
|
|
1031
|
+
* @returns Utilization as decimal fraction (0-1)
|
|
1032
|
+
*/
|
|
1033
|
+
export function calculateUtilization(
|
|
1034
|
+
state: BatchUtilizationState,
|
|
1035
|
+
batchDepth: number,
|
|
1036
|
+
): number {
|
|
1037
|
+
const maxSlots = calculateMaxSlotsPerBucket(batchDepth)
|
|
1038
|
+
const maxBucketUsage = Math.max(...Array.from(state.dataCounters))
|
|
1039
|
+
|
|
1040
|
+
// Utilization is based on the fullest bucket
|
|
1041
|
+
return Math.min(1, maxBucketUsage / maxSlots)
|
|
1042
|
+
}
|
|
1043
|
+
|
|
1044
|
+
// ============================================================================
|
|
1045
|
+
// Utilization-Aware Stamper (Wrapper with Auto-Tracking)
|
|
1046
|
+
// ============================================================================
|
|
1047
|
+
|
|
1048
|
+
/**
 * Stamper wrapper that maintains bucket state from utilization data
 *
 * This class wraps the cafe-utility Stamper and:
 * - Loads bucket state from cached utilization data on creation
 * - Tracks which buckets/slots are used during stamping
 * - Provides a flush() method to persist updates back to cache
 *
 * This ensures the Stamper always has accurate knowledge of which
 * buckets/slots are already used, preventing overwrites.
 */
export class UtilizationAwareStamper implements Stamper {
  // Underlying stamper; replaced wholesale by applyUtilizationUpdate().
  private stamper: Stamper
  // In-memory utilization counters + per-chunk metadata for this batch.
  private utilizationState: BatchUtilizationState
  // IndexedDB-backed store used by flush() to persist dirty chunks.
  private cache: UtilizationStoreDB
  // True once any stamp() has happened since the last flush().
  private dirty: boolean = false
  // Buckets touched by stamp() since the last flush().
  private dirtyBuckets: Set<number> = new Set()

  readonly batchId: BatchId
  readonly depth: number

  // Delegate Stamper properties to underlying stamper
  get signer() {
    return this.stamper.signer
  }
  get buckets() {
    return this.stamper.buckets
  }
  get maxSlot() {
    return this.stamper.maxSlot
  }

  // Private: instances are built via the async create() factory.
  private constructor(
    stamper: Stamper,
    batchId: BatchId,
    depth: number,
    cache: UtilizationStoreDB,
    utilizationState: BatchUtilizationState,
  ) {
    this.stamper = stamper
    this.batchId = batchId
    this.depth = depth
    this.cache = cache
    this.utilizationState = utilizationState
  }

  /**
   * Create a UtilizationAwareStamper with bucket state from cache
   *
   * Starts from a fresh default state, merges any cached utilization
   * chunks on top (cache errors are logged and ignored), then seeds the
   * underlying Stamper with the resulting bucket heights.
   *
   * @param privateKey - Signer private key
   * @param batchId - Postage batch ID
   * @param depth - Batch depth
   * @param cache - Utilization cache database
   * @param _owner - Owner address (required for validation, reserved for future Swarm upload)
   * @param _encryptionKey - Encryption key (required for validation, reserved for future Swarm upload)
   * @returns New UtilizationAwareStamper instance
   */
  static async create(
    privateKey: Uint8Array | string,
    batchId: BatchId,
    depth: number,
    cache: UtilizationStoreDB,
    _owner: EthAddress,
    _encryptionKey: Uint8Array,
  ): Promise<UtilizationAwareStamper> {
    // Initialize utilization state (always, since owner is now required)
    const utilizationState = initializeBatchUtilization(batchId)
    let bucketState: Uint32Array

    // Try to load utilization state from cache
    try {
      const cachedChunks = await cache.getAllChunks(batchId.toHex())
      if (cachedChunks.length > 0) {
        // Merge cached chunks into state
        for (const cached of cachedChunks) {
          mergeChunk(
            utilizationState.dataCounters,
            cached.chunkIndex,
            cached.data,
          )
        }
      } else {
        // No cached chunks: keep the freshly initialized defaults.
      }
    } catch (error) {
      console.warn(
        `[UtilizationAwareStamper] Failed to load state from cache, starting with fresh state:`,
        error,
      )
    }

    // Convert utilization counters to bucket state
    bucketState = utilizationToBucketState(utilizationState.dataCounters)

    // Create underlying stamper with bucket state
    const stamper = Stamper.fromState(privateKey, batchId, bucketState, depth)

    return new UtilizationAwareStamper(
      stamper,
      batchId,
      depth,
      cache,
      utilizationState,
    )
  }

  /**
   * Stamp a chunk (implements Stamper interface)
   *
   * Delegates to underlying stamper and tracks which buckets are used.
   *
   * @param chunk - Chunk to stamp
   * @returns Envelope with batch ID and signature
   */
  stamp(chunk: CafeChunk): EnvelopeWithBatchId {
    const envelope = this.stamper.stamp(chunk)

    // Extract bucket from envelope index
    // The index is 8 bytes: first 4 bytes = bucket (big-endian), last 4 bytes = slot
    const view = new DataView(
      envelope.index.buffer,
      envelope.index.byteOffset,
      envelope.index.byteLength,
    )
    const bucket = view.getUint32(0, false) // false = big-endian

    // Update utilization state (increment counter for this bucket)
    this.utilizationState.dataCounters[bucket]++

    // Mark bucket as dirty for eventual flush
    this.dirtyBuckets.add(bucket)
    this.dirty = true

    return envelope
  }

  /**
   * Get bucket state (implements Stamper interface)
   */
  getState(): Uint32Array {
    return this.stamper.getState()
  }

  /**
   * Flush dirty utilization chunks to cache
   *
   * This persists any bucket state changes made during stamping.
   * Should be called after all stamping operations are complete.
   *
   * On cache failure the error is rethrown and dirty flags are kept so
   * a later flush() can retry.
   */
  async flush(): Promise<void> {
    if (!this.dirty) {
      return
    }

    // Mark utilization chunks as dirty for the affected buckets
    const dirtyChunkIndexes = new Set<number>()
    for (const bucket of this.dirtyBuckets) {
      const chunkIndex = getChunkIndexForBucket(bucket)
      dirtyChunkIndexes.add(chunkIndex)
      this.utilizationState.chunks[chunkIndex].dirty = true
    }

    // Create dirty chunk tracker
    // NOTE(review): `tracker` is populated below but never read again in
    // this method — looks like leftover from an earlier design; confirm
    // before removing.
    const tracker = new DirtyChunkTracker()
    for (const chunkIndex of dirtyChunkIndexes) {
      // Mark any bucket in this chunk as dirty (the tracker groups by chunk)
      const firstBucket = chunkIndex * BUCKETS_PER_CHUNK
      tracker.markDirty(firstBucket)
    }

    // Save dirty chunks to cache
    try {
      // Note: This requires owner and encryptionKey which we don't have here
      // The caller should use saveUtilizationState directly if they want to upload to Swarm
      // For now, just update the cache
      for (const chunkIndex of dirtyChunkIndexes) {
        const chunkData = extractChunk(
          this.utilizationState.dataCounters,
          chunkIndex,
        )

        await this.cache.putChunk({
          batchId: this.batchId.toHex(),
          chunkIndex,
          data: chunkData,
          contentHash: calculateContentHash(chunkData),
          lastAccess: Date.now(),
        })
        this.utilizationState.chunks[chunkIndex].dirty = false
      }

      // Update lastSync timestamp
      this.utilizationState.lastSync = Date.now()
    } catch (error) {
      console.error(
        `[UtilizationAwareStamper] Failed to flush to cache:`,
        error,
      )
      throw error
    }

    // Clear dirty flags
    this.dirtyBuckets.clear()
    this.dirty = false
  }

  /**
   * Get current utilization state
   *
   * @returns Current utilization state (live object, not a copy)
   */
  getUtilizationState(): BatchUtilizationState {
    return this.utilizationState
  }

  /**
   * Apply utilization update from another tab.
   * Updates local bucket counters to match leader's state.
   *
   * Rebuilds the underlying Stamper from the merged counters so future
   * stamps use the updated bucket heights.
   *
   * @param buckets Array of bucket indices and their new counter values
   */
  applyUtilizationUpdate(
    buckets: Array<{ index: number; value: number }>,
  ): void {
    for (const { index, value } of buckets) {
      if (index >= 0 && index < NUM_BUCKETS) {
        // Only update if the incoming value is higher (monotonic increase)
        if (value > this.utilizationState.dataCounters[index]) {
          this.utilizationState.dataCounters[index] = value
        }
      }
    }

    // Update stamper bucket state to match
    const bucketState = utilizationToBucketState(
      this.utilizationState.dataCounters,
    )
    this.stamper = Stamper.fromState(
      this.stamper.signer,
      this.batchId,
      bucketState,
      this.depth,
    )

    // Note: Do NOT clear dirtyBuckets here - those represent local writes
    // that still need to be flushed. Only flush() should clear them.
  }

  /**
   * Get bucket counter values for broadcasting to other tabs.
   * Returns only the dirty buckets with their current values.
   *
   * IMPORTANT: Call this BEFORE flush() as flush() clears dirtyBuckets.
   *
   * @returns Array of bucket index/value pairs for broadcasting
   */
  getBucketUpdatesForBroadcast(): Array<{ index: number; value: number }> {
    return Array.from(this.dirtyBuckets).map((index) => ({
      index,
      value: this.utilizationState.dataCounters[index],
    }))
  }
}
|