@libp2p/circuit-relay-v2 0.0.0-97ab31c0c
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +4 -0
- package/README.md +69 -0
- package/dist/index.min.js +45 -0
- package/dist/src/constants.d.ts +55 -0
- package/dist/src/constants.d.ts.map +1 -0
- package/dist/src/constants.js +61 -0
- package/dist/src/constants.js.map +1 -0
- package/dist/src/index.d.ts +56 -0
- package/dist/src/index.d.ts.map +1 -0
- package/dist/src/index.js +39 -0
- package/dist/src/index.js.map +1 -0
- package/dist/src/pb/index.d.ts +93 -0
- package/dist/src/pb/index.d.ts.map +1 -0
- package/dist/src/pb/index.js +425 -0
- package/dist/src/pb/index.js.map +1 -0
- package/dist/src/server/advert-service.d.ts +46 -0
- package/dist/src/server/advert-service.d.ts.map +1 -0
- package/dist/src/server/advert-service.js +72 -0
- package/dist/src/server/advert-service.js.map +1 -0
- package/dist/src/server/index.d.ts +67 -0
- package/dist/src/server/index.d.ts.map +1 -0
- package/dist/src/server/index.js +313 -0
- package/dist/src/server/index.js.map +1 -0
- package/dist/src/server/reservation-store.d.ts +49 -0
- package/dist/src/server/reservation-store.d.ts.map +1 -0
- package/dist/src/server/reservation-store.js +65 -0
- package/dist/src/server/reservation-store.js.map +1 -0
- package/dist/src/server/reservation-voucher.d.ts +18 -0
- package/dist/src/server/reservation-voucher.d.ts.map +1 -0
- package/dist/src/server/reservation-voucher.js +36 -0
- package/dist/src/server/reservation-voucher.js.map +1 -0
- package/dist/src/transport/discovery.d.ts +48 -0
- package/dist/src/transport/discovery.d.ts.map +1 -0
- package/dist/src/transport/discovery.js +97 -0
- package/dist/src/transport/discovery.js.map +1 -0
- package/dist/src/transport/index.d.ts +58 -0
- package/dist/src/transport/index.d.ts.map +1 -0
- package/dist/src/transport/index.js +279 -0
- package/dist/src/transport/index.js.map +1 -0
- package/dist/src/transport/listener.d.ts +11 -0
- package/dist/src/transport/listener.d.ts.map +1 -0
- package/dist/src/transport/listener.js +66 -0
- package/dist/src/transport/listener.js.map +1 -0
- package/dist/src/transport/reservation-store.d.ts +74 -0
- package/dist/src/transport/reservation-store.d.ts.map +1 -0
- package/dist/src/transport/reservation-store.js +209 -0
- package/dist/src/transport/reservation-store.js.map +1 -0
- package/dist/src/utils.d.ts +14 -0
- package/dist/src/utils.d.ts.map +1 -0
- package/dist/src/utils.js +106 -0
- package/dist/src/utils.js.map +1 -0
- package/package.json +83 -0
- package/src/constants.ts +79 -0
- package/src/index.ts +64 -0
- package/src/pb/index.proto +67 -0
- package/src/pb/index.ts +539 -0
- package/src/server/advert-service.ts +109 -0
- package/src/server/index.ts +446 -0
- package/src/server/reservation-store.ts +116 -0
- package/src/server/reservation-voucher.ts +51 -0
- package/src/transport/discovery.ts +138 -0
- package/src/transport/index.ts +399 -0
- package/src/transport/listener.ts +98 -0
- package/src/transport/reservation-store.ts +312 -0
- package/src/utils.ts +134 -0
@@ -0,0 +1,312 @@
|
|
1
|
+
import { TypedEventEmitter, type TypedEventTarget } from '@libp2p/interface/events'
|
2
|
+
import { PeerMap } from '@libp2p/peer-collections'
|
3
|
+
import { PeerJobQueue } from '@libp2p/utils/peer-job-queue'
|
4
|
+
import { multiaddr } from '@multiformats/multiaddr'
|
5
|
+
import { pbStream } from 'it-protobuf-stream'
|
6
|
+
import { DEFAULT_RESERVATION_CONCURRENCY, RELAY_TAG, RELAY_V2_HOP_CODEC } from '../constants.js'
|
7
|
+
import { HopMessage, Status } from '../pb/index.js'
|
8
|
+
import { getExpirationMilliseconds } from '../utils.js'
|
9
|
+
import type { Reservation } from '../pb/index.js'
|
10
|
+
import type { Libp2pEvents, AbortOptions, ComponentLogger, Logger } from '@libp2p/interface'
|
11
|
+
import type { Connection } from '@libp2p/interface/connection'
|
12
|
+
import type { PeerId } from '@libp2p/interface/peer-id'
|
13
|
+
import type { PeerStore } from '@libp2p/interface/peer-store'
|
14
|
+
import type { Startable } from '@libp2p/interface/startable'
|
15
|
+
import type { ConnectionManager } from '@libp2p/interface-internal/connection-manager'
|
16
|
+
import type { TransportManager } from '@libp2p/interface-internal/transport-manager'
|
17
|
+
|
18
|
+
// allow refreshing a relay reservation if it will expire in the next 10 minutes
const REFRESH_WINDOW = (60 * 1000) * 10

// try to refresh relay reservations 5 minutes before expiry
const REFRESH_TIMEOUT = (60 * 1000) * 5

// lower bound for the refresh timer - never schedule a refresh sooner than
// 30 seconds from now, even if the reservation expires before then
const REFRESH_TIMEOUT_MIN = 30 * 1000
|
26
|
+
|
27
|
+
/**
 * Libp2p components required by the transport ReservationStore
 */
export interface RelayStoreComponents {
  peerId: PeerId
  connectionManager: ConnectionManager
  transportManager: TransportManager
  peerStore: PeerStore
  events: TypedEventTarget<Libp2pEvents>
  logger: ComponentLogger
}
|
35
|
+
|
36
|
+
export interface RelayStoreInit {
  /**
   * Multiple relays may be discovered simultaneously - to prevent listening
   * on too many relays, this value controls how many to attempt to reserve a
   * slot on at once. If set to more than one, we may end up listening on
   * more relays than the `maxReservations` value, but on networks with poor
   * connectivity the user may wish to attempt to reserve on multiple relays
   * simultaneously. (default: 1)
   */
  reservationConcurrency?: number

  /**
   * How many discovered relays to allow in the reservation store
   */
  discoverRelays?: number

  /**
   * Limit the number of potential relays we will dial (default: 100)
   */
  maxReservationQueueLength?: number

  /**
   * When creating a reservation it must complete within this number of ms
   * (default: 10000)
   */
  reservationCompletionTimeout?: number
}
|
63
|
+
|
64
|
+
// how a relay became known to us - either found at runtime ('discovered')
// or supplied up-front ('configured')
export type RelayType = 'discovered' | 'configured'
|
65
|
+
|
66
|
+
interface RelayEntry {
  // timer that re-runs addRelay to refresh the reservation before it expires
  timeout: ReturnType<typeof setTimeout>
  type: RelayType
  // the reservation message returned by the relay over the HOP protocol
  reservation: Reservation
}
|
71
|
+
|
72
|
+
export interface ReservationStoreEvents {
  // emitted after a reservation is removed and the store holds fewer
  // reservations than the configured number of discovered relays
  'relay:not-enough-relays': CustomEvent
  // emitted when a reservation is removed, e.g. after disconnecting
  // from the relay peer
  'relay:removed': CustomEvent<PeerId>
}
|
76
|
+
|
77
|
+
/**
 * Stores the relay reservations held by this node, keyed by relay peer id.
 *
 * Reservations are created over the circuit v2 HOP protocol and refreshed
 * on a timer before they expire. When the connection to a relay peer
 * closes, the reservation is dropped and a `relay:removed` event is
 * emitted; if that leaves fewer reservations than the configured
 * `discoverRelays` count, `relay:not-enough-relays` is emitted so that
 * listeners can go looking for more relays.
 */
export class ReservationStore extends TypedEventEmitter<ReservationStoreEvents> implements Startable {
  private readonly peerId: PeerId
  private readonly connectionManager: ConnectionManager
  private readonly transportManager: TransportManager
  private readonly peerStore: PeerStore
  private readonly events: TypedEventTarget<Libp2pEvents>
  // serializes reservation attempts - at most one pending job per relay peer
  private readonly reserveQueue: PeerJobQueue
  // active reservations together with their refresh timers
  private readonly reservations: PeerMap<RelayEntry>
  private readonly maxDiscoveredRelays: number
  private readonly maxReservationQueueLength: number
  private readonly reservationCompletionTimeout: number
  private started: boolean
  private readonly log: Logger

  constructor (components: RelayStoreComponents, init?: RelayStoreInit) {
    super()

    this.log = components.logger.forComponent('libp2p:circuit-relay:transport:reservation-store')
    this.peerId = components.peerId
    this.connectionManager = components.connectionManager
    this.transportManager = components.transportManager
    this.peerStore = components.peerStore
    this.events = components.events
    this.reservations = new PeerMap()
    // 0 means we never try to keep a quota of discovered relays
    this.maxDiscoveredRelays = init?.discoverRelays ?? 0
    this.maxReservationQueueLength = init?.maxReservationQueueLength ?? 100
    this.reservationCompletionTimeout = init?.reservationCompletionTimeout ?? 10000
    this.started = false

    // ensure we don't listen on multiple relays simultaneously
    this.reserveQueue = new PeerJobQueue({
      concurrency: init?.reservationConcurrency ?? DEFAULT_RESERVATION_CONCURRENCY
    })

    // When a peer disconnects, if we had a reservation on that peer
    // remove the reservation and multiaddr and maybe trigger search
    // for new relays
    this.events.addEventListener('peer:disconnect', (evt) => {
      this.#removeRelay(evt.detail)
    })
  }

  isStarted (): boolean {
    return this.started
  }

  async start (): Promise<void> {
    this.started = true
  }

  /**
   * Cancel any queued reservation attempts and refresh timers and drop all
   * reservations held in memory
   */
  async stop (): Promise<void> {
    this.reserveQueue.clear()
    this.reservations.forEach(({ timeout }) => {
      clearTimeout(timeout)
    })
    this.reservations.clear()
    this.started = false
  }

  /**
   * If the number of current relays is beneath the configured `maxReservations`
   * value, and the passed peer id is not our own, and we have a non-relayed connection
   * to the remote, and the remote peer speaks the hop protocol, try to reserve a slot
   * on the remote peer
   */
  async addRelay (peerId: PeerId, type: RelayType): Promise<void> {
    if (this.peerId.equals(peerId)) {
      this.log('not trying to use self as relay')
      return
    }

    // NOTE(review): strict `>` means the queue can actually hold
    // maxReservationQueueLength + 1 jobs before new relays are refused
    if (this.reserveQueue.size > this.maxReservationQueueLength) {
      this.log('not adding relay as the queue is full')
      return
    }

    if (this.reserveQueue.hasJob(peerId)) {
      this.log('relay peer is already in the reservation queue')
      return
    }

    this.log('add relay %p', peerId)

    await this.reserveQueue.add(async () => {
      try {
        // allow refresh of an existing reservation if it is about to expire
        const existingReservation = this.reservations.get(peerId)

        if (existingReservation != null) {
          if (getExpirationMilliseconds(existingReservation.reservation.expire) > REFRESH_WINDOW) {
            this.log('already have reservation on relay peer %p and it expires in more than 10 minutes', peerId)
            return
          }

          // the reservation is close to expiry - drop it and its refresh
          // timer and fall through to create a fresh one
          clearTimeout(existingReservation.timeout)
          this.reservations.delete(peerId)
        }

        // only count 'discovered' entries against the discovered-relay quota
        if (type === 'discovered' && [...this.reservations.values()].reduce((acc, curr) => {
          if (curr.type === 'discovered') {
            acc++
          }

          return acc
        }, 0) >= this.maxDiscoveredRelays) {
          this.log('already have enough discovered relays')
          return
        }

        // one deadline covers both dialling the relay and making the reservation
        const signal = AbortSignal.timeout(this.reservationCompletionTimeout)

        const connection = await this.connectionManager.openConnection(peerId, {
          signal
        })

        // a relayed connection cannot itself be used as a relay hop
        if (connection.remoteAddr.protoNames().includes('p2p-circuit')) {
          this.log('not creating reservation over relayed connection')
          return
        }

        const reservation = await this.#createReservation(connection, {
          signal
        })

        this.log('created reservation on relay peer %p', peerId)

        const expiration = getExpirationMilliseconds(reservation.expire)

        // sets a lower bound on the timeout, and also don't let it go over
        // 2^31 - 1 (setTimeout will only accept signed 32 bit integers)
        const timeoutDuration = Math.min(Math.max(expiration - REFRESH_TIMEOUT, REFRESH_TIMEOUT_MIN), Math.pow(2, 31) - 1)

        // schedule a refresh before the reservation expires; failures are
        // logged - the next disconnect/discovery cycle will retry
        const timeout = setTimeout(() => {
          this.addRelay(peerId, type).catch(err => {
            this.log.error('could not refresh reservation to relay %p', peerId, err)
          })
        }, timeoutDuration)

        // we've managed to create a reservation successfully
        this.reservations.set(peerId, {
          timeout,
          reservation,
          type
        })

        // ensure we don't close the connection to the relay
        await this.peerStore.merge(peerId, {
          tags: {
            [RELAY_TAG]: {
              value: 1,
              ttl: expiration
            }
          }
        })

        // listen on multiaddr that only the circuit transport is listening for
        await this.transportManager.listen([multiaddr(`/p2p/${peerId.toString()}/p2p-circuit`)])
      } catch (err) {
        this.log.error('could not reserve slot on %p', peerId, err)

        // cancel the renewal timeout if it's been set
        const reservation = this.reservations.get(peerId)

        if (reservation != null) {
          clearTimeout(reservation.timeout)
        }

        // if listening failed, remove the reservation
        this.reservations.delete(peerId)
      }
    }, {
      peerId
    })
  }

  hasReservation (peerId: PeerId): boolean {
    return this.reservations.has(peerId)
  }

  getReservation (peerId: PeerId): Reservation | undefined {
    return this.reservations.get(peerId)?.reservation
  }

  /**
   * Perform the RESERVE exchange over the HOP protocol on the passed
   * connection. The stream is always closed before returning. Throws if
   * the response status is not OK or contains no reservation.
   */
  async #createReservation (connection: Connection, options: AbortOptions): Promise<Reservation> {
    options.signal?.throwIfAborted()

    this.log('requesting reservation from %p', connection.remotePeer)
    const stream = await connection.newStream(RELAY_V2_HOP_CODEC, options)
    const pbstr = pbStream(stream)
    const hopstr = pbstr.pb(HopMessage)
    await hopstr.write({ type: HopMessage.Type.RESERVE }, options)

    let response: HopMessage

    try {
      response = await hopstr.read(options)
    } catch (err: any) {
      this.log.error('error parsing reserve message response from %p because', connection.remotePeer, err)
      throw err
    } finally {
      await stream.close()
    }

    if (response.status === Status.OK && (response.reservation != null)) {
      return response.reservation
    }

    const errMsg = `reservation failed with status ${response.status ?? 'undefined'}`
    this.log.error(errMsg)

    throw new Error(errMsg)
  }

  /**
   * Remove listen relay
   */
  #removeRelay (peerId: PeerId): void {
    const existingReservation = this.reservations.get(peerId)

    if (existingReservation == null) {
      return
    }

    this.log('connection to relay %p closed, removing reservation from local store', peerId)

    clearTimeout(existingReservation.timeout)
    this.reservations.delete(peerId)

    this.safeDispatchEvent('relay:removed', { detail: peerId })

    // NOTE(review): this compares the total reservation count (discovered
    // and configured) against the discovered-relay quota
    if (this.reservations.size < this.maxDiscoveredRelays) {
      this.log('not enough relays %d/%d', this.reservations.size, this.maxDiscoveredRelays)
      this.safeDispatchEvent('relay:not-enough-relays', {})
    }
  }
}
|
package/src/utils.ts
ADDED
@@ -0,0 +1,134 @@
|
|
1
|
+
import { CodeError } from '@libp2p/interface/errors'
|
2
|
+
import { anySignal } from 'any-signal'
|
3
|
+
import { CID } from 'multiformats/cid'
|
4
|
+
import { sha256 } from 'multiformats/hashes/sha2'
|
5
|
+
import { ERR_TRANSFER_LIMIT_EXCEEDED } from './constants.js'
|
6
|
+
import type { Limit } from './pb/index.js'
|
7
|
+
import type { LoggerOptions } from '@libp2p/interface'
|
8
|
+
import type { Stream } from '@libp2p/interface/connection'
|
9
|
+
import type { Source } from 'it-stream-types'
|
10
|
+
import type { Uint8ArrayList } from 'uint8arraylist'
|
11
|
+
|
12
|
+
async function * countStreamBytes (source: Source<Uint8Array | Uint8ArrayList>, limit: { remaining: bigint }, options: LoggerOptions): AsyncGenerator<Uint8Array | Uint8ArrayList, void, unknown> {
|
13
|
+
const limitBytes = limit.remaining
|
14
|
+
|
15
|
+
for await (const buf of source) {
|
16
|
+
const len = BigInt(buf.byteLength)
|
17
|
+
|
18
|
+
if ((limit.remaining - len) < 0) {
|
19
|
+
// this is a safe downcast since len is guarantee to be in the range for a number
|
20
|
+
const remaining = Number(limit.remaining)
|
21
|
+
limit.remaining = 0n
|
22
|
+
|
23
|
+
try {
|
24
|
+
if (remaining !== 0) {
|
25
|
+
yield buf.subarray(0, remaining)
|
26
|
+
}
|
27
|
+
} catch (err: any) {
|
28
|
+
options.log.error(err)
|
29
|
+
}
|
30
|
+
|
31
|
+
throw new CodeError(`data limit of ${limitBytes} bytes exceeded`, ERR_TRANSFER_LIMIT_EXCEEDED)
|
32
|
+
}
|
33
|
+
|
34
|
+
limit.remaining -= len
|
35
|
+
yield buf
|
36
|
+
}
|
37
|
+
}
|
38
|
+
|
39
|
+
/**
 * Pipe `src` into `dst` and `dst` into `src`, optionally enforcing relay
 * limits:
 *
 * - `limit.duration` - both streams are aborted once this many ms elapse
 * - `limit.data` - both streams are aborted once this many bytes have been
 *   relayed. The counter object is shared between the two directions, so
 *   the cap applies to the combined total, not per-direction.
 *
 * Returns immediately - relaying starts on the next microtask and failures
 * surface by aborting the streams.
 */
export function createLimitedRelay (src: Stream, dst: Stream, abortSignal: AbortSignal, limit: Limit | undefined, options: LoggerOptions): void {
  // tear down both directions and the duration timer when either side fails
  function abortStreams (err: Error): void {
    src.abort(err)
    dst.abort(err)
    clearTimeout(timeout)
  }

  // fires when the caller aborts or the duration limit elapses
  const abortController = new AbortController()
  const signal = anySignal([abortSignal, abortController.signal])

  let timeout: ReturnType<typeof setTimeout> | undefined

  if (limit?.duration != null) {
    timeout = setTimeout(() => {
      abortController.abort()
    }, limit.duration)
  }

  // track completion of each direction so cleanup runs exactly once, after
  // both pipes have finished
  let srcDstFinished = false
  let dstSrcFinished = false

  let dataLimit: { remaining: bigint } | undefined

  if (limit?.data != null) {
    dataLimit = {
      remaining: limit.data
    }
  }

  // src -> dst direction
  queueMicrotask(() => {
    const onAbort = (): void => {
      dst.abort(new CodeError(`duration limit of ${limit?.duration} ms exceeded`, ERR_TRANSFER_LIMIT_EXCEEDED))
    }

    signal.addEventListener('abort', onAbort, { once: true })

    void dst.sink(dataLimit == null ? src.source : countStreamBytes(src.source, dataLimit, options))
      .catch(err => {
        options.log.error('error while relaying streams src -> dst', err)
        abortStreams(err)
      })
      .finally(() => {
        srcDstFinished = true

        if (dstSrcFinished) {
          // both directions are done - release the composite signal's
          // internal listeners and stop the duration timer
          signal.removeEventListener('abort', onAbort)
          signal.clear()
          clearTimeout(timeout)
        }
      })
  })

  // dst -> src direction
  queueMicrotask(() => {
    const onAbort = (): void => {
      src.abort(new CodeError(`duration limit of ${limit?.duration} ms exceeded`, ERR_TRANSFER_LIMIT_EXCEEDED))
    }

    signal.addEventListener('abort', onAbort, { once: true })

    void src.sink(dataLimit == null ? dst.source : countStreamBytes(dst.source, dataLimit, options))
      .catch(err => {
        options.log.error('error while relaying streams dst -> src', err)
        abortStreams(err)
      })
      .finally(() => {
        dstSrcFinished = true

        if (srcDstFinished) {
          signal.removeEventListener('abort', onAbort)
          signal.clear()
          clearTimeout(timeout)
        }
      })
  })
}
|
114
|
+
|
115
|
+
/**
|
116
|
+
* Convert a namespace string into a cid
|
117
|
+
*/
|
118
|
+
export async function namespaceToCid (namespace: string): Promise<CID> {
|
119
|
+
const bytes = new TextEncoder().encode(namespace)
|
120
|
+
const hash = await sha256.digest(bytes)
|
121
|
+
|
122
|
+
return CID.createV0(hash)
|
123
|
+
}
|
124
|
+
|
125
|
+
/**
|
126
|
+
* returns number of ms between now and expiration time
|
127
|
+
*/
|
128
|
+
export function getExpirationMilliseconds (expireTimeSeconds: bigint): number {
|
129
|
+
const expireTimeMillis = expireTimeSeconds * BigInt(1000)
|
130
|
+
const currentTime = new Date().getTime()
|
131
|
+
|
132
|
+
// downcast to number to use with setTimeout
|
133
|
+
return Number(expireTimeMillis - BigInt(currentTime))
|
134
|
+
}
|