undici 7.0.0-alpha.4 → 7.0.0-alpha.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/docs/docs/api/Agent.md +14 -14
- package/docs/docs/api/BalancedPool.md +15 -15
- package/docs/docs/api/CacheStore.md +22 -3
- package/docs/docs/api/Client.md +11 -11
- package/docs/docs/api/Dispatcher.md +8 -8
- package/docs/docs/api/EnvHttpProxyAgent.md +12 -12
- package/docs/docs/api/MockAgent.md +3 -3
- package/docs/docs/api/MockClient.md +4 -4
- package/docs/docs/api/MockPool.md +2 -2
- package/docs/docs/api/Pool.md +14 -14
- package/docs/docs/api/PoolStats.md +1 -1
- package/docs/docs/api/ProxyAgent.md +3 -3
- package/docs/docs/api/RetryHandler.md +2 -2
- package/docs/docs/api/WebSocket.md +1 -1
- package/docs/docs/api/api-lifecycle.md +9 -9
- package/lib/cache/memory-cache-store.js +86 -230
- package/lib/handler/cache-handler.js +16 -5
- package/lib/interceptor/cache.js +119 -19
- package/lib/util/cache.js +35 -0
- package/lib/util/timers.js +19 -1
- package/package.json +1 -1
- package/types/cache-interceptor.d.ts +27 -34
package/docs/docs/api/api-lifecycle.md

@@ -58,33 +58,33 @@ stateDiagram-v2
 
 ### idle
 
-The **idle** state is the initial state of a `Client` instance. While an `origin` is required for instantiating a `Client` instance, the underlying socket connection will not be established until a request is queued using [`Client.dispatch()`](Client.md#clientdispatchoptions-handlers). By calling `Client.dispatch()` directly or using one of the multiple implementations ([`Client.connect()`](Client.md#clientconnectoptions-callback), [`Client.pipeline()`](Client.md#clientpipelineoptions-handler), [`Client.request()`](Client.md#clientrequestoptions-callback), [`Client.stream()`](Client.md#clientstreamoptions-factory-callback), and [`Client.upgrade()`](Client.md#clientupgradeoptions-callback)), the `Client` instance will transition from **idle** to [**pending**](#pending) and then most likely directly to [**processing**](#processing).
+The **idle** state is the initial state of a `Client` instance. While an `origin` is required for instantiating a `Client` instance, the underlying socket connection will not be established until a request is queued using [`Client.dispatch()`](/docs/docs/api/Client.md#clientdispatchoptions-handlers). By calling `Client.dispatch()` directly or using one of the multiple implementations ([`Client.connect()`](Client.md#clientconnectoptions-callback), [`Client.pipeline()`](Client.md#clientpipelineoptions-handler), [`Client.request()`](Client.md#clientrequestoptions-callback), [`Client.stream()`](Client.md#clientstreamoptions-factory-callback), and [`Client.upgrade()`](/docs/docs/api/Client.md#clientupgradeoptions-callback)), the `Client` instance will transition from **idle** to [**pending**](/docs/docs/api/Client.md#pending) and then most likely directly to [**processing**](/docs/docs/api/Client.md#processing).
 
-Calling [`Client.close()`](Client.md#clientclosecallback) or [`Client.destroy()`](Client.md#clientdestroyerror-callback) transitions directly to the [**destroyed**](#destroyed) state since the `Client` instance will have no queued requests in this state.
+Calling [`Client.close()`](/docs/docs/api/Client.md#clientclosecallback) or [`Client.destroy()`](Client.md#clientdestroyerror-callback) transitions directly to the [**destroyed**](/docs/docs/api/Client.md#destroyed) state since the `Client` instance will have no queued requests in this state.
 
 ### pending
 
-The **pending** state signifies a non-processing `Client`. Upon entering this state, the `Client` establishes a socket connection and emits the [`'connect'`](Client.md#event-connect) event signalling a connection was successfully established with the `origin` provided during `Client` instantiation. The internal queue is initially empty, and requests can start queueing.
+The **pending** state signifies a non-processing `Client`. Upon entering this state, the `Client` establishes a socket connection and emits the [`'connect'`](/docs/docs/api/Client.md#event-connect) event signalling a connection was successfully established with the `origin` provided during `Client` instantiation. The internal queue is initially empty, and requests can start queueing.
 
-Calling [`Client.close()`](Client.md#clientclosecallback) with queued requests, transitions the `Client` to the [**processing**](#processing) state. Without queued requests, it transitions to the [**destroyed**](#destroyed) state.
+Calling [`Client.close()`](/docs/docs/api/Client.md#clientclosecallback) with queued requests, transitions the `Client` to the [**processing**](/docs/docs/api/Client.md#processing) state. Without queued requests, it transitions to the [**destroyed**](/docs/docs/api/Client.md#destroyed) state.
 
-Calling [`Client.destroy()`](Client.md#clientdestroyerror-callback) transitions directly to the [**destroyed**](#destroyed) state regardless of existing requests.
+Calling [`Client.destroy()`](/docs/docs/api/Client.md#clientdestroyerror-callback) transitions directly to the [**destroyed**](/docs/docs/api/Client.md#destroyed) state regardless of existing requests.
 
 ### processing
 
-The **processing** state is a state machine within itself. It initializes to the [**processing.running**](#running) state. The [`Client.dispatch()`](Client.md#clientdispatchoptions-handlers), [`Client.close()`](Client.md#clientclosecallback), and [`Client.destroy()`](Client.md#clientdestroyerror-callback) can be called at any time while the `Client` is in this state. `Client.dispatch()` will add more requests to the queue while existing requests continue to be processed. `Client.close()` will transition to the [**processing.closing**](#closing) state. And `Client.destroy()` will transition to [**destroyed**](#destroyed).
+The **processing** state is a state machine within itself. It initializes to the [**processing.running**](/docs/docs/api/Client.md#running) state. The [`Client.dispatch()`](/docs/docs/api/Client.md#clientdispatchoptions-handlers), [`Client.close()`](Client.md#clientclosecallback), and [`Client.destroy()`](Client.md#clientdestroyerror-callback) can be called at any time while the `Client` is in this state. `Client.dispatch()` will add more requests to the queue while existing requests continue to be processed. `Client.close()` will transition to the [**processing.closing**](/docs/docs/api/Client.md#closing) state. And `Client.destroy()` will transition to [**destroyed**](/docs/docs/api/Client.md#destroyed).
 
 #### running
 
-In the **processing.running** sub-state, queued requests are being processed in a FIFO order. If a request body requires draining, the *needDrain* event transitions to the [**processing.busy**](#busy) sub-state. The *close* event transitions the Client to the [**process.closing**](#closing) sub-state. If all queued requests are processed and neither [`Client.close()`](Client.md#clientclosecallback) nor [`Client.destroy()`](Client.md#clientdestroyerror-callback) are called, then the [**processing**](#processing) machine will trigger a *keepalive* event transitioning the `Client` back to the [**pending**](#pending) state. During this time, the `Client` is waiting for the socket connection to timeout, and once it does, it triggers the *timeout* event and transitions to the [**idle**](#idle) state.
+In the **processing.running** sub-state, queued requests are being processed in a FIFO order. If a request body requires draining, the *needDrain* event transitions to the [**processing.busy**](/docs/docs/api/Client.md#busy) sub-state. The *close* event transitions the Client to the [**process.closing**](/docs/docs/api/Client.md#closing) sub-state. If all queued requests are processed and neither [`Client.close()`](/docs/docs/api/Client.md#clientclosecallback) nor [`Client.destroy()`](Client.md#clientdestroyerror-callback) are called, then the [**processing**](/docs/docs/api/Client.md#processing) machine will trigger a *keepalive* event transitioning the `Client` back to the [**pending**](/docs/docs/api/Client.md#pending) state. During this time, the `Client` is waiting for the socket connection to timeout, and once it does, it triggers the *timeout* event and transitions to the [**idle**](/docs/docs/api/Client.md#idle) state.
 
 #### busy
 
-This sub-state is only entered when a request body is an instance of [Stream](https://nodejs.org/api/stream.html) and requires draining. The `Client` cannot process additional requests while in this state and must wait until the currently processing request body is completely drained before transitioning back to [**processing.running**](#running).
+This sub-state is only entered when a request body is an instance of [Stream](https://nodejs.org/api/stream.html) and requires draining. The `Client` cannot process additional requests while in this state and must wait until the currently processing request body is completely drained before transitioning back to [**processing.running**](/docs/docs/api/Client.md#running).
 
 #### closing
 
-This sub-state is only entered when a `Client` instance has queued requests and the [`Client.close()`](Client.md#clientclosecallback) method is called. In this state, the `Client` instance continues to process requests as usual, with the one exception that no additional requests can be queued. Once all of the queued requests are processed, the `Client` will trigger the *done* event gracefully entering the [**destroyed**](#destroyed) state without an error.
+This sub-state is only entered when a `Client` instance has queued requests and the [`Client.close()`](/docs/docs/api/Client.md#clientclosecallback) method is called. In this state, the `Client` instance continues to process requests as usual, with the one exception that no additional requests can be queued. Once all of the queued requests are processed, the `Client` will trigger the *done* event gracefully entering the [**destroyed**](/docs/docs/api/Client.md#destroyed) state without an error.
 
 ### destroyed
 
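The lifecycle described in these docs maps directly onto user code. The sketch below is illustrative only (it is not part of the diff, and the origin is a placeholder): a `Client` stays idle until the first request is dispatched, emits `'connect'` when it enters pending, processes queued requests, and `close()` lets the queue drain before the instance is destroyed.

```js
'use strict'

const { Client } = require('undici')

async function main () {
  // idle: the origin is recorded, but no socket is opened yet
  const client = new Client('https://example.com')

  // fired once the socket to the origin is established (pending state)
  client.on('connect', () => console.log('connected'))

  // queueing a request moves the client through pending into processing
  const { statusCode, body } = await client.request({ path: '/', method: 'GET' })
  console.log('status:', statusCode)
  await body.text()

  // close() finishes any queued requests, then transitions to destroyed
  await client.close()
}

main()
```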
package/lib/cache/memory-cache-store.js

@@ -1,28 +1,26 @@
 'use strict'
 
 const { Writable } = require('node:stream')
+const { nowAbsolute } = require('../util/timers.js')
 
 /**
+ * @typedef {import('../../types/cache-interceptor.d.ts').default.CacheKey} CacheKey
+ * @typedef {import('../../types/cache-interceptor.d.ts').default.CacheValue} CacheValue
  * @typedef {import('../../types/cache-interceptor.d.ts').default.CacheStore} CacheStore
+ * @typedef {import('../../types/cache-interceptor.d.ts').default.GetResult} GetResult
+ */
+
+/**
  * @implements {CacheStore}
- *
- * @typedef {{
- *  locked: boolean
- *  opts: import('../../types/cache-interceptor.d.ts').default.CachedResponse
- *  body?: Buffer[]
- * }} MemoryStoreValue
  */
 class MemoryCacheStore {
   #maxCount = Infinity
-
+  #maxSize = Infinity
   #maxEntrySize = Infinity
 
-  #entryCount = 0
-
-  /**
-   * @type {Map<string, Map<string, MemoryStoreValue[]>>}
-   */
-  #data = new Map()
+  #size = 0
+  #count = 0
+  #entries = new Map()
 
   /**
    * @param {import('../../types/cache-interceptor.d.ts').default.MemoryCacheStoreOpts | undefined} [opts]
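The new `CacheKey`/`CacheValue` typedefs replace the ad-hoc `MemoryStoreValue` shape. Their exact definitions live in `types/cache-interceptor.d.ts` (also changed in this release, not shown in this section); judging only from how the store uses them in the hunks below, the two objects look roughly like this (illustrative values):

```js
// A CacheKey identifies a request. Entries are bucketed by `origin:path`
// and then matched on method plus any vary-selected headers.
const key = {
  origin: 'https://example.com',
  path: '/resource',
  method: 'GET',
  headers: { accept: 'application/json' }
}

// A CacheValue describes the cached response. The timestamps are absolute
// millisecond values, compared against nowAbsolute() in get(); a `vary`
// object mapping header names to values may also be present.
const value = {
  statusCode: 200,
  statusMessage: 'OK',
  rawHeaders: [], // headers as captured by the cache handler
  etag: '"xyzzy"', // only set when the handler deems the ETag usable
  cachedAt: Date.now(),
  staleAt: Date.now() + 60_000,
  deleteAt: Date.now() + 300_000
}
```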
@@ -44,6 +42,17 @@ class MemoryCacheStore {
         this.#maxCount = opts.maxCount
       }
 
+      if (opts.maxSize !== undefined) {
+        if (
+          typeof opts.maxSize !== 'number' ||
+          !Number.isInteger(opts.maxSize) ||
+          opts.maxSize < 0
+        ) {
+          throw new TypeError('MemoryCacheStore options.maxSize must be a non-negative integer')
+        }
+        this.#maxSize = opts.maxSize
+      }
+
       if (opts.maxEntrySize !== undefined) {
         if (
           typeof opts.maxEntrySize !== 'number' ||
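A usage sketch for the new limit: `maxSize` caps the total byte size of cached bodies, alongside the existing `maxCount` and `maxEntrySize`, and invalid values are rejected with the `TypeError` shown above. The import assumes the store is exposed from the package root as `cacheStores.MemoryCacheStore`; otherwise it can be loaded directly from `lib/cache/memory-cache-store.js`.

```js
'use strict'

const { cacheStores } = require('undici') // assumed export, see note above

// Cap the cache at 1000 entries / 64 MiB overall, and skip bodies over 1 MiB.
const store = new cacheStores.MemoryCacheStore({
  maxCount: 1000,
  maxSize: 64 * 1024 * 1024,
  maxEntrySize: 1024 * 1024
})

console.log(store instanceof cacheStores.MemoryCacheStore) // true

// Non-integer or negative limits throw:
// new cacheStores.MemoryCacheStore({ maxSize: -1 })
// -> TypeError: MemoryCacheStore options.maxSize must be a non-negative integer
```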
@@ -57,12 +66,8 @@ class MemoryCacheStore {
     }
   }
 
-  get isFull () {
-    return this.#entryCount >= this.#maxCount
-  }
-
   /**
-   * @param {import('../../types/cache-interceptor.d.ts').default.CacheKey}
+   * @param {import('../../types/cache-interceptor.d.ts').default.CacheKey} req
    * @returns {import('../../types/cache-interceptor.d.ts').default.GetResult | undefined}
    */
   get (key) {
@@ -70,256 +75,107 @@ class MemoryCacheStore {
       throw new TypeError(`expected key to be object, got ${typeof key}`)
     }
 
-    const values = this.#getValuesForRequest(key, false)
-    if (!values) {
-      return undefined
-    }
-
-    const value = this.#findValue(key, values)
-
-    if (!value || value.locked) {
-      return undefined
-    }
+    const topLevelKey = `${key.origin}:${key.path}`
 
-
+    const now = nowAbsolute()
+    const entry = this.#entries.get(topLevelKey)?.find((entry) => (
+      entry.deleteAt > now &&
+      entry.method === key.method &&
+      (entry.vary == null || Object.keys(entry.vary).every(headerName => entry.vary[headerName] === key.headers?.[headerName]))
+    ))
+
+    return entry == null
+      ? undefined
+      : {
+          statusMessage: entry.statusMessage,
+          statusCode: entry.statusCode,
+          rawHeaders: entry.rawHeaders,
+          body: entry.body,
+          etag: entry.etag,
+          cachedAt: entry.cachedAt,
+          staleAt: entry.staleAt,
+          deleteAt: entry.deleteAt
+        }
   }
 
   /**
    * @param {import('../../types/cache-interceptor.d.ts').default.CacheKey} key
-   * @param {import('../../types/cache-interceptor.d.ts').default.CachedResponse} opts
+   * @param {import('../../types/cache-interceptor.d.ts').default.CacheValue} val
    * @returns {Writable | undefined}
    */
-  createWriteStream (key, opts) {
+  createWriteStream (key, val) {
     if (typeof key !== 'object') {
       throw new TypeError(`expected key to be object, got ${typeof key}`)
     }
-    if (typeof opts !== 'object') {
-      throw new TypeError(`expected value to be object, got ${typeof opts}`)
-    }
-
-    if (this.isFull) {
-      return undefined
+    if (typeof val !== 'object') {
+      throw new TypeError(`expected value to be object, got ${typeof val}`)
     }
 
-    const values = this.#getValuesForRequest(key, true)
-
-    /**
-     * @type {(MemoryStoreValue & { index: number }) | undefined}
-     */
-    let value = this.#findValue(key, values)
-    let valueIndex = value?.index
-    if (!value) {
-      // The value doesn't already exist, meaning we haven't cached this
-      // response before. Let's assign it a value and insert it into our data
-      // property.
-
-      if (this.isFull) {
-        // Or not, we don't have space to add another response
-        return undefined
-      }
-
-      this.#entryCount++
-
-      value = {
-        locked: true,
-        opts
-      }
-
-      // We want to sort our responses in decending order by their deleteAt
-      // timestamps so that deleting expired responses is faster
-      if (
-        values.length === 0 ||
-        opts.deleteAt < values[values.length - 1].deleteAt
-      ) {
-        // Our value is either the only response for this path or our deleteAt
-        // time is sooner than all the other responses
-        values.push(value)
-        valueIndex = values.length - 1
-      } else if (opts.deleteAt >= values[0].deleteAt) {
-        // Our deleteAt is later than everyone elses
-        values.unshift(value)
-        valueIndex = 0
-      } else {
-        // We're neither in the front or the end, let's just binary search to
-        // find our stop we need to be in
-        let startIndex = 0
-        let endIndex = values.length
-        while (true) {
-          if (startIndex === endIndex) {
-            values.splice(startIndex, 0, value)
-            break
-          }
-
-          const middleIndex = Math.floor((startIndex + endIndex) / 2)
-          const middleValue = values[middleIndex]
-          if (opts.deleteAt === middleIndex) {
-            values.splice(middleIndex, 0, value)
-            valueIndex = middleIndex
-            break
-          } else if (opts.deleteAt > middleValue.opts.deleteAt) {
-            endIndex = middleIndex
-            continue
-          } else {
-            startIndex = middleIndex
-            continue
-          }
-        }
-      }
-    } else {
-      // Check if there's already another request writing to the value or
-      // a request reading from it
-      if (value.locked) {
-        return undefined
-      }
-
-      // Empty it so we can overwrite it
-      value.body = []
-    }
+    const topLevelKey = `${key.origin}:${key.path}`
 
-
-    /**
-     * @type {Buffer[] | null}
-     */
-    let body = key.method !== 'HEAD' ? [] : null
-    const maxEntrySize = this.#maxEntrySize
+    const store = this
+    const entry = { ...key, ...val, body: [], size: 0 }
 
-    const writable = new Writable({
+    return new Writable({
       write (chunk, encoding, callback) {
-        if (key.method === 'HEAD') {
-          throw new Error('HEAD request shouldn\'t have a body')
-        }
-
-        if (!body) {
-          return callback()
-        }
-
         if (typeof chunk === 'string') {
           chunk = Buffer.from(chunk, encoding)
         }
 
-
+        entry.size += chunk.byteLength
 
-        if (
-
-
-
-          return callback()
+        if (entry.size >= store.#maxEntrySize) {
+          this.destroy()
+        } else {
+          entry.body.push(chunk)
         }
 
-        body.push(chunk)
-        callback()
+        callback(null)
       },
       final (callback) {
-
-        if (
-
+        let entries = store.#entries.get(topLevelKey)
+        if (!entries) {
+          entries = []
+          store.#entries.set(topLevelKey, entries)
+        }
+        entries.push(entry)
+
+        store.#size += entry.size
+        store.#count += 1
+
+        if (store.#size > store.#maxSize || store.#count > store.#maxCount) {
+          for (const [key, entries] of store.#entries) {
+            for (const entry of entries.splice(0, entries.length / 2)) {
+              store.#size -= entry.size
+              store.#count -= 1
+            }
+            if (entries.length === 0) {
+              store.#entries.delete(key)
+            }
+          }
         }
 
-        callback()
+        callback(null)
       }
     })
-
-    return writable
   }
 
   /**
-   * @param {
+   * @param {CacheKey} key
    */
   delete (key) {
-
-  }
-
-  /**
-   * Gets all of the requests of the same origin, path, and method. Does not
-   * take the `vary` property into account.
-   * @param {import('../../types/cache-interceptor.d.ts').default.CacheKey} key
-   * @param {boolean} [makeIfDoesntExist=false]
-   * @returns {MemoryStoreValue[] | undefined}
-   */
-  #getValuesForRequest (key, makeIfDoesntExist) {
-    // https://www.rfc-editor.org/rfc/rfc9111.html#section-2-3
-    const topLevelKey = `${key.origin}:${key.path}`
-    let cachedPaths = this.#data.get(topLevelKey)
-    if (!cachedPaths) {
-      if (!makeIfDoesntExist) {
-        return undefined
-      }
-
-      cachedPaths = new Map()
-      this.#data.set(topLevelKey, cachedPaths)
-    }
-
-    let value = cachedPaths.get(key.method)
-    if (!value && makeIfDoesntExist) {
-      value = []
-      cachedPaths.set(key.method, value)
+    if (typeof key !== 'object') {
+      throw new TypeError(`expected key to be object, got ${typeof key}`)
     }
 
-    return value
-  }
-
-  /**
-   * Given a list of values of a certain request, this decides the best value
-   * to respond with.
-   * @param {import('../../types/cache-interceptor.d.ts').default.CacheKey} req
-   * @param {MemoryStoreValue[]} values
-   * @returns {(MemoryStoreValue & { index: number }) | undefined}
-   */
-  #findValue (req, values) {
-    /**
-     * @type {MemoryStoreValue | undefined}
-     */
-    let value
-    const now = Date.now()
-    for (let i = values.length - 1; i >= 0; i--) {
-      const current = values[i]
-      const currentCacheValue = current.opts
-      if (now >= currentCacheValue.deleteAt) {
-        // We've reached expired values, let's delete them
-        this.#entryCount -= values.length - i
-        values.length = i
-        break
-      }
-
-      let matches = true
-
-      if (currentCacheValue.vary) {
-        if (!req.headers) {
-          matches = false
-          break
-        }
-
-        for (const key in currentCacheValue.vary) {
-          if (currentCacheValue.vary[key] !== req.headers[key]) {
-            matches = false
-            break
-          }
-        }
-      }
+    const topLevelKey = `${key.origin}:${key.path}`
 
-      if (matches) {
-        value = {
-          ...current,
-          index: i
-        }
-        break
-      }
+    for (const entry of this.#entries.get(topLevelKey) ?? []) {
+      this.#size -= entry.size
+      this.#count -= 1
     }
-
-    return value
-  }
-}
-
-/**
- * @param {any[]} array Array to modify
- * @param {number} idx Index to delete
- */
-function shiftAtIndex (array, idx) {
-  for (let i = idx + 1; idx < array.length; i++) {
-    array[i - 1] = array[i]
+    this.#entries.delete(topLevelKey)
   }
-
-  array.length--
 }
 
 module.exports = MemoryCacheStore
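Taken together, the rewrite stores one flat array of entries per `origin:path` bucket, tracks `#size`/`#count`, and evicts half of each bucket once `maxSize` or `maxCount` is exceeded. A small sketch of the resulting store contract (illustrative values; the `cacheStores` export is assumed, as noted above):

```js
'use strict'

const { cacheStores } = require('undici') // assumed export, see note above

const store = new cacheStores.MemoryCacheStore({ maxEntrySize: 1024 * 1024 })

const key = { origin: 'https://example.com', path: '/data', method: 'GET', headers: {} }
const value = {
  statusCode: 200,
  statusMessage: 'OK',
  rawHeaders: [],
  cachedAt: Date.now(),
  staleAt: Date.now() + 60_000,
  deleteAt: Date.now() + 300_000 // get() only returns entries whose deleteAt is in the future
}

// createWriteStream() returns a Writable that buffers body chunks and commits
// the entry (plus the size/count bookkeeping) in its final() callback.
const writable = store.createWriteStream(key, value)
writable.end('{"hello":"world"}')

writable.on('close', () => {
  const hit = store.get(key) // undefined on a miss, after expiry, or on a vary mismatch
  if (hit) {
    console.log(hit.statusCode, Buffer.concat(hit.body).toString())
  }

  // delete() drops every entry stored under the same origin:path bucket
  store.delete(key)
})
```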
package/lib/handler/cache-handler.js

@@ -4,8 +4,10 @@ const util = require('../core/util')
 const DecoratorHandler = require('../handler/decorator-handler')
 const {
   parseCacheControlHeader,
-  parseVaryHeader
+  parseVaryHeader,
+  isEtagUsable
 } = require('../util/cache')
+const { nowAbsolute } = require('../util/timers.js')
 
 function noop () {}
 
@@ -121,7 +123,7 @@ class CacheHandler extends DecoratorHandler {
       return downstreamOnHeaders()
     }
 
-    const now = Date.now()
+    const now = nowAbsolute()
     const staleAt = determineStaleAt(now, headers, cacheControlDirectives)
     if (staleAt) {
       const varyDirectives = this.#cacheKey.headers && headers.vary
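`nowAbsolute()` comes from `lib/util/timers.js`, which also changed in this release (+19/-1) but is not shown in this section. Judging by its call sites it is used as a drop-in replacement for `Date.now()`. The sketch below is purely an assumption about what such a helper can look like (a wall-clock read kept warm by a coarse timer), not undici's actual implementation:

```js
// Hypothetical stand-in for nowAbsolute(): an absolute (wall-clock) timestamp
// refreshed by a coarse timer instead of calling Date.now() on every use.
// The real helper lives in lib/util/timers.js and may behave differently.
let cachedNow = Date.now()

const clock = setInterval(() => {
  cachedNow = Date.now()
}, 500)
clock.unref() // don't keep the event loop alive just for the clock

function nowAbsolute () {
  return cachedNow
}

module.exports = { nowAbsolute }
```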
@@ -135,7 +137,10 @@ class CacheHandler extends DecoratorHandler {
         cacheControlDirectives
       )
 
-      this.#writeStream = this.#store.createWriteStream(this.#cacheKey, {
+      /**
+       * @type {import('../../types/cache-interceptor.d.ts').default.CacheValue}
+       */
+      const value = {
         statusCode,
         statusMessage,
         rawHeaders: strippedHeaders,
@@ -143,7 +148,13 @@ class CacheHandler extends DecoratorHandler {
         cachedAt: now,
         staleAt,
         deleteAt
-      })
+      }
+
+      if (typeof headers.etag === 'string' && isEtagUsable(headers.etag)) {
+        value.etag = headers.etag
+      }
+
+      this.#writeStream = this.#store.createWriteStream(this.#cacheKey, value)
 
       if (this.#writeStream) {
         const handler = this
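Storing the `etag` is what makes conditional revalidation possible once an entry goes stale. The revalidation logic itself lives in `lib/interceptor/cache.js` (+119/-19, not shown in this section), so the sketch below only illustrates the standard RFC 9111 flow that a stored ETag enables; it is not undici's interceptor code:

```js
'use strict'

const { request } = require('undici')

// Illustrative only: revalidate a stale cached entry with its stored ETag.
// `cached` stands for a CacheValue-like object as written by the handler above.
async function revalidate (origin, path, cached) {
  const { statusCode, headers, body } = await request(`${origin}${path}`, {
    headers: { 'if-none-match': cached.etag }
  })

  if (statusCode === 304) {
    await body.dump() // a 304 carries no body; the cached one is still valid
    return cached     // freshness fields would be recomputed from the 304 headers
  }

  // Otherwise the origin sent a fresh response that should replace the entry.
  return { statusCode, headers, body }
}

module.exports = { revalidate }
```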
@@ -300,7 +311,7 @@ function determineStaleAt (now, headers, cacheControlDirectives) {
     // https://www.rfc-editor.org/rfc/rfc9111.html#section-5.3
     const expiresDate = new Date(headers.expire)
     if (expiresDate instanceof Date && !isNaN(expiresDate)) {
-      return now + (
+      return now + (nowAbsolute() - expiresDate.getTime())
     }
   }
 