opal-up 0.0.4 → 0.0.5

@@ -1,5 +1,5 @@
  /*
- * Authored by Alex Hultman, 2018-2020.
+ * Authored by Alex Hultman, 2018-2024.
  * Intellectual property of third-party.

  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,21 +20,22 @@

  // todo: HttpParser is in need of a few clean-ups and refactorings

- /* The HTTP parser is an independent module subject to unit testing / fuzz testing */
+ /* The HTTP parser is an independent module subject to unit testing / fuzz
+ * testing */

- #include <string>
- #include <cstring>
+ #include "ChunkedEncoding.h"
+ #include "MoveOnlyFunction.h"
  #include <algorithm>
  #include <climits>
- #include <string_view>
+ #include <cstring>
  #include <map>
- #include "MoveOnlyFunction.h"
- #include "ChunkedEncoding.h"
+ #include <string>
+ #include <string_view>

  #include "BloomFilter.h"
+ #include "HttpErrors.h"
  #include "ProxyParser.h"
  #include "QueryParser.h"
- #include "HttpErrors.h"

  namespace uWS {

@@ -43,638 +44,705 @@ static const unsigned int MINIMUM_HTTP_POST_PADDING = 32;
43
44
  static void *FULLPTR = (void *)~(uintptr_t)0;
44
45
 
45
46
  /* STL needs one of these */
46
- template <typename T>
47
- std::optional<T *> optional_ptr(T *ptr) {
48
- return ptr ? std::optional<T *>(ptr) : std::nullopt;
47
+ template <typename T> std::optional<T *> optional_ptr(T *ptr) {
48
+ return ptr ? std::optional<T *>(ptr) : std::nullopt;
49
49
  }
50
50
 
51
- static const size_t MAX_FALLBACK_SIZE = (size_t) atoi(optional_ptr(getenv("UWS_HTTP_MAX_HEADERS_SIZE")).value_or((char *) "4096"));
51
+ static const size_t MAX_FALLBACK_SIZE = (size_t)atoi(
52
+ optional_ptr(getenv("UWS_HTTP_MAX_HEADERS_SIZE")).value_or((char *)"4096"));
52
53
  #ifndef UWS_HTTP_MAX_HEADERS_COUNT
53
54
  #define UWS_HTTP_MAX_HEADERS_COUNT 100
54
55
  #endif
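
As context, both limits above are configurable: UWS_HTTP_MAX_HEADERS_COUNT is a compile-time macro (default 100), while the fallback buffer size is read once at startup from the UWS_HTTP_MAX_HEADERS_SIZE environment variable, defaulting to 4096 bytes. A minimal standalone sketch of that lookup, assuming nothing beyond the C runtime (the helper name readMaxFallbackSize is hypothetical):

#include <cstdio>
#include <cstdlib>

/* Hypothetical restatement of the env-var lookup above: read
 * UWS_HTTP_MAX_HEADERS_SIZE and fall back to 4096 bytes when unset. */
static size_t readMaxFallbackSize() {
    const char *env = std::getenv("UWS_HTTP_MAX_HEADERS_SIZE");
    return (size_t) std::atoi(env ? env : "4096");
}

int main() {
    std::printf("max fallback size: %zu bytes\n", readMaxFallbackSize());
    return 0;
}
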
55
56
 
56
57
  struct HttpRequest {
57
58
 
58
- friend struct HttpParser;
59
+ friend struct HttpParser;
59
60
 
60
61
  private:
61
- struct Header {
62
- std::string_view key, value;
63
- } headers[UWS_HTTP_MAX_HEADERS_COUNT];
64
- bool ancientHttp;
65
- unsigned int querySeparator;
66
- bool didYield;
67
- BloomFilter bf;
68
- std::pair<int, std::string_view *> currentParameters;
69
- std::map<std::string, unsigned short, std::less<>> *currentParameterOffsets = nullptr;
62
+ struct Header {
63
+ std::string_view key, value;
64
+ } headers[UWS_HTTP_MAX_HEADERS_COUNT];
65
+ bool ancientHttp;
66
+ unsigned int querySeparator;
67
+ bool didYield;
68
+ BloomFilter bf;
69
+ std::pair<int, std::string_view *> currentParameters;
70
+ std::map<std::string, unsigned short, std::less<>> *currentParameterOffsets =
71
+ nullptr;
70
72
 
71
73
  public:
72
- bool isAncient() {
73
- return ancientHttp;
74
- }
75
-
76
- bool getYield() {
77
- return didYield;
78
- }
79
-
80
- /* Iteration over headers (key, value) */
81
- struct HeaderIterator {
82
- Header *ptr;
83
-
84
- bool operator!=(const HeaderIterator &other) const {
85
- /* Comparison with end is a special case */
86
- if (ptr != other.ptr) {
87
- return other.ptr || ptr->key.length();
88
- }
89
- return false;
90
- }
91
-
92
- HeaderIterator &operator++() {
93
- ptr++;
94
- return *this;
95
- }
74
+ bool isAncient() { return ancientHttp; }
96
75
 
97
- std::pair<std::string_view, std::string_view> operator*() const {
98
- return {ptr->key, ptr->value};
99
- }
100
- };
101
-
102
- HeaderIterator begin() {
103
- return {headers + 1};
104
- }
76
+ bool getYield() { return didYield; }
105
77
 
106
- HeaderIterator end() {
107
- return {nullptr};
108
- }
78
+ /* Iteration over headers (key, value) */
79
+ struct HeaderIterator {
80
+ Header *ptr;
109
81
 
110
- /* If you do not want to handle this route */
111
- void setYield(bool yield) {
112
- didYield = yield;
113
- }
114
-
115
- std::string_view getHeader(std::string_view lowerCasedHeader) {
116
- if (bf.mightHave(lowerCasedHeader)) {
117
- for (Header *h = headers; (++h)->key.length(); ) {
118
- if (h->key.length() == lowerCasedHeader.length() && !strncmp(h->key.data(), lowerCasedHeader.data(), lowerCasedHeader.length())) {
119
- return h->value;
120
- }
121
- }
122
- }
123
- return std::string_view(nullptr, 0);
82
+ bool operator!=(const HeaderIterator &other) const {
83
+ /* Comparison with end is a special case */
84
+ if (ptr != other.ptr) {
85
+ return other.ptr || ptr->key.length();
86
+ }
87
+ return false;
124
88
  }
125
89
 
126
- std::string_view getUrl() {
127
- return std::string_view(headers->value.data(), querySeparator);
90
+ HeaderIterator &operator++() {
91
+ ptr++;
92
+ return *this;
128
93
  }
129
94
 
130
- std::string_view getFullUrl() {
131
- return std::string_view(headers->value.data(), headers->value.length());
95
+ std::pair<std::string_view, std::string_view> operator*() const {
96
+ return {ptr->key, ptr->value};
132
97
  }
98
+ };
133
99
 
134
- /* Hack: this should be getMethod */
135
- std::string_view getCaseSensitiveMethod() {
136
- return std::string_view(headers->key.data(), headers->key.length());
137
- }
100
+ HeaderIterator begin() { return {headers + 1}; }
138
101
 
139
- std::string_view getMethod() {
140
- /* Compatibility hack: lower case method (todo: remove when major version bumps) */
141
- for (unsigned int i = 0; i < headers->key.length(); i++) {
142
- ((char *) headers->key.data())[i] |= 32;
143
- }
102
+ HeaderIterator end() { return {nullptr}; }
144
103
 
145
- return std::string_view(headers->key.data(), headers->key.length());
146
- }
104
+ /* If you do not want to handle this route */
105
+ void setYield(bool yield) { didYield = yield; }
147
106
 
148
- /* Returns the raw querystring as a whole, still encoded */
149
- std::string_view getQuery() {
150
- if (querySeparator < headers->value.length()) {
151
- /* Strip the initial ? */
152
- return std::string_view(headers->value.data() + querySeparator + 1, headers->value.length() - querySeparator - 1);
153
- } else {
154
- return std::string_view(nullptr, 0);
107
+ std::string_view getHeader(std::string_view lowerCasedHeader) {
108
+ if (bf.mightHave(lowerCasedHeader)) {
109
+ for (Header *h = headers; (++h)->key.length();) {
110
+ if (h->key.length() == lowerCasedHeader.length() &&
111
+ !strncmp(h->key.data(), lowerCasedHeader.data(),
112
+ lowerCasedHeader.length())) {
113
+ return h->value;
155
114
  }
115
+ }
156
116
  }
157
-
158
- /* Finds and decodes the URI component. */
159
- std::string_view getQuery(std::string_view key) {
160
- /* Raw querystring including initial '?' sign */
161
- std::string_view queryString = std::string_view(headers->value.data() + querySeparator, headers->value.length() - querySeparator);
162
-
163
- return getDecodedQueryValue(key, queryString);
117
+ return std::string_view(nullptr, 0);
118
+ }
119
+
120
+ std::string_view getUrl() {
121
+ return std::string_view(headers->value.data(), querySeparator);
122
+ }
123
+
124
+ std::string_view getFullUrl() {
125
+ return std::string_view(headers->value.data(), headers->value.length());
126
+ }
127
+
128
+ /* Hack: this should be getMethod */
129
+ std::string_view getCaseSensitiveMethod() {
130
+ return std::string_view(headers->key.data(), headers->key.length());
131
+ }
132
+
133
+ std::string_view getMethod() {
134
+ /* Compatibility hack: lower case method (todo: remove when major version
135
+ * bumps) */
136
+ for (unsigned int i = 0; i < headers->key.length(); i++) {
137
+ ((char *)headers->key.data())[i] |= 32;
164
138
  }
165
139
 
166
- void setParameters(std::pair<int, std::string_view *> parameters) {
167
- currentParameters = parameters;
140
+ return std::string_view(headers->key.data(), headers->key.length());
141
+ }
142
+
143
+ /* Returns the raw querystring as a whole, still encoded */
144
+ std::string_view getQuery() {
145
+ if (querySeparator < headers->value.length()) {
146
+ /* Strip the initial ? */
147
+ return std::string_view(headers->value.data() + querySeparator + 1,
148
+ headers->value.length() - querySeparator - 1);
149
+ } else {
150
+ return std::string_view(nullptr, 0);
168
151
  }
169
-
170
- void setParameterOffsets(std::map<std::string, unsigned short, std::less<>> *offsets) {
171
- currentParameterOffsets = offsets;
152
+ }
153
+
154
+ /* Finds and decodes the URI component. */
155
+ std::string_view getQuery(std::string_view key) {
156
+ /* Raw querystring including initial '?' sign */
157
+ std::string_view queryString =
158
+ std::string_view(headers->value.data() + querySeparator,
159
+ headers->value.length() - querySeparator);
160
+
161
+ return getDecodedQueryValue(key, queryString);
162
+ }
163
+
164
+ void setParameters(std::pair<int, std::string_view *> parameters) {
165
+ currentParameters = parameters;
166
+ }
167
+
168
+ void setParameterOffsets(
169
+ std::map<std::string, unsigned short, std::less<>> *offsets) {
170
+ currentParameterOffsets = offsets;
171
+ }
172
+
173
+ std::string_view getParameter(std::string_view name) {
174
+ if (!currentParameterOffsets) {
175
+ return {nullptr, 0};
172
176
  }
173
-
174
- std::string_view getParameter(std::string_view name) {
175
- if (!currentParameterOffsets) {
176
- return {nullptr, 0};
177
- }
178
- auto it = currentParameterOffsets->find(name);
179
- if (it == currentParameterOffsets->end()) {
180
- return {nullptr, 0};
181
- }
182
- return getParameter(it->second);
177
+ auto it = currentParameterOffsets->find(name);
178
+ if (it == currentParameterOffsets->end()) {
179
+ return {nullptr, 0};
183
180
  }
184
-
185
- std::string_view getParameter(unsigned short index) {
186
- if (currentParameters.first < (int) index) {
187
- return {};
188
- } else {
189
- return currentParameters.second[index];
190
- }
181
+ return getParameter(it->second);
182
+ }
183
+
184
+ std::string_view getParameter(unsigned short index) {
185
+ if (currentParameters.first < (int)index) {
186
+ return {};
187
+ } else {
188
+ return currentParameters.second[index];
191
189
  }
192
-
190
+ }
193
191
  };
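
For orientation, these accessors are what route handlers ultimately call. A hedged usage sketch, assuming the uWS::App API that ships alongside this header; the route pattern, port, and header/query names are made up for illustration:

/* Illustrative only: assumes App.h from the same library is on the include path. */
#include "App.h"

int main() {
    uWS::App().get("/users/:id", [](auto *res, auto *req) {
        /* Route parameter 0 is the ":id" segment */
        std::string_view id = req->getParameter(0);
        /* Header names are matched lower-cased, via the bloom filter fast path */
        std::string_view agent = req->getHeader("user-agent");
        /* Decoded value of one query key, e.g. ?verbose=1 */
        std::string_view verbose = req->getQuery("verbose");
        (void) agent; (void) verbose;
        res->end(id);
    }).listen(3000, [](auto *listenSocket) {
        /* listenSocket is nullptr if the port could not be bound */
        (void) listenSocket;
    }).run();
}
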
194
192
 
195
193
  struct HttpParser {
196
194
 
197
195
  private:
198
- std::string fallback;
199
- /* This guy really has only 30 bits since we reserve two highest bits to chunked encoding parsing state */
200
- uint64_t remainingStreamingBytes = 0;
201
-
202
- /* Returns UINT_MAX on error. Maximum 999999999 is allowed. */
203
- static uint64_t toUnsignedInteger(std::string_view str) {
204
- /* We assume at least 64-bit integer giving us safely 999999999999999999 (18 number of 9s) */
205
- if (str.length() > 18) {
206
- return UINT_MAX;
207
- }
196
+ std::string fallback;
197
+ /* This guy really has only 30 bits since we reserve two highest bits to
198
+ * chunked encoding parsing state */
199
+ uint64_t remainingStreamingBytes = 0;
200
+
201
+ /* Returns UINT64_MAX on error. Maximum 999999999999999999 is allowed. */
202
+ static uint64_t toUnsignedInteger(std::string_view str) {
203
+ /* We assume at least 64-bit integer giving us safely 999999999999999999 (18
204
+ * number of 9s) */
205
+ if (str.length() > 18) {
206
+ return UINT64_MAX;
207
+ }
208
208
 
209
- uint64_t unsignedIntegerValue = 0;
210
- for (char c : str) {
211
- /* As long as the letter is 0-9 we cannot overflow. */
212
- if (c < '0' || c > '9') {
213
- return UINT_MAX;
214
- }
215
- unsignedIntegerValue = unsignedIntegerValue * 10ull + ((unsigned int) c - (unsigned int) '0');
216
- }
217
- return unsignedIntegerValue;
209
+ uint64_t unsignedIntegerValue = 0;
210
+ for (char c : str) {
211
+ /* As long as the letter is 0-9 we cannot overflow. */
212
+ if (c < '0' || c > '9') {
213
+ return UINT64_MAX;
214
+ }
215
+ unsignedIntegerValue =
216
+ unsignedIntegerValue * 10ull + ((unsigned int)c - (unsigned int)'0');
218
217
  }
219
-
220
- /* RFC 9110 16.3.1 Field Name Registry (TLDR; alnum + hyphen is allowed)
221
- * [...] It MUST conform to the field-name syntax defined in Section 5.1,
222
- * and it SHOULD be restricted to just letters, digits,
223
- * and hyphen ('-') characters, with the first character being a letter. */
224
- static inline bool isFieldNameByte(unsigned char x) {
225
- return (x == '-') |
226
- ((x > '/') & (x < ':')) |
227
- ((x > '@') & (x < '[')) |
228
- ((x > 96) & (x < '{'));
218
+ return unsignedIntegerValue;
219
+ }
220
+
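
A small self-contained check of the rules above (strictly the digits 0-9, at most 18 characters, everything else rejected); the mirror function below only restates the logic for illustration:

#include <cassert>
#include <cstdint>
#include <string_view>

/* Illustrative mirror of toUnsignedInteger above: digits only, max 18 chars. */
static uint64_t parseContentLength(std::string_view str) {
    if (str.length() > 18) return UINT64_MAX;
    uint64_t value = 0;
    for (char c : str) {
        if (c < '0' || c > '9') return UINT64_MAX;
        value = value * 10ull + (uint64_t) (c - '0');
    }
    return value;
}

int main() {
    assert(parseContentLength("1024") == 1024);
    assert(parseContentLength("+10") == UINT64_MAX);  /* sign rejected */
    assert(parseContentLength(" 10") == UINT64_MAX);  /* whitespace rejected */
    assert(parseContentLength("9999999999999999999") == UINT64_MAX); /* 19 digits */
    return 0;
}
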
221
+ /* RFC 9110 5.6.2. Tokens */
222
+ static inline bool isFieldNameByte(unsigned char c) {
223
+ return (c > 32) & (c < 127) & (c != '(') & (c != ')') & (c != ',') &
224
+ (c != '/') & (c != ':') & (c != ';') & (c != '<') & (c != '=') &
225
+ (c != '>') & (c != '?') & (c != '@') & (c != '[') & (c != '\\') &
226
+ (c != ']') & (c != '{') & (c != '}');
227
+ }
228
+
229
+ static inline uint64_t hasLess(uint64_t x, uint64_t n) {
230
+ return (((x) - ~0ULL / 255 * (n)) & ~(x) & ~0ULL / 255 * 128);
231
+ }
232
+
233
+ static inline uint64_t hasMore(uint64_t x, uint64_t n) {
234
+ return ((((x) + ~0ULL / 255 * (127 - (n))) | (x)) & ~0ULL / 255 * 128);
235
+ }
236
+
237
+ static inline uint64_t hasBetween(uint64_t x, uint64_t m, uint64_t n) {
238
+ return (((~0ULL / 255 * (127 + (n)) - ((x) & ~0ULL / 255 * 127)) & ~(x) &
239
+ (((x) & ~0ULL / 255 * 127) + ~0ULL / 255 * (127 - (m)))) &
240
+ ~0ULL / 255 * 128);
241
+ }
242
+
243
+ static inline bool notFieldNameWord(uint64_t x) {
244
+ return hasLess(x, '-') | hasBetween(x, '-', '0') | hasBetween(x, '9', 'A') |
245
+ hasBetween(x, 'Z', 'a') | hasMore(x, 'z');
246
+ }
247
+
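
These are standard SWAR (SIMD within a register) predicates: each one tests all eight bytes of a word at once and yields a nonzero value when some byte falls in the given range, which is what lets consumeFieldName and tryConsumeFieldValue advance eight bytes per iteration. A standalone demonstration of hasLess, restated here purely for illustration:

#include <cassert>
#include <cstdint>
#include <cstring>

/* Restated from above: nonzero iff any byte of x is strictly less than n (valid for n <= 128). */
static inline uint64_t hasLess(uint64_t x, uint64_t n) {
    return (((x) - ~0ULL / 255 * (n)) & ~(x) & ~0ULL / 255 * 128);
}

int main() {
    uint64_t word;

    /* No byte below 32: the scanner may safely skip ahead a full 8 bytes. */
    std::memcpy(&word, "Host: ab", 8);
    assert(hasLess(word, 32) == 0);

    /* Contains '\r' (13): some byte is below 32, so the per-byte loop takes over. */
    std::memcpy(&word, "ab\r\ncdef", 8);
    assert(hasLess(word, 32) != 0);
    return 0;
}
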
248
+ static inline void *consumeFieldName(char *p) {
249
+ // for (; true; p += 8) {
250
+ // uint64_t word;
251
+ // memcpy(&word, p, sizeof(uint64_t));
252
+ // if (notFieldNameWord(word)) {
253
+ while (isFieldNameByte(*(unsigned char *)p)) {
254
+ *(p++) |= 0x20;
229
255
  }
230
-
231
- static inline uint64_t hasLess(uint64_t x, uint64_t n) {
232
- return (((x)-~0ULL/255*(n))&~(x)&~0ULL/255*128);
256
+ return (void *)p;
257
+ //}
258
+ // word |= 0x2020202020202020ull;
259
+ // memcpy(p, &word, sizeof(uint64_t));
260
+ //}
261
+ }
262
+
263
+ /* Puts method as key, target as value and returns non-null (or nullptr on
264
+ * error). */
265
+ static inline char *consumeRequestLine(char *data,
266
+ HttpRequest::Header &header) {
267
+ /* Scan until single SP, assume next is / (origin request) */
268
+ char *start = data;
269
+ /* This catches the post padded CR and fails */
270
+ while (data[0] > 32)
271
+ data++;
272
+ if (data[0] == 32 && data[1] == '/') {
273
+ header.key = {start, (size_t)(data - start)};
274
+ data++;
275
+ /* Scan for less than 33 (catches post padded CR and fails) */
276
+ start = data;
277
+ for (; true; data += 8) {
278
+ uint64_t word;
279
+ memcpy(&word, data, sizeof(uint64_t));
280
+ if (hasLess(word, 33)) {
281
+ while (*(unsigned char *)data > 32)
282
+ data++;
283
+ /* Now we stand on space */
284
+ header.value = {start, (size_t)(data - start)};
285
+ /* Check that the following is http 1.1 */
286
+ if (memcmp(" HTTP/1.1\r\n", data, 11) == 0) {
287
+ return data + 11;
288
+ }
289
+ return nullptr;
290
+ }
291
+ }
233
292
  }
234
-
235
- static inline uint64_t hasMore(uint64_t x, uint64_t n) {
236
- return (( ((x)+~0ULL/255*(127-(n))) |(x))&~0ULL/255*128);
293
+ return nullptr;
294
+ }
295
+
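
In other words, only the origin-form HTTP/1.1 request line is accepted: a method token, a single space, a target beginning with '/', a single space, and the literal " HTTP/1.1\r\n"; absolute-form targets and other versions fall through to nullptr. A tiny illustrative mirror of that acceptance shape (not the real parser, and without the post-padding fence):

#include <cassert>
#include <string>

/* Illustrative check mirroring consumeRequestLine's accepted shape:
 * METHOD SP /target SP "HTTP/1.1\r\n". */
static bool looksLikeAcceptedRequestLine(const std::string &line) {
    size_t sp1 = line.find(' ');
    if (sp1 == std::string::npos || sp1 == 0) return false;
    if (sp1 + 1 >= line.size() || line[sp1 + 1] != '/') return false;
    size_t sp2 = line.find(' ', sp1 + 1);
    if (sp2 == std::string::npos) return false;
    return line.compare(sp2, std::string::npos, " HTTP/1.1\r\n") == 0;
}

int main() {
    assert(looksLikeAcceptedRequestLine("GET /index.html HTTP/1.1\r\n"));
    assert(!looksLikeAcceptedRequestLine("GET http://h/ HTTP/1.1\r\n")); /* absolute-form */
    assert(!looksLikeAcceptedRequestLine("GET / HTTP/1.0\r\n"));         /* wrong version */
    return 0;
}
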
296
+ /* RFC 9110: 5.5 Field Values (TLDR; anything above 31 is allowed; htab (9) is
297
+ * also allowed) Field values are usually constrained to the range of US-ASCII
298
+ * characters [...] Field values containing CR, LF, or NUL characters are
299
+ * invalid and dangerous [...] Field values containing other CTL characters
300
+ * are also invalid. */
301
+ static inline void *tryConsumeFieldValue(char *p) {
302
+ for (; true; p += 8) {
303
+ uint64_t word;
304
+ memcpy(&word, p, sizeof(uint64_t));
305
+ if (hasLess(word, 32)) {
306
+ while (*(unsigned char *)p > 31)
307
+ p++;
308
+ return (void *)p;
309
+ }
237
310
  }
238
-
239
- static inline uint64_t hasBetween(uint64_t x, uint64_t m, uint64_t n) {
240
- return (( (~0ULL/255*(127+(n))-((x)&~0ULL/255*127)) &~(x)& (((x)&~0ULL/255*127)+~0ULL/255*(127-(m))) )&~0ULL/255*128);
311
+ }
312
+
313
+ /* End is only used for the proxy parser. The HTTP parser recognizes "\ra" as
314
+ * invalid "\r\n" scan and breaks. */
315
+ static unsigned int getHeaders(char *postPaddedBuffer, char *end,
316
+ struct HttpRequest::Header *headers,
317
+ void *reserved, unsigned int &err) {
318
+ char *preliminaryKey, *preliminaryValue, *start = postPaddedBuffer;
319
+
320
+ #ifdef UWS_WITH_PROXY
321
+ /* ProxyParser is passed as reserved parameter */
322
+ ProxyParser *pp = (ProxyParser *)reserved;
323
+
324
+ /* Parse PROXY protocol */
325
+ auto [done, offset] =
326
+ pp->parse({postPaddedBuffer, (size_t)(end - postPaddedBuffer)});
327
+ if (!done) {
328
+ /* We do not reset the ProxyParser (on failure) since it is tied to this
329
+ * connection, which is really only supposed to ever get one PROXY frame
330
+ * anyways. We do however allow multiple PROXY frames to be sent
331
+ * (overwrites former). */
332
+ return 0;
333
+ } else {
334
+ /* We have consumed this data so skip it */
335
+ postPaddedBuffer += offset;
241
336
  }
337
+ #else
338
+ /* This one is unused */
339
+ (void)reserved;
340
+ (void)end;
341
+ #endif
242
342
 
243
- static inline bool notFieldNameWord(uint64_t x) {
244
- return hasLess(x, '-') |
245
- hasBetween(x, '-', '0') |
246
- hasBetween(x, '9', 'A') |
247
- hasBetween(x, 'Z', 'a') |
248
- hasMore(x, 'z');
343
+ /* It is critical for fallback buffering logic that we only return with
344
+ * success if we managed to parse a complete HTTP request (minus data).
345
+ * Returning success for PROXY means we could succeed while leaving bytes in
+ * the fallback buffer; that buffer is then cleared, our counters flip due to
+ * overflow, and we end up with a crash */
348
+
349
+ /* The request line is different from the field names / field values */
350
+ if (!(postPaddedBuffer =
351
+ consumeRequestLine(postPaddedBuffer, headers[0]))) {
352
+ /* Error - invalid request line */
353
+ /* Assuming it is 505 HTTP Version Not Supported */
354
+ err = HTTP_ERROR_505_HTTP_VERSION_NOT_SUPPORTED;
355
+ return 0;
249
356
  }
250
-
251
- static inline void *consumeFieldName(char *p) {
252
- for (; true; p += 8) {
253
- uint64_t word;
254
- memcpy(&word, p, sizeof(uint64_t));
255
- if (notFieldNameWord(word)) {
256
- while (isFieldNameByte(*(unsigned char *)p)) {
257
- *(p++) |= 0x20;
258
- }
259
- return (void *)p;
260
- }
261
- word |= 0x2020202020202020ull;
262
- memcpy(p, &word, sizeof(uint64_t));
357
+ headers++;
358
+
359
+ for (unsigned int i = 1; i < UWS_HTTP_MAX_HEADERS_COUNT - 1; i++) {
360
+ /* Lower case and consume the field name */
361
+ preliminaryKey = postPaddedBuffer;
362
+ postPaddedBuffer = (char *)consumeFieldName(postPaddedBuffer);
363
+ headers->key = std::string_view(
364
+ preliminaryKey, (size_t)(postPaddedBuffer - preliminaryKey));
365
+
366
+ /* We should not accept whitespace between key and colon, so colon must
367
+ * follow immediately */
368
+ if (postPaddedBuffer[0] != ':') {
369
+ /* Error: invalid chars in field name */
370
+ return 0;
371
+ }
372
+ postPaddedBuffer++;
373
+
374
+ preliminaryValue = postPaddedBuffer;
375
+ /* The goal of this call is to find next "\r\n", or any invalid field
376
+ * value chars, fast */
377
+ while (true) {
378
+ postPaddedBuffer = (char *)tryConsumeFieldValue(postPaddedBuffer);
379
+ /* If this is not CR then we caught some stinky invalid char on the way
380
+ */
381
+ if (postPaddedBuffer[0] != '\r') {
382
+ /* If TAB then keep searching */
383
+ if (postPaddedBuffer[0] == '\t') {
384
+ postPaddedBuffer++;
385
+ continue;
386
+ }
387
+ /* Error - invalid chars in field value */
388
+ return 0;
263
389
  }
264
- }
265
-
266
- /* Puts method as key, target as value and returns non-null (or nullptr on error). */
267
- static inline char *consumeRequestLine(char *data, HttpRequest::Header &header) {
268
- /* Scan until single SP, assume next is / (origin request) */
269
- char *start = data;
270
- /* This catches the post padded CR and fails */
271
- while (data[0] > 32) data++;
272
- if (data[0] == 32 && data[1] == '/') {
273
- header.key = {start, (size_t) (data - start)};
274
- data++;
275
- /* Scan for less than 33 (catches post padded CR and fails) */
276
- start = data;
277
- for (; true; data += 8) {
278
- uint64_t word;
279
- memcpy(&word, data, sizeof(uint64_t));
280
- if (hasLess(word, 33)) {
281
- while (*(unsigned char *)data > 32) data++;
282
- /* Now we stand on space */
283
- header.value = {start, (size_t) (data - start)};
284
- /* Check that the following is http 1.1 */
285
- if (memcmp(" HTTP/1.1\r\n", data, 11) == 0) {
286
- return data + 11;
287
- }
288
- return nullptr;
289
- }
290
- }
390
+ break;
391
+ }
392
+ /* We fence end[0] with \r, followed by end[1] being something that is
393
+ * "not \n", to signify "not found". This way we can have this one single
394
+ * check to see if we found \r\n WITHIN our allowed search space. */
395
+ if (postPaddedBuffer[1] == '\n') {
396
+ /* Store this header, it is valid */
397
+ headers->value = std::string_view(
398
+ preliminaryValue, (size_t)(postPaddedBuffer - preliminaryValue));
399
+ postPaddedBuffer += 2;
400
+
401
+ /* Trim trailing whitespace (SP, HTAB) */
402
+ while (headers->value.length() && headers->value.back() < 33) {
403
+ headers->value.remove_suffix(1);
291
404
  }
292
- return nullptr;
293
- }
294
405
 
295
- /* RFC 9110: 5.5 Field Values (TLDR; anything above 31 is allowed; htab (9) is also allowed)
296
- * Field values are usually constrained to the range of US-ASCII characters [...]
297
- * Field values containing CR, LF, or NUL characters are invalid and dangerous [...]
298
- * Field values containing other CTL characters are also invalid. */
299
- static inline void *tryConsumeFieldValue(char *p) {
300
- for (; true; p += 8) {
301
- uint64_t word;
302
- memcpy(&word, p, sizeof(uint64_t));
303
- if (hasLess(word, 32)) {
304
- while (*(unsigned char *)p > 31) p++;
305
- return (void *)p;
306
- }
406
+ /* Trim initial whitespace (SP, HTAB) */
407
+ while (headers->value.length() && headers->value.front() < 33) {
408
+ headers->value.remove_prefix(1);
307
409
  }
308
- }
309
410
 
310
- /* End is only used for the proxy parser. The HTTP parser recognizes "\ra" as invalid "\r\n" scan and breaks. */
311
- static unsigned int getHeaders(char *postPaddedBuffer, char *end, struct HttpRequest::Header *headers, void *reserved, unsigned int &err) {
312
- char *preliminaryKey, *preliminaryValue, *start = postPaddedBuffer;
313
-
314
- #ifdef UWS_WITH_PROXY
315
- /* ProxyParser is passed as reserved parameter */
316
- ProxyParser *pp = (ProxyParser *) reserved;
317
-
318
- /* Parse PROXY protocol */
319
- auto [done, offset] = pp->parse({postPaddedBuffer, (size_t) (end - postPaddedBuffer)});
320
- if (!done) {
321
- /* We do not reset the ProxyParser (on filure) since it is tied to this
322
- * connection, which is really only supposed to ever get one PROXY frame
323
- * anyways. We do however allow multiple PROXY frames to be sent (overwrites former). */
324
- return 0;
325
- } else {
326
- /* We have consumed this data so skip it */
327
- postPaddedBuffer += offset;
328
- }
329
- #else
330
- /* This one is unused */
331
- (void) reserved;
332
- (void) end;
333
- #endif
334
-
335
- /* It is critical for fallback buffering logic that we only return with success
336
- * if we managed to parse a complete HTTP request (minus data). Returning success
337
- * for PROXY means we can end up succeeding, yet leaving bytes in the fallback buffer
338
- * which is then removed, and our counters to flip due to overflow and we end up with a crash */
339
-
340
- /* The request line is different from the field names / field values */
341
- if (!(postPaddedBuffer = consumeRequestLine(postPaddedBuffer, headers[0]))) {
342
- /* Error - invalid request line */
343
- /* Assuming it is 505 HTTP Version Not Supported */
344
- err = HTTP_ERROR_505_HTTP_VERSION_NOT_SUPPORTED;
345
- return 0;
346
- }
347
411
  headers++;
348
412
 
349
- for (unsigned int i = 1; i < UWS_HTTP_MAX_HEADERS_COUNT - 1; i++) {
350
- /* Lower case and consume the field name */
351
- preliminaryKey = postPaddedBuffer;
352
- postPaddedBuffer = (char *) consumeFieldName(postPaddedBuffer);
353
- headers->key = std::string_view(preliminaryKey, (size_t) (postPaddedBuffer - preliminaryKey));
354
-
355
- /* We should not accept whitespace between key and colon, so colon must foloow immediately */
356
- if (postPaddedBuffer[0] != ':') {
357
- /* Error: invalid chars in field name */
358
- return 0;
359
- }
360
- postPaddedBuffer++;
361
-
362
- preliminaryValue = postPaddedBuffer;
363
- /* The goal of this call is to find next "\r\n", or any invalid field value chars, fast */
364
- while (true) {
365
- postPaddedBuffer = (char *) tryConsumeFieldValue(postPaddedBuffer);
366
- /* If this is not CR then we caught some stinky invalid char on the way */
367
- if (postPaddedBuffer[0] != '\r') {
368
- /* If TAB then keep searching */
369
- if (postPaddedBuffer[0] == '\t') {
370
- postPaddedBuffer++;
371
- continue;
372
- }
373
- /* Error - invalid chars in field value */
374
- return 0;
375
- }
376
- break;
377
- }
378
- /* We fence end[0] with \r, followed by end[1] being something that is "not \n", to signify "not found".
379
- * This way we can have this one single check to see if we found \r\n WITHIN our allowed search space. */
380
- if (postPaddedBuffer[1] == '\n') {
381
- /* Store this header, it is valid */
382
- headers->value = std::string_view(preliminaryValue, (size_t) (postPaddedBuffer - preliminaryValue));
383
- postPaddedBuffer += 2;
384
-
385
- /* Trim trailing whitespace (SP, HTAB) */
386
- while (headers->value.length() && headers->value.back() < 33) {
387
- headers->value.remove_suffix(1);
388
- }
389
-
390
- /* Trim initial whitespace (SP, HTAB) */
391
- while (headers->value.length() && headers->value.front() < 33) {
392
- headers->value.remove_prefix(1);
393
- }
394
-
395
- headers++;
396
-
397
- /* We definitely have at least one header (or request line), so check if we are done */
398
- if (*postPaddedBuffer == '\r') {
399
- if (postPaddedBuffer[1] == '\n') {
400
- /* This cann take the very last header space */
401
- headers->key = std::string_view(nullptr, 0);
402
- return (unsigned int) ((postPaddedBuffer + 2) - start);
403
- } else {
404
- /* \r\n\r plus non-\n letter is malformed request, or simply out of search space */
405
- return 0;
406
- }
407
- }
408
- } else {
409
- /* We are either out of search space or this is a malformed request */
410
- return 0;
411
- }
413
+ /* We definitely have at least one header (or request line), so check if
414
+ * we are done */
415
+ if (*postPaddedBuffer == '\r') {
416
+ if (postPaddedBuffer[1] == '\n') {
417
+ /* This can take the very last header space */
418
+ headers->key = std::string_view(nullptr, 0);
419
+ return (unsigned int)((postPaddedBuffer + 2) - start);
420
+ } else {
421
+ /* \r\n\r plus non-\n letter is malformed request, or simply out of
422
+ * search space */
423
+ return 0;
424
+ }
412
425
  }
413
- /* We ran out of header space, too large request */
426
+ } else {
427
+ /* We are either out of search space or this is a malformed request */
414
428
  return 0;
429
+ }
415
430
  }
431
+ /* We ran out of header space, too large request */
432
+ return 0;
433
+ }
434
+
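
The reason getHeaders can scan this way without bounds checks is the fence its caller writes: two bytes past the end of the receive buffer are set to '\r' and to a byte that is not '\n', so every scan for "\r\n" terminates inside the padded region and is then rejected by the single postPaddedBuffer[1] == '\n' test. A standalone sketch of that post-padding idea (buffer layout and sizes are illustrative):

#include <cassert>
#include <cstring>

int main() {
    /* Receive buffer with two spare post-padded bytes, as the caller guarantees. */
    char buf[16 + 2];
    const char *partial = "Host: exa"; /* truncated header, no "\r\n" yet */
    size_t length = std::strlen(partial);
    std::memcpy(buf, partial, length);

    /* The fence: a CR every scan will eventually hit, followed by a non-LF byte. */
    buf[length] = '\r';
    buf[length + 1] = 'a';

    /* A scan for CR now always terminates without running off the data... */
    char *p = buf;
    while (*p != '\r') p++;

    /* ...and the single "is it really CRLF?" check rejects the fence. */
    assert(p[1] != '\n'); /* incomplete header: the caller buffers and waits for more */
    return 0;
}
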
435
+ /* This is the only caller of getHeaders and is thus the deepest part of the
436
+ * parser. From here we return either [consumed, user] for "keep going", or
437
+ * [consumed, nullptr] for "break; I am closed or upgraded to websocket" or
438
+ * [whatever, fullptr] for "break and close me, I am a parser error!" */
439
+ template <int CONSUME_MINIMALLY>
440
+ std::pair<unsigned int, void *> fenceAndConsumePostPadded(
441
+ char *data, unsigned int length, void *user, void *reserved,
442
+ HttpRequest *req,
443
+ MoveOnlyFunction<void *(void *, HttpRequest *)> &requestHandler,
444
+ MoveOnlyFunction<void *(void *, std::string_view, bool)> &dataHandler) {
445
+
446
+ /* How much data we CONSUMED (to throw away) */
447
+ unsigned int consumedTotal = 0;
448
+ unsigned int err = 0;
449
+
450
+ /* Fence two bytes past end of our buffer (buffer has post padded margins).
451
+ * This is to always catch scan for \r but not for \r\n. */
452
+ data[length] = '\r';
453
+ data[length + 1] =
454
+ 'a'; /* Anything that is not \n, to trigger "invalid request" */
455
+
456
+ for (unsigned int consumed;
457
+ length && (consumed = getHeaders(data, data + length, req->headers,
458
+ reserved, err));) {
459
+ data += consumed;
460
+ length -= consumed;
461
+ consumedTotal += consumed;
462
+
463
+ /* Store HTTP version (ancient 1.0 or 1.1) */
464
+ req->ancientHttp = false;
465
+
466
+ /* Add all headers to bloom filter */
467
+ req->bf.reset();
468
+ for (HttpRequest::Header *h = req->headers; (++h)->key.length();) {
469
+ req->bf.add(h->key);
470
+ }
471
+
472
+ /* Break if no host header (but we can have empty string which is
473
+ * different from nullptr) */
474
+ if (!req->getHeader("host").data()) {
475
+ return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
476
+ }
477
+
478
+ /* RFC 9112 6.3
479
+ * If a message is received with both a Transfer-Encoding and a
480
+ * Content-Length header field, the Transfer-Encoding overrides the
481
+ * Content-Length. Such a message might indicate an attempt to perform
482
+ * request smuggling (Section 11.2) or response splitting (Section 11.1)
483
+ * and ought to be handled as an error. */
484
+ std::string_view transferEncodingString =
485
+ req->getHeader("transfer-encoding");
486
+ std::string_view contentLengthString = req->getHeader("content-length");
487
+ if (transferEncodingString.length() && contentLengthString.length()) {
488
+ /* Returning fullptr is the same as calling the errorHandler */
489
+ /* We could be smart and set an error in the context along with this, to
490
+ * indicate what http error response we might want to return */
491
+ return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
492
+ }
493
+
494
+ /* Parse query */
495
+ const char *querySeparatorPtr = (const char *)memchr(
496
+ req->headers->value.data(), '?', req->headers->value.length());
497
+ req->querySeparator =
498
+ (unsigned int)((querySeparatorPtr
499
+ ? querySeparatorPtr
500
+ : req->headers->value.data() +
501
+ req->headers->value.length()) -
502
+ req->headers->value.data());
503
+
504
+ /* If returned socket is not what we put in we need
505
+ * to break here as we either have upgraded to
506
+ * WebSockets or otherwise closed the socket. */
507
+ void *returnedUser = requestHandler(user, req);
508
+ if (returnedUser != user) {
509
+ /* We are upgraded to WebSocket or otherwise broken */
510
+ return {consumedTotal, returnedUser};
511
+ }
512
+
513
+ /* The rules at play here according to RFC 9112 for requests are
514
+ * essentially: If both content-length and transfer-encoding then invalid
515
+ * message; must break. If has transfer-encoding then must be chunked
516
+ * regardless of value. If content-length then fixed length even if 0. If
517
+ * none of the above then fixed length is 0. */
518
+
519
+ /* RFC 9112 6.3
520
+ * If a message is received with both a Transfer-Encoding and a
521
+ * Content-Length header field, the Transfer-Encoding overrides the
522
+ * Content-Length. */
523
+ if (transferEncodingString.length()) {
524
+
525
+ /* If a proxy sent us the transfer-encoding header that 100% means it
526
+ * must be chunked or else the proxy is not RFC 9112 compliant.
527
+ * Therefore it is always better to assume this is the case, since that
528
+ * entirely eliminates all forms of transfer-encoding obfuscation
529
+ * tricks. We just rely on the header. */
530
+
531
+ /* RFC 9112 6.3
532
+ * If a Transfer-Encoding header field is present in a request and the
533
+ * chunked transfer coding is not the final encoding, the message body
534
+ * length cannot be determined reliably; the server MUST respond with
535
+ * the 400 (Bad Request) status code and then close the connection. */
536
+
537
+ /* In this case we fail later by having the wrong interpretation
538
+ * (assuming chunked). This could be made stricter but makes no
539
+ * difference either way, unless forwarding the identical message as a
540
+ * proxy. */
541
+
542
+ remainingStreamingBytes = STATE_IS_CHUNKED;
543
+ /* If consume minimally, we do not want to consume anything but we want
544
+ * to mark this as being chunked */
545
+ if (!CONSUME_MINIMALLY) {
546
+ /* Go ahead and parse it (todo: better heuristics for emitting FIN to
547
+ * the app level) */
548
+ std::string_view dataToConsume(data, length);
549
+ for (auto chunk :
550
+ uWS::ChunkIterator(&dataToConsume, &remainingStreamingBytes)) {
551
+ dataHandler(user, chunk, chunk.length() == 0);
552
+ }
553
+ if (isParsingInvalidChunkedEncoding(remainingStreamingBytes)) {
554
+ return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
555
+ }
556
+ unsigned int consumed =
557
+ (length - (unsigned int)dataToConsume.length());
558
+ data = (char *)dataToConsume.data();
559
+ length = (unsigned int)dataToConsume.length();
560
+ consumedTotal += consumed;
561
+ }
562
+ } else if (contentLengthString.length()) {
563
+ remainingStreamingBytes = toUnsignedInteger(contentLengthString);
564
+ if (remainingStreamingBytes == UINT64_MAX) {
565
+ /* Parser error */
566
+ return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
567
+ }
416
568
 
417
- /* This is the only caller of getHeaders and is thus the deepest part of the parser.
418
- * From here we return either [consumed, user] for "keep going",
419
- * or [consumed, nullptr] for "break; I am closed or upgraded to websocket"
420
- * or [whatever, fullptr] for "break and close me, I am a parser error!" */
421
- template <int CONSUME_MINIMALLY>
422
- std::pair<unsigned int, void *> fenceAndConsumePostPadded(char *data, unsigned int length, void *user, void *reserved, HttpRequest *req, MoveOnlyFunction<void *(void *, HttpRequest *)> &requestHandler, MoveOnlyFunction<void *(void *, std::string_view, bool)> &dataHandler) {
423
-
424
- /* How much data we CONSUMED (to throw away) */
425
- unsigned int consumedTotal = 0;
426
- unsigned int err = 0;
427
-
428
- /* Fence two bytes past end of our buffer (buffer has post padded margins).
429
- * This is to always catch scan for \r but not for \r\n. */
430
- data[length] = '\r';
431
- data[length + 1] = 'a'; /* Anything that is not \n, to trigger "invalid request" */
432
-
433
- for (unsigned int consumed; length && (consumed = getHeaders(data, data + length, req->headers, reserved, err)); ) {
434
- data += consumed;
435
- length -= consumed;
436
- consumedTotal += consumed;
437
-
438
- /* Store HTTP version (ancient 1.0 or 1.1) */
439
- req->ancientHttp = false;
440
-
441
- /* Add all headers to bloom filter */
442
- req->bf.reset();
443
- for (HttpRequest::Header *h = req->headers; (++h)->key.length(); ) {
444
- req->bf.add(h->key);
445
- }
446
-
447
- /* Break if no host header (but we can have empty string which is different from nullptr) */
448
- if (!req->getHeader("host").data()) {
449
- return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
450
- }
451
-
452
- /* RFC 9112 6.3
453
- * If a message is received with both a Transfer-Encoding and a Content-Length header field,
454
- * the Transfer-Encoding overrides the Content-Length. Such a message might indicate an attempt
455
- * to perform request smuggling (Section 11.2) or response splitting (Section 11.1) and
456
- * ought to be handled as an error. */
457
- std::string_view transferEncodingString = req->getHeader("transfer-encoding");
458
- std::string_view contentLengthString = req->getHeader("content-length");
459
- if (transferEncodingString.length() && contentLengthString.length()) {
460
- /* Returning fullptr is the same as calling the errorHandler */
461
- /* We could be smart and set an error in the context along with this, to indicate what
462
- * http error response we might want to return */
463
- return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
464
- }
465
-
466
- /* Parse query */
467
- const char *querySeparatorPtr = (const char *) memchr(req->headers->value.data(), '?', req->headers->value.length());
468
- req->querySeparator = (unsigned int) ((querySeparatorPtr ? querySeparatorPtr : req->headers->value.data() + req->headers->value.length()) - req->headers->value.data());
469
-
470
- /* If returned socket is not what we put in we need
471
- * to break here as we either have upgraded to
472
- * WebSockets or otherwise closed the socket. */
473
- void *returnedUser = requestHandler(user, req);
474
- if (returnedUser != user) {
475
- /* We are upgraded to WebSocket or otherwise broken */
476
- return {consumedTotal, returnedUser};
477
- }
478
-
479
- /* The rules at play here according to RFC 9112 for requests are essentially:
480
- * If both content-length and transfer-encoding then invalid message; must break.
481
- * If has transfer-encoding then must be chunked regardless of value.
482
- * If content-length then fixed length even if 0.
483
- * If none of the above then fixed length is 0. */
484
-
485
- /* RFC 9112 6.3
486
- * If a message is received with both a Transfer-Encoding and a Content-Length header field,
487
- * the Transfer-Encoding overrides the Content-Length. */
488
- if (transferEncodingString.length()) {
489
-
490
- /* If a proxy sent us the transfer-encoding header that 100% means it must be chunked or else the proxy is
491
- * not RFC 9112 compliant. Therefore it is always better to assume this is the case, since that entirely eliminates
492
- * all forms of transfer-encoding obfuscation tricks. We just rely on the header. */
493
-
494
- /* RFC 9112 6.3
495
- * If a Transfer-Encoding header field is present in a request and the chunked transfer coding is not the
496
- * final encoding, the message body length cannot be determined reliably; the server MUST respond with the
497
- * 400 (Bad Request) status code and then close the connection. */
498
-
499
- /* In this case we fail later by having the wrong interpretation (assuming chunked).
500
- * This could be made stricter but makes no difference either way, unless forwarding the identical message as a proxy. */
501
-
502
- remainingStreamingBytes = STATE_IS_CHUNKED;
503
- /* If consume minimally, we do not want to consume anything but we want to mark this as being chunked */
504
- if (!CONSUME_MINIMALLY) {
505
- /* Go ahead and parse it (todo: better heuristics for emitting FIN to the app level) */
506
- std::string_view dataToConsume(data, length);
507
- for (auto chunk : uWS::ChunkIterator(&dataToConsume, &remainingStreamingBytes)) {
508
- dataHandler(user, chunk, chunk.length() == 0);
509
- }
510
- if (isParsingInvalidChunkedEncoding(remainingStreamingBytes)) {
511
- return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
512
- }
513
- unsigned int consumed = (length - (unsigned int) dataToConsume.length());
514
- data = (char *) dataToConsume.data();
515
- length = (unsigned int) dataToConsume.length();
516
- consumedTotal += consumed;
517
- }
518
- } else if (contentLengthString.length()) {
519
- remainingStreamingBytes = toUnsignedInteger(contentLengthString);
520
- if (remainingStreamingBytes == UINT_MAX) {
521
- /* Parser error */
522
- return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
523
- }
524
-
525
- if (!CONSUME_MINIMALLY) {
526
- unsigned int emittable = (unsigned int) std::min<uint64_t>(remainingStreamingBytes, length);
527
- dataHandler(user, std::string_view(data, emittable), emittable == remainingStreamingBytes);
528
- remainingStreamingBytes -= emittable;
529
-
530
- data += emittable;
531
- length -= emittable;
532
- consumedTotal += emittable;
533
- }
534
- } else {
535
- /* If we came here without a body; emit an empty data chunk to signal no data */
536
- dataHandler(user, {}, true);
537
- }
569
+ if (!CONSUME_MINIMALLY) {
570
+ unsigned int emittable =
571
+ (unsigned int)std::min<uint64_t>(remainingStreamingBytes, length);
572
+ dataHandler(user, std::string_view(data, emittable),
573
+ emittable == remainingStreamingBytes);
574
+ remainingStreamingBytes -= emittable;
538
575
 
539
- /* Consume minimally should break as easrly as possible */
540
- if (CONSUME_MINIMALLY) {
541
- break;
542
- }
576
+ data += emittable;
577
+ length -= emittable;
578
+ consumedTotal += emittable;
543
579
  }
544
- /* Whenever we return FULLPTR, the interpretation of "consumed" should be the HttpError enum. */
545
- if (err) {
546
- return {err, FULLPTR};
547
- }
548
- return {consumedTotal, user};
580
+ } else {
581
+ /* If we came here without a body; emit an empty data chunk to signal no
582
+ * data */
583
+ dataHandler(user, {}, true);
584
+ }
585
+
586
+ /* Consume minimally should break as early as possible */
587
+ if (CONSUME_MINIMALLY) {
588
+ break;
589
+ }
590
+ }
591
+ /* Whenever we return FULLPTR, the interpretation of "consumed" should be
592
+ * the HttpError enum. */
593
+ if (err) {
594
+ return {err, FULLPTR};
549
595
  }
596
+ return {consumedTotal, user};
597
+ }
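
The body-framing rules the loop above applies (per RFC 9112 6.3) can be stated on their own: both Transfer-Encoding and Content-Length present is an error, Transfer-Encoding alone is always treated as chunked, Content-Length alone gives a fixed length (possibly zero), and neither means an empty body. A compact restatement, for illustration only:

#include <cassert>
#include <string_view>

enum class BodyFraming { Error400, Chunked, FixedLength, Empty };

/* Illustrative restatement of the framing decision made in the loop above. */
static BodyFraming decideFraming(std::string_view transferEncoding,
                                 std::string_view contentLength) {
    if (transferEncoding.length() && contentLength.length()) {
        return BodyFraming::Error400;    /* request smuggling risk: reject */
    }
    if (transferEncoding.length()) {
        return BodyFraming::Chunked;     /* any value is treated as chunked */
    }
    if (contentLength.length()) {
        return BodyFraming::FixedLength; /* fixed length, possibly 0 */
    }
    return BodyFraming::Empty;           /* no body at all */
}

int main() {
    assert(decideFraming("chunked", "") == BodyFraming::Chunked);
    assert(decideFraming("", "1024") == BodyFraming::FixedLength);
    assert(decideFraming("chunked", "1024") == BodyFraming::Error400);
    assert(decideFraming("", "") == BodyFraming::Empty);
    return 0;
}
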
550
598
 
551
599
  public:
552
- std::pair<unsigned int, void *> consumePostPadded(char *data, unsigned int length, void *user, void *reserved, MoveOnlyFunction<void *(void *, HttpRequest *)> &&requestHandler, MoveOnlyFunction<void *(void *, std::string_view, bool)> &&dataHandler) {
553
-
554
- /* This resets BloomFilter by construction, but later we also reset it again.
555
- * Optimize this to skip resetting twice (req could be made global) */
556
- HttpRequest req;
557
-
558
- if (remainingStreamingBytes) {
559
-
560
- /* It's either chunked or with a content-length */
561
- if (isParsingChunkedEncoding(remainingStreamingBytes)) {
562
- std::string_view dataToConsume(data, length);
563
- for (auto chunk : uWS::ChunkIterator(&dataToConsume, &remainingStreamingBytes)) {
564
- dataHandler(user, chunk, chunk.length() == 0);
565
- }
566
- if (isParsingInvalidChunkedEncoding(remainingStreamingBytes)) {
567
- return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
568
- }
569
- data = (char *) dataToConsume.data();
570
- length = (unsigned int) dataToConsume.length();
571
- } else {
572
- // this is exactly the same as below!
573
- // todo: refactor this
574
- if (remainingStreamingBytes >= length) {
575
- void *returnedUser = dataHandler(user, std::string_view(data, length), remainingStreamingBytes == length);
576
- remainingStreamingBytes -= length;
577
- return {0, returnedUser};
578
- } else {
579
- void *returnedUser = dataHandler(user, std::string_view(data, remainingStreamingBytes), true);
580
-
581
- data += (unsigned int) remainingStreamingBytes;
582
- length -= (unsigned int) remainingStreamingBytes;
583
-
584
- remainingStreamingBytes = 0;
585
-
586
- if (returnedUser != user) {
587
- return {0, returnedUser};
588
- }
589
- }
590
- }
600
+ std::pair<unsigned int, void *> consumePostPadded(
601
+ char *data, unsigned int length, void *user, void *reserved,
602
+ MoveOnlyFunction<void *(void *, HttpRequest *)> &&requestHandler,
603
+ MoveOnlyFunction<void *(void *, std::string_view, bool)> &&dataHandler) {
604
+
605
+ /* This resets BloomFilter by construction, but later we also reset it
606
+ * again. Optimize this to skip resetting twice (req could be made global)
607
+ */
608
+ HttpRequest req;
609
+
610
+ if (remainingStreamingBytes) {
611
+
612
+ /* It's either chunked or with a content-length */
613
+ if (isParsingChunkedEncoding(remainingStreamingBytes)) {
614
+ std::string_view dataToConsume(data, length);
615
+ for (auto chunk :
616
+ uWS::ChunkIterator(&dataToConsume, &remainingStreamingBytes)) {
617
+ dataHandler(user, chunk, chunk.length() == 0);
618
+ }
619
+ if (isParsingInvalidChunkedEncoding(remainingStreamingBytes)) {
620
+ return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
621
+ }
622
+ data = (char *)dataToConsume.data();
623
+ length = (unsigned int)dataToConsume.length();
624
+ } else {
625
+ // this is exactly the same as below!
626
+ // todo: refactor this
627
+ if (remainingStreamingBytes >= length) {
628
+ void *returnedUser = dataHandler(user, std::string_view(data, length),
629
+ remainingStreamingBytes == length);
630
+ remainingStreamingBytes -= length;
631
+ return {0, returnedUser};
632
+ } else {
633
+ void *returnedUser = dataHandler(
634
+ user, std::string_view(data, remainingStreamingBytes), true);
591
635
 
592
- } else if (fallback.length()) {
593
- unsigned int had = (unsigned int) fallback.length();
636
+ data += (unsigned int)remainingStreamingBytes;
637
+ length -= (unsigned int)remainingStreamingBytes;
594
638
 
595
- size_t maxCopyDistance = std::min<size_t>(MAX_FALLBACK_SIZE - fallback.length(), (size_t) length);
639
+ remainingStreamingBytes = 0;
596
640
 
597
- /* We don't want fallback to be short string optimized, since we want to move it */
598
- fallback.reserve(fallback.length() + maxCopyDistance + std::max<unsigned int>(MINIMUM_HTTP_POST_PADDING, sizeof(std::string)));
599
- fallback.append(data, maxCopyDistance);
641
+ if (returnedUser != user) {
642
+ return {0, returnedUser};
643
+ }
644
+ }
645
+ }
646
+
647
+ } else if (fallback.length()) {
648
+ unsigned int had = (unsigned int)fallback.length();
649
+
650
+ size_t maxCopyDistance = std::min<size_t>(
651
+ MAX_FALLBACK_SIZE - fallback.length(), (size_t)length);
652
+
653
+ /* We don't want fallback to be short string optimized, since we want to
654
+ * move it */
655
+ fallback.reserve(fallback.length() + maxCopyDistance +
656
+ std::max<unsigned int>(MINIMUM_HTTP_POST_PADDING,
657
+ sizeof(std::string)));
658
+ fallback.append(data, maxCopyDistance);
659
+
660
+ // break here on break
661
+ std::pair<unsigned int, void *> consumed =
662
+ fenceAndConsumePostPadded<true>(
663
+ fallback.data(), (unsigned int)fallback.length(), user, reserved,
664
+ &req, requestHandler, dataHandler);
665
+ if (consumed.second != user) {
666
+ return consumed;
667
+ }
668
+
669
+ if (consumed.first) {
670
+
671
+ /* This logic assumes that we consumed everything in fallback buffer.
672
+ * This is critically important, as we will get an integer overflow in
673
+ * case of "had" being larger than what we consumed, and that we would
674
+ * drop data */
675
+ fallback.clear();
676
+ data += consumed.first - had;
677
+ length -= consumed.first - had;
600
678
 
601
- // break here on break
602
- std::pair<unsigned int, void *> consumed = fenceAndConsumePostPadded<true>(fallback.data(), (unsigned int) fallback.length(), user, reserved, &req, requestHandler, dataHandler);
603
- if (consumed.second != user) {
604
- return consumed;
679
+ if (remainingStreamingBytes) {
680
+ /* It's either chunked or with a content-length */
681
+ if (isParsingChunkedEncoding(remainingStreamingBytes)) {
682
+ std::string_view dataToConsume(data, length);
683
+ for (auto chunk :
684
+ uWS::ChunkIterator(&dataToConsume, &remainingStreamingBytes)) {
685
+ dataHandler(user, chunk, chunk.length() == 0);
686
+ }
687
+ if (isParsingInvalidChunkedEncoding(remainingStreamingBytes)) {
688
+ return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
605
689
  }
690
+ data = (char *)dataToConsume.data();
691
+ length = (unsigned int)dataToConsume.length();
692
+ } else {
693
+ // this is exactly the same as above!
694
+ if (remainingStreamingBytes >= (unsigned int)length) {
695
+ void *returnedUser =
696
+ dataHandler(user, std::string_view(data, length),
697
+ remainingStreamingBytes == (unsigned int)length);
698
+ remainingStreamingBytes -= length;
699
+ return {0, returnedUser};
700
+ } else {
701
+ void *returnedUser = dataHandler(
702
+ user, std::string_view(data, remainingStreamingBytes), true);
606
703
 
607
- if (consumed.first) {
608
-
609
- /* This logic assumes that we consumed everything in fallback buffer.
610
- * This is critically important, as we will get an integer overflow in case
611
- * of "had" being larger than what we consumed, and that we would drop data */
612
- fallback.clear();
613
- data += consumed.first - had;
614
- length -= consumed.first - had;
615
-
616
- if (remainingStreamingBytes) {
617
- /* It's either chunked or with a content-length */
618
- if (isParsingChunkedEncoding(remainingStreamingBytes)) {
619
- std::string_view dataToConsume(data, length);
620
- for (auto chunk : uWS::ChunkIterator(&dataToConsume, &remainingStreamingBytes)) {
621
- dataHandler(user, chunk, chunk.length() == 0);
622
- }
623
- if (isParsingInvalidChunkedEncoding(remainingStreamingBytes)) {
624
- return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
625
- }
626
- data = (char *) dataToConsume.data();
627
- length = (unsigned int) dataToConsume.length();
628
- } else {
629
- // this is exactly the same as above!
630
- if (remainingStreamingBytes >= (unsigned int) length) {
631
- void *returnedUser = dataHandler(user, std::string_view(data, length), remainingStreamingBytes == (unsigned int) length);
632
- remainingStreamingBytes -= length;
633
- return {0, returnedUser};
634
- } else {
635
- void *returnedUser = dataHandler(user, std::string_view(data, remainingStreamingBytes), true);
636
-
637
- data += (unsigned int) remainingStreamingBytes;
638
- length -= (unsigned int) remainingStreamingBytes;
639
-
640
- remainingStreamingBytes = 0;
641
-
642
- if (returnedUser != user) {
643
- return {0, returnedUser};
644
- }
645
- }
646
- }
647
- }
704
+ data += (unsigned int)remainingStreamingBytes;
705
+ length -= (unsigned int)remainingStreamingBytes;
648
706
 
649
- } else {
650
- if (fallback.length() == MAX_FALLBACK_SIZE) {
651
- return {HTTP_ERROR_431_REQUEST_HEADER_FIELDS_TOO_LARGE, FULLPTR};
652
- }
653
- return {0, user};
707
+ remainingStreamingBytes = 0;
708
+
709
+ if (returnedUser != user) {
710
+ return {0, returnedUser};
711
+ }
654
712
  }
713
+ }
655
714
  }
656
715
 
657
- std::pair<unsigned int, void *> consumed = fenceAndConsumePostPadded<false>(data, length, user, reserved, &req, requestHandler, dataHandler);
658
- if (consumed.second != user) {
659
- return consumed;
716
+ } else {
717
+ if (fallback.length() == MAX_FALLBACK_SIZE) {
718
+ return {HTTP_ERROR_431_REQUEST_HEADER_FIELDS_TOO_LARGE, FULLPTR};
660
719
  }
720
+ return {0, user};
721
+ }
722
+ }
661
723
 
662
- data += consumed.first;
663
- length -= consumed.first;
724
+ std::pair<unsigned int, void *> consumed = fenceAndConsumePostPadded<false>(
725
+ data, length, user, reserved, &req, requestHandler, dataHandler);
726
+ if (consumed.second != user) {
727
+ return consumed;
728
+ }
664
729
 
665
- if (length) {
666
- if (length < MAX_FALLBACK_SIZE) {
667
- fallback.append(data, length);
668
- } else {
669
- return {HTTP_ERROR_431_REQUEST_HEADER_FIELDS_TOO_LARGE, FULLPTR};
670
- }
671
- }
730
+ data += consumed.first;
731
+ length -= consumed.first;
672
732
 
673
- // added for now
674
- return {0, user};
733
+ if (length) {
734
+ if (length < MAX_FALLBACK_SIZE) {
735
+ fallback.append(data, length);
736
+ } else {
737
+ return {HTTP_ERROR_431_REQUEST_HEADER_FIELDS_TOO_LARGE, FULLPTR};
738
+ }
675
739
  }
740
+
741
+ // added for now
742
+ return {0, user};
743
+ }
676
744
  };
677
745
 
678
- }
746
+ } // namespace uWS
679
747
 
680
748
  #endif // UWS_HTTPPARSER_H
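
Taken together, a caller drives the parser by handing it post-padded receive buffers and reacting to the return pair described above: [consumed, user] to keep going, [consumed, other] after an upgrade or close, and [error, FULLPTR] on a parser error. A hedged sketch of such a driver, assuming this header and its sibling headers are on the include path; the buffer size, connection stand-in, and handlers are illustrative:

#include "HttpParser.h"
#include <cstdio>
#include <cstring>

int main() {
    /* Post-padded buffer: real callers keep spare bytes past `length`
     * because the parser writes its '\r' / non-'\n' fence there. */
    char buffer[1024 + 32];
    const char *request = "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n";
    unsigned int length = (unsigned int) std::strlen(request);
    std::memcpy(buffer, request, length);

    uWS::HttpParser parser;
    int connection = 1; /* stand-in for the per-socket user pointer */

    auto [consumed, user] = parser.consumePostPadded(
        buffer, length, &connection, nullptr,
        [](void *user, uWS::HttpRequest *req) -> void * {
            std::printf("request for %.*s\n",
                        (int) req->getUrl().length(), req->getUrl().data());
            return user; /* same pointer back: keep parsing this socket */
        },
        [](void *user, std::string_view chunk, bool fin) -> void * {
            std::printf("body chunk of %zu bytes, fin=%d\n", chunk.length(), (int) fin);
            return user;
        });

    std::printf("consumed %u bytes\n", consumed);
    if (user != (void *) &connection) {
        /* FULLPTR (parser error) or an upgraded/closed socket: stop feeding data */
    }
    return 0;
}
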