kcar 0.1.0
Sign up to get free protection for your applications and to get access to all the features.
- data/.document +7 -0
- data/.gitignore +20 -0
- data/COPYING +339 -0
- data/GIT-VERSION-GEN +40 -0
- data/GNUmakefile +197 -0
- data/LICENSE +55 -0
- data/README +86 -0
- data/Rakefile +149 -0
- data/TODO +6 -0
- data/ext/kcar/c_util.h +105 -0
- data/ext/kcar/ext_help.h +82 -0
- data/ext/kcar/extconf.rb +14 -0
- data/ext/kcar/kcar.rl +656 -0
- data/ext/kcar/kcar_http_common.rl +56 -0
- data/kcar.gemspec +40 -0
- data/lib/kcar.rb +11 -0
- data/lib/kcar/parser.rb +39 -0
- data/lib/kcar/response.rb +168 -0
- data/setup.rb +1586 -0
- data/test/test_parser.rb +257 -0
- data/test/test_response.rb +415 -0
- metadata +96 -0
data/ext/kcar/extconf.rb
ADDED
@@ -0,0 +1,14 @@
|
|
1
|
+
# -*- encoding: binary -*-
# mkmf build script for the kcar_ext C extension.
require 'mkmf'

dir_config("kcar")

# Prefer the SIZEOF_* macros ruby.h may already provide; only probe
# the type sizes ourselves when the macros are absent.
have_macro("SIZEOF_OFF_T", "ruby.h") || check_sizeof("off_t", "sys/types.h")
have_macro("SIZEOF_LONG", "ruby.h") || check_sizeof("long", "sys/types.h")

# Optional string APIs; compatibility fallbacks are used when missing.
have_func("rb_str_set_len", "ruby.h")
have_func("rb_str_modify", "ruby.h")

# -fPIC is needed for Rubinius, MRI already uses it regardless
with_cflags($CFLAGS + " -fPIC ") do
  create_makefile("kcar_ext")
end
|
data/ext/kcar/kcar.rl
ADDED
@@ -0,0 +1,656 @@
|
|
1
|
+
/**
 * Copyright (c) 2009, 2010 Eric Wong (all bugs are Eric's fault)
 * Copyright (c) 2005 Zed A. Shaw
 * You can redistribute it and/or modify it under the same terms as Ruby.
 */
#include "ruby.h"
#include "ext_help.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include "c_util.h"

static VALUE eParserError;          /* Kcar::ParserError, set in Init_kcar_ext */
static ID id_sq, id_sq_set;         /* :[] and :[]= for hash-ish header stores */

/** Defines common length and error messages for input length validation. */
#define DEF_MAX_LENGTH(N, length) \
  static const size_t MAX_##N##_LENGTH = length; \
  static const char MAX_##N##_LENGTH_ERR[] = \
    "HTTP element " # N " is longer than the " # length " allowed length."

/**
 * Validates the max length of given input and throws an ParserError
 * exception if over.  The message is passed through a "%s" format so
 * the error text can never be misinterpreted as printf directives
 * (format-string hygiene; matters if a message ever gains a '%').
 */
#define VALIDATE_MAX_LENGTH(len, N) do { \
  if (len > MAX_##N##_LENGTH) \
    rb_raise(eParserError, "%s", MAX_##N##_LENGTH_ERR); \
} while (0)

/* Defines the maximum allowed lengths for various input elements. */
DEF_MAX_LENGTH(FIELD_NAME, 256);
DEF_MAX_LENGTH(FIELD_VALUE, 80 * 1024);
DEF_MAX_LENGTH(HEADER, (1024 * (80 + 32)));
DEF_MAX_LENGTH(REASON, 256);

/* bit flags for http_parser.flags */
#define UH_FL_CHUNKED 0x1    /* "Transfer-Encoding: chunked" seen */
#define UH_FL_HASBODY 0x2    /* status code implies a response body */
#define UH_FL_INBODY 0x4
#define UH_FL_HASTRAILER 0x8 /* "Trailer:" header seen */
#define UH_FL_INTRAILER 0x10 /* currently parsing trailers */
#define UH_FL_INCHUNK 0x20   /* mid-chunk, need more data */
#define UH_FL_KEEPALIVE 0x40 /* connection may be reused */

struct http_parser {
  int cs; /* Ragel internal state */
  unsigned int flags;
  size_t mark;           /* start offset of the token being scanned */
  size_t offset;         /* resume offset into the caller's buffer */
  union { /* these 2 fields don't nest */
    size_t field;
    size_t query;
  } start;
  union {
    size_t field_len; /* only used during header processing */
    size_t dest_offset; /* only used during body processing */
  } s;
  VALUE cont; /* Qfalse: unset, Qnil: ignored header, T_STRING: append */
  VALUE status; /* String or Qnil */
  union {
    off_t content; /* bytes left per Content-Length (-1 == unknown) */
    off_t chunk;   /* bytes left in the current chunk */
  } len;
};

/* these macros assume "hp", "buffer", "p" and "pe" locals are in scope */
#define REMAINING (unsigned long)(pe - p)
#define LEN(AT, FPC) (FPC - buffer - hp->AT)
#define MARK(M,FPC) (hp->M = (FPC) - buffer)
#define PTR_TO(F) (buffer + hp->F)
#define STR_NEW(M,FPC) rb_str_new(PTR_TO(M), LEN(M, FPC))

#define HP_FL_TEST(hp,fl) ((hp)->flags & (UH_FL_##fl))
#define HP_FL_SET(hp,fl) ((hp)->flags |= (UH_FL_##fl))
#define HP_FL_UNSET(hp,fl) ((hp)->flags &= ~(UH_FL_##fl))
#define HP_FL_ALL(hp,fl) (HP_FL_TEST(hp, fl) == (UH_FL_##fl))
|
77
|
+
|
78
|
+
static void finalize_header(struct http_parser *hp)
|
79
|
+
{
|
80
|
+
if ((HP_FL_TEST(hp, HASTRAILER) && ! HP_FL_TEST(hp, CHUNKED)))
|
81
|
+
rb_raise(eParserError, "trailer but not chunked");
|
82
|
+
}
|
83
|
+
|
84
|
+
/*
|
85
|
+
* handles values of the "Connection:" header, keepalive is implied
|
86
|
+
* for HTTP/1.1 but needs to be explicitly enabled with HTTP/1.0
|
87
|
+
* Additionally, we require GET/HEAD requests to support keepalive.
|
88
|
+
*/
|
89
|
+
static void hp_keepalive_connection(struct http_parser *hp, VALUE val)
|
90
|
+
{
|
91
|
+
/* REQUEST_METHOD is always set before any headers */
|
92
|
+
if (STR_CSTR_CASE_EQ(val, "keep-alive")) {
|
93
|
+
/* basically have HTTP/1.0 masquerade as HTTP/1.1+ */
|
94
|
+
HP_FL_SET(hp, KEEPALIVE);
|
95
|
+
} else if (STR_CSTR_CASE_EQ(val, "close")) {
|
96
|
+
/*
|
97
|
+
* it doesn't matter what HTTP version or request method we have,
|
98
|
+
* if a server says "Connection: close", we disable keepalive
|
99
|
+
*/
|
100
|
+
HP_FL_UNSET(hp, KEEPALIVE);
|
101
|
+
} else {
|
102
|
+
/*
|
103
|
+
* server could've sent anything, ignore it for now. Maybe
|
104
|
+
* "HP_FL_UNSET(hp, KEEPALIVE);" just in case?
|
105
|
+
* Raising an exception might be too mean...
|
106
|
+
*/
|
107
|
+
}
|
108
|
+
}
|
109
|
+
|
110
|
+
static void
|
111
|
+
http_version(struct http_parser *hp, VALUE hdr, const char *ptr, size_t len)
|
112
|
+
{
|
113
|
+
if (CONST_MEM_EQ("HTTP/1.1", ptr, len)) {
|
114
|
+
/* HTTP/1.1 implies keepalive unless "Connection: close" is set */
|
115
|
+
HP_FL_SET(hp, KEEPALIVE);
|
116
|
+
}
|
117
|
+
}
|
118
|
+
|
119
|
+
static void
|
120
|
+
status_phrase(struct http_parser *hp, VALUE hdr, const char *ptr, size_t len)
|
121
|
+
{
|
122
|
+
long nr;
|
123
|
+
|
124
|
+
hp->status = rb_str_new(ptr, len);
|
125
|
+
|
126
|
+
/* RSTRING_PTR is null terminated, ptr is not */
|
127
|
+
nr = strtol(RSTRING_PTR(hp->status), NULL, 10);
|
128
|
+
|
129
|
+
if (nr < 100 || nr > 999)
|
130
|
+
rb_raise(eParserError, "invalid status: %s", RSTRING_PTR(hp->status));
|
131
|
+
|
132
|
+
if ( !((nr >= 100 && nr <= 199) || nr == 204 || nr == 304) )
|
133
|
+
HP_FL_SET(hp, HASBODY);
|
134
|
+
}
|
135
|
+
|
136
|
+
static inline void invalid_if_trailer(struct http_parser *hp)
|
137
|
+
{
|
138
|
+
if (HP_FL_TEST(hp, INTRAILER))
|
139
|
+
rb_raise(eParserError, "invalid Trailer");
|
140
|
+
}
|
141
|
+
|
142
|
+
static void write_cont_value(struct http_parser *hp,
|
143
|
+
char *buffer, const char *p)
|
144
|
+
{
|
145
|
+
char *vptr;
|
146
|
+
|
147
|
+
if (hp->cont == Qfalse)
|
148
|
+
rb_raise(eParserError, "invalid continuation line");
|
149
|
+
|
150
|
+
if (NIL_P(hp->cont))
|
151
|
+
return; /* we're ignoring this header (probably Status:) */
|
152
|
+
|
153
|
+
assert(TYPE(hp->cont) == T_STRING && "continuation line is not a string");
|
154
|
+
assert(hp->mark > 0 && "impossible continuation line offset");
|
155
|
+
|
156
|
+
if (LEN(mark, p) == 0)
|
157
|
+
return;
|
158
|
+
|
159
|
+
if (RSTRING_LEN(hp->cont) > 0)
|
160
|
+
--hp->mark;
|
161
|
+
|
162
|
+
vptr = PTR_TO(mark);
|
163
|
+
|
164
|
+
if (RSTRING_LEN(hp->cont) > 0) {
|
165
|
+
assert((' ' == *vptr || '\t' == *vptr) && "invalid leading white space");
|
166
|
+
*vptr = ' ';
|
167
|
+
}
|
168
|
+
rb_str_buf_cat(hp->cont, vptr, LEN(mark, p));
|
169
|
+
}
|
170
|
+
|
171
|
+
/*
 * records one complete "Field: value" pair into +hdr+ (Array, Hash, or
 * any object responding to []/[]=) and updates parser state for the
 * framing-relevant headers (Connection, Content-Length,
 * Transfer-Encoding, Trailer).
 */
static void write_value(VALUE hdr, struct http_parser *hp,
                        const char *buffer, const char *p)
{
  VALUE f, v;
  VALUE hclass;
  const char *fptr = PTR_TO(start.field);
  long flen = hp->s.field_len;
  const char *vptr;
  long vlen;

  /* Rack does not like Status headers, so we never send them */
  if (CSTR_CASE_EQ(fptr, flen, "status")) {
    hp->cont = Qnil;
    return;
  }

  vptr = PTR_TO(mark);
  vlen = LEN(mark, p);
  VALIDATE_MAX_LENGTH(vlen, FIELD_VALUE);
  VALIDATE_MAX_LENGTH(flen, FIELD_NAME);
  f = rb_str_new(fptr, flen);
  v = rb_str_new(vptr, vlen);

  /* needs more tests for error-checking here */
  /*
   * TODO:
   * some of these tests might be too strict for real-world HTTP servers,
   * report real-world examples as we find them:
   */
  if (STR_CSTR_CASE_EQ(f, "connection")) {
    hp_keepalive_connection(hp, v);
  } else if (STR_CSTR_CASE_EQ(f, "content-length")) {
    if (! HP_FL_TEST(hp, HASBODY))
      rb_raise(eParserError, "Content-Length with no body expected");
    if (HP_FL_TEST(hp, CHUNKED))
      rb_raise(eParserError,
               "Content-Length when chunked Transfer-Encoding is set");
    hp->len.content = parse_length(vptr, vlen);

    if (hp->len.content < 0)
      rb_raise(eParserError, "invalid Content-Length");

    invalid_if_trailer(hp);
  } else if (STR_CSTR_CASE_EQ(f, "transfer-encoding")) {
    if (STR_CSTR_CASE_EQ(v, "chunked")) {
      if (! HP_FL_TEST(hp, HASBODY))
        rb_raise(eParserError,
                 "chunked Transfer-Encoding with no body expected");
      if (hp->len.content >= 0)
        rb_raise(eParserError,
                 "chunked Transfer-Encoding when Content-Length is set");

      hp->len.chunk = 0;
      HP_FL_SET(hp, CHUNKED);
    }
    invalid_if_trailer(hp);
  } else if (STR_CSTR_CASE_EQ(f, "trailer")) {
    if (! HP_FL_TEST(hp, HASBODY))
      rb_raise(eParserError, "trailer with no body");
    HP_FL_SET(hp, HASTRAILER);
    invalid_if_trailer(hp);
  }

  hclass = CLASS_OF(hdr);
  if (hclass == rb_cArray) {
    rb_ary_push(hdr, rb_ary_new3(2, f, v));
    hp->cont = v;
  } else {
    /* hash-ish, try rb_hash_* first and fall back to slow rb_funcall */
    VALUE e = (hclass == rb_cHash) ? rb_hash_aref(hdr, f)
                                   : rb_funcall(hdr, id_sq, 1, f);

    if (NIL_P(e)) {
      /* new value, freeze it since it speeds up MRI slightly */
      OBJ_FREEZE(f);

      if (hclass == rb_cHash)
        rb_hash_aset(hdr, f, v);
      else
        rb_funcall(hdr, id_sq_set, 2, f, v);

      hp->cont = v;
    } else {
      /*
       * existing value, append to it, Rack 1.x uses newlines to represent
       * repeated cookies:
       *   { 'Set-Cookie' => "a=b\nc=d" }
       * becomes:
       *   "Set-Cookie: a=b\r\nSet-Cookie: c=d\r\n"
       */
      rb_str_buf_cat(e, "\n", 1);
      hp->cont = rb_str_buf_append(e, v);
    }
  }
}
|
271
|
+
|
272
|
+
/** Machine **/
/*
 * Ragel grammar actions; the grammar itself lives in
 * kcar_http_common.rl.  NOTE(review): action bodies reference the
 * locals of http_parser_execute (cs, p, pe, hp, hdr, buffer) and the
 * post_exec label — do not reorder or rename without checking that
 * function.
 */

%%{
  machine http_parser;

  action mark {MARK(mark, fpc); }

  action start_field { MARK(start.field, fpc); }
  action write_field { hp->s.field_len = LEN(start.field, fpc); }
  action start_value { MARK(mark, fpc); }
  action write_value { write_value(hdr, hp, buffer, fpc); }
  action write_cont_value { write_cont_value(hp, buffer, fpc); }
  action http_version { http_version(hp, hdr, PTR_TO(mark), LEN(mark, fpc)); }
  action status_phrase { status_phrase(hp, hdr, PTR_TO(mark), LEN(mark, fpc)); }

  action add_to_chunk_size {
    hp->len.chunk = step_incr(hp->len.chunk, fc, 16);
    if (hp->len.chunk < 0)
      rb_raise(eParserError, "invalid chunk size");
  }
  action header_done {
    finalize_header(hp);
    cs = http_parser_first_final;

    if (HP_FL_TEST(hp, CHUNKED))
      cs = http_parser_en_ChunkedBody;

    /*
     * go back to Ruby so we can call the Rack application, we'll reenter
     * the parser iff the body needs to be processed.
     */
    goto post_exec;
  }

  action end_trailers {
    cs = http_parser_first_final;
    goto post_exec;
  }

  action end_chunked_body {
    HP_FL_SET(hp, INTRAILER);
    cs = http_parser_en_Trailers;
    ++p;
    assert(p <= pe && "buffer overflow after chunked body");
    goto post_exec;
  }

  action skip_chunk_data {
  skip_chunk_data_hack: {
    size_t nr = MIN((size_t)hp->len.chunk, REMAINING);
    memcpy(RSTRING_PTR(hdr) + hp->s.dest_offset, fpc, nr);
    hp->s.dest_offset += nr;
    hp->len.chunk -= nr;
    p += nr;
    assert(hp->len.chunk >= 0 && "negative chunk length");
    if ((size_t)hp->len.chunk > REMAINING) {
      HP_FL_SET(hp, INCHUNK);
      goto post_exec;
    } else {
      fhold;
      fgoto chunk_end;
    }
  }}

  include kcar_http_common "kcar_http_common.rl";
}%%

/** Data **/
%% write data;
|
341
|
+
|
342
|
+
static void http_parser_init(struct http_parser *hp)
|
343
|
+
{
|
344
|
+
int cs = 0;
|
345
|
+
memset(hp, 0, sizeof(struct http_parser));
|
346
|
+
hp->cont = Qfalse; /* zero on MRI, should be optimized away by above */
|
347
|
+
hp->status = Qnil;
|
348
|
+
hp->len.content = -1;
|
349
|
+
%% write init;
|
350
|
+
hp->cs = cs;
|
351
|
+
}
|
352
|
+
|
353
|
+
/** exec **/
|
354
|
+
static void http_parser_execute(struct http_parser *hp,
|
355
|
+
VALUE hdr, char *buffer, size_t len)
|
356
|
+
{
|
357
|
+
const char *p, *pe;
|
358
|
+
int cs = hp->cs;
|
359
|
+
size_t off = hp->offset;
|
360
|
+
|
361
|
+
if (cs == http_parser_first_final)
|
362
|
+
return;
|
363
|
+
|
364
|
+
assert(off <= len && "offset past end of buffer");
|
365
|
+
|
366
|
+
p = buffer+off;
|
367
|
+
pe = buffer+len;
|
368
|
+
|
369
|
+
assert((void *)(pe - p) == (void *)(len - off) &&
|
370
|
+
"pointers aren't same distance");
|
371
|
+
|
372
|
+
if (HP_FL_TEST(hp, INCHUNK)) {
|
373
|
+
HP_FL_UNSET(hp, INCHUNK);
|
374
|
+
goto skip_chunk_data_hack;
|
375
|
+
}
|
376
|
+
%% write exec;
|
377
|
+
post_exec: /* "_out:" also goes here */
|
378
|
+
if (hp->cs != http_parser_error)
|
379
|
+
hp->cs = cs;
|
380
|
+
hp->offset = p - buffer;
|
381
|
+
|
382
|
+
assert(p <= pe && "buffer overflow after parsing execute");
|
383
|
+
assert(hp->offset <= len && "offset longer than length");
|
384
|
+
}
|
385
|
+
|
386
|
+
/* extracts the wrapped http_parser struct from a Kcar::Parser object */
static struct http_parser *data_get(VALUE self)
{
  struct http_parser *hp;

  Data_Get_Struct(self, struct http_parser, hp);
  assert(hp && "failed to extract http_parser struct");

  return hp;
}
|
394
|
+
|
395
|
+
static void mark(void *ptr)
|
396
|
+
{
|
397
|
+
struct http_parser *hp = ptr;
|
398
|
+
|
399
|
+
rb_gc_mark(hp->cont);
|
400
|
+
rb_gc_mark(hp->status);
|
401
|
+
}
|
402
|
+
|
403
|
+
/* allocator for Kcar::Parser; -1 => free the struct with ruby_xfree */
static VALUE alloc(VALUE klass)
{
  struct http_parser *hp;

  return Data_Make_Struct(klass, struct http_parser, mark, -1, hp);
}
|
408
|
+
|
409
|
+
/**
|
410
|
+
* call-seq:
|
411
|
+
* Kcar::Parser.new => parser
|
412
|
+
*
|
413
|
+
* Creates a new parser.
|
414
|
+
*
|
415
|
+
* Document-method: reset
|
416
|
+
*
|
417
|
+
* call-seq:
|
418
|
+
* parser.reset => parser
|
419
|
+
*
|
420
|
+
* Resets the parser so it can be reused by another client
|
421
|
+
*/
|
422
|
+
static VALUE initialize(VALUE self)
|
423
|
+
{
|
424
|
+
http_parser_init(data_get(self));
|
425
|
+
|
426
|
+
return self;
|
427
|
+
}
|
428
|
+
|
429
|
+
/*
 * shifts the first +nr+ bytes off the front of +str+ in place, so the
 * caller's string points at unconsumed input afterwards
 */
static void advance_str(VALUE str, off_t nr)
{
  long len = RSTRING_LEN(str);

  if (len == 0)
    return;

  rb_str_modify(str); /* un-share/CoW before mutating */

  assert(nr <= len && "trying to advance past end of buffer");
  len -= nr;
  if (len > 0) /* unlikely, len is usually 0 */
    memmove(RSTRING_PTR(str), RSTRING_PTR(str) + nr, len);
  rb_str_set_len(str, len);
}
|
444
|
+
|
445
|
+
/**
|
446
|
+
* call-seq:
|
447
|
+
* parser.body_bytes_left => nil or Integer
|
448
|
+
*
|
449
|
+
* Returns the number of bytes left to run through Parser#filter_body.
|
450
|
+
* This will initially be the value of the "Content-Length" HTTP header
|
451
|
+
* after header parsing is complete and will decrease in value as
|
452
|
+
* Parser#filter_body is called for each chunk. This should return
|
453
|
+
* zero for responses with no body.
|
454
|
+
*
|
455
|
+
* This will return nil on "Transfer-Encoding: chunked" responses as
|
456
|
+
* well as HTTP/1.0 responses where Content-Length is not set
|
457
|
+
*/
|
458
|
+
static VALUE body_bytes_left(VALUE self)
|
459
|
+
{
|
460
|
+
struct http_parser *hp = data_get(self);
|
461
|
+
|
462
|
+
if (HP_FL_TEST(hp, CHUNKED))
|
463
|
+
return Qnil;
|
464
|
+
if (hp->len.content >= 0)
|
465
|
+
return OFFT2NUM(hp->len.content);
|
466
|
+
|
467
|
+
return Qnil;
|
468
|
+
}
|
469
|
+
|
470
|
+
/* parser.chunked? => true if "Transfer-Encoding: chunked" was seen */
static VALUE chunked(VALUE self)
{
  return HP_FL_TEST(data_get(self), CHUNKED) ? Qtrue : Qfalse;
}
|
476
|
+
|
477
|
+
/**
|
478
|
+
* Document-method: headers
|
479
|
+
* call-seq:
|
480
|
+
* parser.headers(hdr, data) => hdr or nil
|
481
|
+
*
|
482
|
+
* Takes a Hash and a String of data, parses the String of data filling
|
483
|
+
* in the Hash returning the Hash if parsing is finished, nil otherwise
|
484
|
+
* When returning the hdr Hash, it may modify data to point to where
|
485
|
+
* body processing should begin.
|
486
|
+
*
|
487
|
+
* Raises ParserError if there are parsing errors.
|
488
|
+
*/
|
489
|
+
static VALUE headers(VALUE self, VALUE hdr, VALUE data)
|
490
|
+
{
|
491
|
+
struct http_parser *hp = data_get(self);
|
492
|
+
|
493
|
+
rb_str_update(data);
|
494
|
+
|
495
|
+
http_parser_execute(hp, hdr, RSTRING_PTR(data), RSTRING_LEN(data));
|
496
|
+
VALIDATE_MAX_LENGTH(hp->offset, HEADER);
|
497
|
+
|
498
|
+
if (hp->cs == http_parser_first_final ||
|
499
|
+
hp->cs == http_parser_en_ChunkedBody) {
|
500
|
+
advance_str(data, hp->offset + 1);
|
501
|
+
hp->offset = 0;
|
502
|
+
if (HP_FL_TEST(hp, INTRAILER))
|
503
|
+
return hdr;
|
504
|
+
else
|
505
|
+
return rb_ary_new3(2, hp->status, hdr);
|
506
|
+
}
|
507
|
+
|
508
|
+
if (hp->cs == http_parser_error)
|
509
|
+
rb_raise(eParserError, "Invalid HTTP format, parsing fails.");
|
510
|
+
|
511
|
+
return Qnil;
|
512
|
+
}
|
513
|
+
|
514
|
+
static int chunked_eof(struct http_parser *hp)
|
515
|
+
{
|
516
|
+
return ((hp->cs == http_parser_first_final) || HP_FL_TEST(hp, INTRAILER));
|
517
|
+
}
|
518
|
+
|
519
|
+
/**
|
520
|
+
* call-seq:
|
521
|
+
* parser.body_eof? => true or false
|
522
|
+
*
|
523
|
+
* Detects if we're done filtering the body or not. This can be used
|
524
|
+
* to detect when to stop calling Parser#filter_body.
|
525
|
+
*/
|
526
|
+
static VALUE body_eof(VALUE self)
|
527
|
+
{
|
528
|
+
struct http_parser *hp = data_get(self);
|
529
|
+
|
530
|
+
if (HP_FL_TEST(hp, CHUNKED))
|
531
|
+
return chunked_eof(hp) ? Qtrue : Qfalse;
|
532
|
+
|
533
|
+
if (! HP_FL_TEST(hp, HASBODY))
|
534
|
+
return Qtrue;
|
535
|
+
|
536
|
+
return hp->len.content == 0 ? Qtrue : Qfalse;
|
537
|
+
}
|
538
|
+
|
539
|
+
/**
|
540
|
+
* call-seq:
|
541
|
+
* parser.keepalive? => true or false
|
542
|
+
*
|
543
|
+
* This should be used to detect if a request can really handle
|
544
|
+
* keepalives and pipelining. Currently, the rules are:
|
545
|
+
*
|
546
|
+
* 1. MUST be HTTP/1.1 +or+ HTTP/1.0 with "Connection: keep-alive"
|
547
|
+
* 2. MUST NOT have "Connection: close" set
|
548
|
+
* 3. If there is a response body, either a) Content-Length is set
|
549
|
+
* or b) chunked encoding is used
|
550
|
+
*/
|
551
|
+
static VALUE keepalive(VALUE self)
|
552
|
+
{
|
553
|
+
struct http_parser *hp = data_get(self);
|
554
|
+
|
555
|
+
if (HP_FL_ALL(hp, KEEPALIVE)) {
|
556
|
+
if ( HP_FL_TEST(hp, HASBODY) ) {
|
557
|
+
if (HP_FL_TEST(hp, CHUNKED) || (hp->len.content >= 0))
|
558
|
+
return Qtrue;
|
559
|
+
|
560
|
+
/* unknown Content-Length and not chunked, we must assume close */
|
561
|
+
return Qfalse;
|
562
|
+
} else {
|
563
|
+
/* 100 Continue, 304 Not Modified, etc... */
|
564
|
+
return Qtrue;
|
565
|
+
}
|
566
|
+
}
|
567
|
+
return Qfalse;
|
568
|
+
}
|
569
|
+
|
570
|
+
/**
|
571
|
+
* call-seq:
|
572
|
+
* parser.filter_body(buf, data) => nil/data
|
573
|
+
*
|
574
|
+
* Takes a String of +data+, will modify data if dechunking is done.
|
575
|
+
* Returns +nil+ if there is more data left to process. Returns
|
576
|
+
* +data+ if body processing is complete. When returning +data+,
|
577
|
+
* it may modify +data+ so the start of the string points to where
|
578
|
+
* the body ended so that trailer processing can begin.
|
579
|
+
*
|
580
|
+
* Raises ParserError if there are dechunking errors.
|
581
|
+
* Basically this is a glorified memcpy(3) that copies +data+
|
582
|
+
* into +buf+ while filtering it through the dechunker.
|
583
|
+
*/
|
584
|
+
static VALUE filter_body(VALUE self, VALUE buf, VALUE data)
|
585
|
+
{
|
586
|
+
struct http_parser *hp = data_get(self);
|
587
|
+
char *dptr;
|
588
|
+
long dlen;
|
589
|
+
|
590
|
+
rb_str_update(data);
|
591
|
+
dptr = RSTRING_PTR(data);
|
592
|
+
dlen = RSTRING_LEN(data);
|
593
|
+
|
594
|
+
StringValue(buf);
|
595
|
+
rb_str_resize(buf, dlen); /* we can never copy more than dlen bytes */
|
596
|
+
OBJ_TAINT(buf); /* keep weirdo $SAFE users happy */
|
597
|
+
|
598
|
+
if (!HP_FL_TEST(hp, CHUNKED))
|
599
|
+
rb_raise(rb_eRuntimeError, "filter_body is only for chunked bodies");
|
600
|
+
|
601
|
+
if (!chunked_eof(hp)) {
|
602
|
+
hp->s.dest_offset = 0;
|
603
|
+
http_parser_execute(hp, buf, dptr, dlen);
|
604
|
+
if (hp->cs == http_parser_error)
|
605
|
+
rb_raise(eParserError, "Invalid HTTP format, parsing fails.");
|
606
|
+
|
607
|
+
assert(hp->s.dest_offset <= hp->offset &&
|
608
|
+
"destination buffer overflow");
|
609
|
+
advance_str(data, hp->offset);
|
610
|
+
rb_str_set_len(buf, hp->s.dest_offset);
|
611
|
+
|
612
|
+
if (RSTRING_LEN(buf) == 0 && chunked_eof(hp)) {
|
613
|
+
assert(hp->len.chunk == 0 && "chunk at EOF but more to parse");
|
614
|
+
} else {
|
615
|
+
data = Qnil;
|
616
|
+
}
|
617
|
+
}
|
618
|
+
hp->offset = 0; /* for trailer parsing */
|
619
|
+
return data;
|
620
|
+
}
|
621
|
+
|
622
|
+
void Init_kcar_ext(void)
|
623
|
+
{
|
624
|
+
VALUE mKcar = rb_define_module("Kcar");
|
625
|
+
VALUE cParser = rb_define_class_under(mKcar, "Parser", rb_cObject);
|
626
|
+
|
627
|
+
eParserError = rb_define_class_under(mKcar, "ParserError", rb_eIOError);
|
628
|
+
|
629
|
+
rb_define_alloc_func(cParser, alloc);
|
630
|
+
rb_define_method(cParser, "initialize", initialize, 0);
|
631
|
+
rb_define_method(cParser, "reset", initialize, 0);
|
632
|
+
rb_define_method(cParser, "headers", headers, 2);
|
633
|
+
rb_define_method(cParser, "trailers", headers, 2);
|
634
|
+
rb_define_method(cParser, "filter_body", filter_body, 2);
|
635
|
+
rb_define_method(cParser, "body_bytes_left", body_bytes_left, 0);
|
636
|
+
rb_define_method(cParser, "body_eof?", body_eof, 0);
|
637
|
+
rb_define_method(cParser, "keepalive?", keepalive, 0);
|
638
|
+
rb_define_method(cParser, "chunked?", chunked, 0);
|
639
|
+
|
640
|
+
/*
|
641
|
+
* The maximum size a single chunk when using chunked transfer encoding.
|
642
|
+
* This is only a theoretical maximum used to detect errors in clients,
|
643
|
+
* it is highly unlikely to encounter clients that send more than
|
644
|
+
* several kilobytes at once.
|
645
|
+
*/
|
646
|
+
rb_define_const(cParser, "CHUNK_MAX", OFFT2NUM(UH_OFF_T_MAX));
|
647
|
+
|
648
|
+
/*
|
649
|
+
* The maximum size of the body as specified by Content-Length.
|
650
|
+
* This is only a theoretical maximum, the actual limit is subject
|
651
|
+
* to the limits of the file system used for +Dir.tmpdir+.
|
652
|
+
*/
|
653
|
+
rb_define_const(cParser, "LENGTH_MAX", OFFT2NUM(UH_OFF_T_MAX));
|
654
|
+
id_sq = rb_intern("[]");
|
655
|
+
id_sq_set = rb_intern("[]=");
|
656
|
+
}
|