hyperion-rb 1.6.2 → 2.10.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +4563 -0
  3. data/README.md +189 -13
  4. data/ext/hyperion_h2_codec/Cargo.lock +7 -0
  5. data/ext/hyperion_h2_codec/Cargo.toml +33 -0
  6. data/ext/hyperion_h2_codec/extconf.rb +73 -0
  7. data/ext/hyperion_h2_codec/src/frames.rs +140 -0
  8. data/ext/hyperion_h2_codec/src/hpack/huffman.rs +161 -0
  9. data/ext/hyperion_h2_codec/src/hpack.rs +457 -0
  10. data/ext/hyperion_h2_codec/src/lib.rs +296 -0
  11. data/ext/hyperion_http/extconf.rb +28 -0
  12. data/ext/hyperion_http/h2_codec_glue.c +408 -0
  13. data/ext/hyperion_http/page_cache.c +1125 -0
  14. data/ext/hyperion_http/parser.c +473 -38
  15. data/ext/hyperion_http/sendfile.c +982 -0
  16. data/ext/hyperion_http/websocket.c +493 -0
  17. data/ext/hyperion_io_uring/Cargo.lock +33 -0
  18. data/ext/hyperion_io_uring/Cargo.toml +34 -0
  19. data/ext/hyperion_io_uring/extconf.rb +74 -0
  20. data/ext/hyperion_io_uring/src/lib.rs +316 -0
  21. data/lib/hyperion/adapter/rack.rb +370 -42
  22. data/lib/hyperion/admin_listener.rb +207 -0
  23. data/lib/hyperion/admin_middleware.rb +36 -7
  24. data/lib/hyperion/cli.rb +310 -11
  25. data/lib/hyperion/config.rb +440 -14
  26. data/lib/hyperion/connection.rb +679 -22
  27. data/lib/hyperion/deprecations.rb +81 -0
  28. data/lib/hyperion/dispatch_mode.rb +165 -0
  29. data/lib/hyperion/fiber_local.rb +75 -13
  30. data/lib/hyperion/h2_admission.rb +77 -0
  31. data/lib/hyperion/h2_codec.rb +452 -0
  32. data/lib/hyperion/http/page_cache.rb +122 -0
  33. data/lib/hyperion/http/sendfile.rb +696 -0
  34. data/lib/hyperion/http2/native_hpack_adapter.rb +70 -0
  35. data/lib/hyperion/http2_handler.rb +368 -9
  36. data/lib/hyperion/io_uring.rb +317 -0
  37. data/lib/hyperion/lint_wrapper_pool.rb +126 -0
  38. data/lib/hyperion/master.rb +96 -9
  39. data/lib/hyperion/metrics/path_templater.rb +68 -0
  40. data/lib/hyperion/metrics.rb +256 -0
  41. data/lib/hyperion/prometheus_exporter.rb +150 -0
  42. data/lib/hyperion/request.rb +13 -0
  43. data/lib/hyperion/response_writer.rb +477 -16
  44. data/lib/hyperion/runtime.rb +195 -0
  45. data/lib/hyperion/server/route_table.rb +179 -0
  46. data/lib/hyperion/server.rb +519 -55
  47. data/lib/hyperion/static_preload.rb +133 -0
  48. data/lib/hyperion/thread_pool.rb +61 -7
  49. data/lib/hyperion/tls.rb +343 -1
  50. data/lib/hyperion/version.rb +1 -1
  51. data/lib/hyperion/websocket/close_codes.rb +71 -0
  52. data/lib/hyperion/websocket/connection.rb +876 -0
  53. data/lib/hyperion/websocket/frame.rb +356 -0
  54. data/lib/hyperion/websocket/handshake.rb +525 -0
  55. data/lib/hyperion/worker.rb +111 -9
  56. data/lib/hyperion.rb +137 -3
  57. metadata +50 -1
@@ -0,0 +1,1125 @@
1
+ /* ----------------------------------------------------------------------
2
+ * Hyperion::Http::PageCache — pre-built static-response cache.
3
+ *
4
+ * Borrowed from agoo's `agooPage` design (ext/agoo/page.c). For each
5
+ * cached static asset we hold ONE contiguous heap buffer that already
6
+ * contains the entire HTTP/1.1 response: status line + Content-Type +
7
+ * Content-Length + CRLF + body bytes.
8
+ *
9
+ * On the hot path (`PageCache.write_to(socket, path)`):
10
+ * 1. Hash-lookup the path in a chained bucket table
11
+ * (PAGE_BUCKET_SIZE = 1024, max key length MAX_KEY_LEN = 1024 —
12
+ * mirrors agoo).
13
+ * 2. If `last_check` is older than `recheck_seconds` AND the page is
14
+ * not marked immutable, stat() the file. If mtime changed, rebuild
15
+ * the response buffer; otherwise update `last_check` only.
16
+ * 3. write(socket_fd, response_buf, response_len) — ONE syscall.
17
+ *
18
+ * Per-request cost on a hit:
19
+ * * 0 file reads.
20
+ * * 0 mime lookups (mime is baked into response_buf).
21
+ * * 0 header building (status + Content-Type + Content-Length pre-built).
22
+ * * 0 Rack env construction (caller bypasses the Rack call entirely).
23
+ * * 0 Ruby allocations on the C path itself (we accept Integer fds via
24
+ * extract_fd, so no Ruby Strings are allocated; the return value is a
25
+ * small Integer or interned Symbol).
26
+ * * 1 socket write syscall in the common case (buffer fits in TCP send
27
+ * buffer; for the 1 KB row this always holds).
28
+ *
29
+ * Public Ruby surface (singleton methods on Hyperion::Http::PageCache):
30
+ *
31
+ * PageCache.fetch(path) -> :ok | :stale | :missing
32
+ * Returns whether `path` is currently in the cache (after honoring the
33
+ * mtime recheck). `:ok` — cached and fresh. `:stale` — was cached but
34
+ * re-stat showed mtime change and we rebuilt. `:missing` — not in cache
35
+ * (caller should call `cache_file` first).
36
+ *
37
+ * PageCache.cache_file(path) -> Integer | :missing
38
+ * Read `path` from disk, build the HTTP response buffer, store it under
39
+ * the canonical path key. Returns the body bytes count, or `:missing`
40
+ * when the file doesn't exist / can't be read.
41
+ *
42
+ * PageCache.preload(dir) -> Integer
43
+ * Walks `dir` recursively, calls cache_file for every regular file.
44
+ * Returns the count of files added.
45
+ *
46
+ * PageCache.write_to(socket_io, path) -> Integer | :missing
47
+ * Hot path. Looks up `path`, honours the mtime recheck (or skips it
48
+ * when the page is immutable), and writes the pre-built response to
49
+ * the socket. Returns bytes written, or `:missing` when not cached.
50
+ *
51
+ * PageCache.set_immutable(path, bool) -> bool
52
+ * Mark a specific path as immutable: subsequent `write_to` calls skip
53
+ * the mtime stat entirely. Use for assets fingerprinted by hash.
54
+ *
55
+ * PageCache.size -> Integer
56
+ * Number of pages currently cached.
57
+ *
58
+ * PageCache.clear -> nil
59
+ * Drop every entry. Used by specs and on graceful reload.
60
+ *
61
+ * PageCache.recheck_seconds -> Float
62
+ * PageCache.recheck_seconds=(seconds)
63
+ * Per-process tunable, default 5.0s, mirrors agoo's PAGE_RECHECK_TIME.
64
+ *
65
+ * PageCache.response_bytes(path) -> String | nil
66
+ * Specs-only helper: returns a frozen copy of the pre-built response
67
+ * buffer so tests can assert exact wire bytes.
68
+ *
69
+ * Concurrency
70
+ * -----------
71
+ * The cache is per-process. Hyperion's worker model gives each worker its
72
+ * own page cache — there is no IPC / shared memory cost. The hash table
73
+ * itself is guarded by a single Mutex (rb_mutex_*) on the structural ops
74
+ * (insert, evict, clear); the hot read path takes the Mutex briefly to
75
+ * fetch the page pointer, then runs the kernel `write()` *outside* any
76
+ * Ruby lock (under rb_thread_call_without_gvl) so other fibers / threads
77
+ * can run while the socket buffer drains.
78
+ *
79
+ * The C lock is a plain pthread mutex because Ruby's rb_mutex_lock can't
80
+ * be acquired from inside `rb_thread_call_without_gvl` (no GVL, no Ruby
81
+ * VM access). We acquire the pthread mutex briefly to read the slot,
82
+ * make a stack-local snapshot of the response_buf pointer + len, release
83
+ * the mutex, then issue the write. Eviction grabs the same pthread
84
+ * mutex; readers see a consistent snapshot or a `:missing` result if
85
+ * eviction won the race.
86
+ *
87
+ * 2.10-C — initial drop.
88
+ * ---------------------------------------------------------------------- */
89
+
90
#include <ruby.h>
#include <ruby/thread.h>
#include <ruby/io.h>

#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>     /* strcasecmp (POSIX) */
#include <sys/select.h>  /* fd_set, select (POSIX) */
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
106
+
107
/* Shared identifiers / refs. parser.c and sendfile.c each register their
 * own copies of Hyperion / Hyperion::Http (lazy-define if missing); we
 * follow the same pattern so init order doesn't matter. */
static VALUE rb_mHyperion_pc;           /* Hyperion module ref */
static VALUE rb_mHyperionHttp_pc;       /* Hyperion::Http module ref */
static VALUE rb_mHyperionHttpPageCache; /* Hyperion::Http::PageCache */

static ID id_fileno_pc; /* :fileno — used by hyp_pc_extract_fd */
static ID id_to_io_pc;  /* :to_io  — used by hyp_pc_extract_fd */

static VALUE sym_ok_pc;      /* :ok — cached and fresh */
static VALUE sym_stale_pc;   /* :stale — mtime changed, response rebuilt */
static VALUE sym_missing_pc; /* :missing — not in the cache */
/* 2.10-F — sentinel returned by `serve_request` when the path/method
 * tuple isn't a hit. Distinct from `:missing` so the Ruby caller can
 * tell "not in cache" (Rack-fallback) apart from "in cache but not
 * cached for this method" (also Rack-fallback, same outcome — but the
 * symbol is `:miss` so logs / metrics can tell them apart). */
static VALUE sym_miss_pc;

/* Bucket count must remain a power of two: lookups reduce the hash with
 * the bit-mask below rather than a modulo. */
#define HYP_PC_BUCKET_SIZE 1024u
#define HYP_PC_BUCKET_MASK (HYP_PC_BUCKET_SIZE - 1u)
#define HYP_PC_MAX_KEY_LEN 1024
#define HYP_PC_DEFAULT_RECHECK_SECONDS 5.0
/* Auto-engage threshold from Adapter::Rack (mirrored on the Ruby side as
 * well, but exposed here so specs can read the C constant). */
#define HYP_PC_AUTO_THRESHOLD (64 * 1024)

/* One cached page. The entire HTTP/1.1 response (status line + headers +
 * body) is pre-built in `response_buf`, so a cache hit costs a single
 * write(). All heap pointers are owned by the entry and released in
 * hyp_page_destroy(). */
typedef struct hyp_page_s {
    char *path;          /* canonical filesystem path; heap-owned */
    size_t path_len;
    char *response_buf;  /* pre-built HTTP/1.1 response, heap-owned */
    size_t response_len;
    size_t body_len;     /* informational; body bytes only */
    size_t headers_len;  /* headers-only span (for HEAD writes) =
                          * response_len - body_len for cache_file
                          * entries; explicit for register_prebuilt
                          * entries that may carry a chunked body. */
    time_t mtime;        /* last-known file mtime; 0 for register_prebuilt */
    double last_check;   /* dtime() of last stat */
    int immutable;       /* non-zero → never re-stat */
    int prebuilt;        /* 1 = registered via register_prebuilt
                          * (no on-disk file backing — never re-stat,
                          * never invalidate on missing file). */
    char *content_type;  /* heap-owned, picked at insert time */
} hyp_page_t;

/* Hash-table node. Collisions are handled by a singly-linked chain per
 * bucket (separate chaining). The full 64-bit hash is stored so lookups
 * can reject non-matches before the memcmp. */
typedef struct hyp_page_slot_s {
    struct hyp_page_slot_s *next;
    uint64_t hash;
    hyp_page_t *page;
} hyp_page_slot_t;

/* Global per-process cache state. Structural access (insert / evict /
 * clear / lookup) is serialized by hyp_pc_lock — a plain pthread mutex
 * so it stays usable off the GVL. */
static hyp_page_slot_t *hyp_pc_buckets[HYP_PC_BUCKET_SIZE];
static size_t hyp_pc_count; /* live entry count */
static double hyp_pc_recheck_seconds = HYP_PC_DEFAULT_RECHECK_SECONDS;
static pthread_mutex_t hyp_pc_lock = PTHREAD_MUTEX_INITIALIZER;
164
+
165
/* ============================================================
 * Mime suffix → Content-Type. Borrowed wholesale from agoo's
 * mime_map[] in ext/agoo/page.c.
 * ============================================================ */
typedef struct {
    const char *suffix;
    const char *type;
} hyp_pc_mime_t;

static const hyp_pc_mime_t hyp_pc_mime_map[] = {
    { "asc", "text/plain" },
    { "avi", "video/x-msvideo" },
    { "bin", "application/octet-stream" },
    { "bmp", "image/bmp" },
    { "css", "text/css" },
    { "csv", "text/csv" },
    { "eot", "application/vnd.ms-fontobject" },
    { "gif", "image/gif" },
    { "gz", "application/gzip" },
    { "htm", "text/html" },
    { "html", "text/html" },
    { "ico", "image/x-icon" },
    { "jpeg", "image/jpeg" },
    { "jpg", "image/jpeg" },
    { "js", "application/javascript" },
    { "json", "application/json" },
    { "map", "application/json" },
    { "mp3", "audio/mpeg" },
    { "mp4", "video/mp4" },
    { "ogg", "audio/ogg" },
    { "pdf", "application/pdf" },
    { "png", "image/png" },
    { "rss", "application/rss+xml" },
    { "svg", "image/svg+xml" },
    { "tif", "image/tiff" },
    { "tiff", "image/tiff" },
    { "ttf", "application/font-sfnt" },
    { "txt", "text/plain; charset=utf-8" },
    { "wasm", "application/wasm" },
    { "webm", "video/webm" },
    { "webp", "image/webp" },
    { "woff", "application/font-woff" },
    { "woff2", "font/woff2" },
    { "xml", "application/xml" },
    { "yml", "application/yaml" },
    { "yaml", "application/yaml" },
    { "zip", "application/zip" },
    { NULL, NULL }
};

static const char hyp_pc_default_ct[] = "application/octet-stream";

/* Map a filename's extension (case-insensitive) to a Content-Type.
 * Falls back to application/octet-stream when there is no extension,
 * the extension is empty, or nothing in the table matches. Returns a
 * pointer into static data — callers must never free it. */
static const char *hyp_pc_lookup_mime(const char *path) {
    const char *dot = (path != NULL) ? strrchr(path, '.') : NULL;
    const char *ext = (dot != NULL && dot[1] != '\0') ? dot + 1 : NULL;

    if (ext != NULL) {
        for (const hyp_pc_mime_t *entry = hyp_pc_mime_map;
             entry->suffix != NULL;
             entry++) {
            if (strcasecmp(ext, entry->suffix) == 0) {
                return entry->type;
            }
        }
    }
    return hyp_pc_default_ct;
}
236
+
237
+ /* Wall-clock seconds with sub-second precision. Mirrors agoo's dtime(). */
238
+ static double hyp_pc_now(void) {
239
+ struct timeval tv;
240
+ if (gettimeofday(&tv, NULL) != 0) {
241
+ return 0.0;
242
+ }
243
+ return (double)tv.tv_sec + (double)tv.tv_usec / 1.0e6;
244
+ }
245
+
246
/* FNV-1a 64-bit. Stable, cheap, branchless on the hot path; not a
 * cryptographic hash but the cache only stores trusted operator paths.
 *
 * Fix: the offset basis was previously 1469598103934665603 — the
 * canonical FNV-1a 64-bit basis (14695981039346656037) with a digit
 * dropped, which silently forfeits the published dispersion properties.
 * Hash values are only ever compared against other values produced by
 * this same function in the same process, so correcting the constant
 * is safe for all callers. */
static uint64_t hyp_pc_hash(const char *key, size_t len) {
    uint64_t h = 14695981039346656037ULL; /* FNV-1a 64-bit offset basis */
    for (size_t i = 0; i < len; i++) {
        h ^= (uint64_t)(unsigned char)key[i];
        h *= 1099511628211ULL; /* FNV 64-bit prime */
    }
    return h;
}
256
+
257
+ static void hyp_page_destroy(hyp_page_t *p) {
258
+ if (p == NULL) {
259
+ return;
260
+ }
261
+ free(p->path);
262
+ free(p->response_buf);
263
+ free(p->content_type);
264
+ free(p);
265
+ }
266
+
267
/* Build the pre-baked HTTP response buffer for `body` of `body_len` bytes
 * with the given content-type. Allocates via malloc; caller owns the
 * buffer (free() on eviction).
 *
 * Wire format:
 *   HTTP/1.1 200 OK\r\n
 *   Content-Type: <content_type>\r\n
 *   Content-Length: <body_len>\r\n
 *   \r\n
 *   <body bytes>
 */
static char *hyp_pc_build_response(const char *body, size_t body_len,
                                   const char *content_type,
                                   size_t *out_response_len) {
    static const char fmt[] =
        "HTTP/1.1 200 OK\r\n"
        "Content-Type: %s\r\n"
        "Content-Length: %zu\r\n"
        "\r\n";

    /* Pass 1: measure the exact header length (snprintf with a zero-size
     * destination returns the would-be length, excluding the NUL). */
    int measured = snprintf(NULL, 0, fmt, content_type, body_len);
    if (measured < 0) {
        return NULL;
    }
    size_t header_len = (size_t)measured;

    /* +1 for snprintf's trailing NUL; that byte is overwritten by the
     * first body byte (or sits past response_len) and is never sent. */
    char *out = (char *)malloc(header_len + body_len + 1);
    if (out == NULL) {
        return NULL;
    }
    (void)snprintf(out, header_len + 1, fmt, content_type, body_len);

    if (body != NULL && body_len > 0) {
        memcpy(out + header_len, body, body_len);
    }
    *out_response_len = header_len + body_len;
    return out;
}
311
+
312
+ /* Read `path` into a newly-allocated body buffer. *out_len receives the
313
+ * size; *out_mtime receives the file mtime. Returns the buffer pointer
314
+ * (caller frees) or NULL on error. */
315
+ static char *hyp_pc_read_file(const char *path, size_t *out_len, time_t *out_mtime) {
316
+ int fd = open(path, O_RDONLY);
317
+ if (fd < 0) {
318
+ return NULL;
319
+ }
320
+ struct stat st;
321
+ if (fstat(fd, &st) != 0 || !S_ISREG(st.st_mode)) {
322
+ close(fd);
323
+ return NULL;
324
+ }
325
+ size_t len = (size_t)st.st_size;
326
+ char *buf = NULL;
327
+ if (len == 0) {
328
+ /* Allocate a 1-byte sentinel so callers that expect non-NULL
329
+ * for "successfully read" still get a valid pointer. We never
330
+ * read into it. */
331
+ buf = (char *)malloc(1);
332
+ if (buf == NULL) {
333
+ close(fd);
334
+ return NULL;
335
+ }
336
+ } else {
337
+ buf = (char *)malloc(len);
338
+ if (buf == NULL) {
339
+ close(fd);
340
+ return NULL;
341
+ }
342
+ size_t total = 0;
343
+ while (total < len) {
344
+ ssize_t n = read(fd, buf + total, len - total);
345
+ if (n > 0) {
346
+ total += (size_t)n;
347
+ continue;
348
+ }
349
+ if (n < 0 && errno == EINTR) {
350
+ continue;
351
+ }
352
+ free(buf);
353
+ close(fd);
354
+ return NULL;
355
+ }
356
+ }
357
+ close(fd);
358
+ *out_len = len;
359
+ *out_mtime = st.st_mtime;
360
+ return buf;
361
+ }
362
+
363
+ /* Find an existing slot for `path`; returns NULL when not present.
364
+ * Caller must hold hyp_pc_lock. */
365
+ static hyp_page_slot_t *hyp_pc_find_slot(const char *path, size_t path_len, uint64_t h) {
366
+ hyp_page_slot_t *slot = hyp_pc_buckets[h & HYP_PC_BUCKET_MASK];
367
+ while (slot != NULL) {
368
+ if (slot->hash == h
369
+ && slot->page->path_len == path_len
370
+ && memcmp(slot->page->path, path, path_len) == 0) {
371
+ return slot;
372
+ }
373
+ slot = slot->next;
374
+ }
375
+ return NULL;
376
+ }
377
+
378
+ /* Insert a page under `path`. Replaces any existing entry (which is
379
+ * destroyed in place). Caller must hold hyp_pc_lock. */
380
+ static void hyp_pc_insert_locked(hyp_page_t *page, uint64_t h) {
381
+ size_t bucket_idx = (size_t)(h & HYP_PC_BUCKET_MASK);
382
+ hyp_page_slot_t *slot = hyp_pc_buckets[bucket_idx];
383
+ while (slot != NULL) {
384
+ if (slot->hash == h
385
+ && slot->page->path_len == page->path_len
386
+ && memcmp(slot->page->path, page->path, page->path_len) == 0) {
387
+ /* Overwrite — destroy old body, swap in new. Counter is
388
+ * unchanged because we replaced an existing entry. */
389
+ hyp_page_destroy(slot->page);
390
+ slot->page = page;
391
+ return;
392
+ }
393
+ slot = slot->next;
394
+ }
395
+ slot = (hyp_page_slot_t *)malloc(sizeof(*slot));
396
+ if (slot == NULL) {
397
+ hyp_page_destroy(page);
398
+ return;
399
+ }
400
+ slot->next = hyp_pc_buckets[bucket_idx];
401
+ slot->hash = h;
402
+ slot->page = page;
403
+ hyp_pc_buckets[bucket_idx] = slot;
404
+ hyp_pc_count++;
405
+ }
406
+
407
+ /* Build a hyp_page_t struct from raw inputs. Returns NULL on alloc fail. */
408
+ static hyp_page_t *hyp_pc_alloc_page(const char *path, size_t path_len,
409
+ const char *body, size_t body_len,
410
+ time_t mtime) {
411
+ hyp_page_t *p = (hyp_page_t *)calloc(1, sizeof(*p));
412
+ if (p == NULL) {
413
+ return NULL;
414
+ }
415
+ p->path = (char *)malloc(path_len + 1);
416
+ if (p->path == NULL) {
417
+ free(p);
418
+ return NULL;
419
+ }
420
+ memcpy(p->path, path, path_len);
421
+ p->path[path_len] = '\0';
422
+ p->path_len = path_len;
423
+
424
+ const char *ct = hyp_pc_lookup_mime(path);
425
+ p->content_type = strdup(ct);
426
+ if (p->content_type == NULL) {
427
+ free(p->path);
428
+ free(p);
429
+ return NULL;
430
+ }
431
+ size_t resp_len = 0;
432
+ p->response_buf = hyp_pc_build_response(body, body_len, ct, &resp_len);
433
+ if (p->response_buf == NULL) {
434
+ free(p->content_type);
435
+ free(p->path);
436
+ free(p);
437
+ return NULL;
438
+ }
439
+ p->response_len = resp_len;
440
+ p->body_len = body_len;
441
+ p->headers_len = (resp_len >= body_len) ? (resp_len - body_len) : resp_len;
442
+ p->mtime = mtime;
443
+ p->last_check = hyp_pc_now();
444
+ p->immutable = 0;
445
+ p->prebuilt = 0;
446
+ return p;
447
+ }
448
+
449
+ /* PageCache.cache_file(path) — read the file, build the response, insert.
450
+ * Returns the body byte count on success, or :missing on read failure. */
451
+ static VALUE rb_pc_cache_file(VALUE self, VALUE rb_path) {
452
+ (void)self;
453
+ Check_Type(rb_path, T_STRING);
454
+ const char *path = RSTRING_PTR(rb_path);
455
+ size_t path_len = (size_t)RSTRING_LEN(rb_path);
456
+ if (path_len == 0 || path_len > HYP_PC_MAX_KEY_LEN) {
457
+ return sym_missing_pc;
458
+ }
459
+
460
+ size_t body_len = 0;
461
+ time_t mtime = 0;
462
+ char *body = hyp_pc_read_file(path, &body_len, &mtime);
463
+ if (body == NULL) {
464
+ return sym_missing_pc;
465
+ }
466
+
467
+ hyp_page_t *page = hyp_pc_alloc_page(path, path_len, body, body_len, mtime);
468
+ free(body);
469
+ if (page == NULL) {
470
+ return sym_missing_pc;
471
+ }
472
+
473
+ uint64_t h = hyp_pc_hash(path, path_len);
474
+ pthread_mutex_lock(&hyp_pc_lock);
475
+ hyp_pc_insert_locked(page, h);
476
+ pthread_mutex_unlock(&hyp_pc_lock);
477
+
478
+ return SIZET2NUM(body_len);
479
+ }
480
+
481
/* Internal: find page + honor mtime recheck. Returns the slot pointer
 * (still under the lock) or NULL when missing or rebuild failed. The
 * caller is responsible for releasing the lock and snapshotting whatever
 * fields it needs.
 *
 * Sets *was_stale to 1 if the file's mtime changed and we rebuilt the
 * response; 0 otherwise.
 *
 * Order of checks:
 *   1. immutable / prebuilt entries skip the stat entirely.
 *   2. entries checked within hyp_pc_recheck_seconds are trusted as-is.
 *   3. otherwise stat(): a vanished file evicts the entry; an unchanged
 *      mtime just refreshes last_check; a changed mtime triggers a full
 *      read + response rebuild in place.
 *
 * NOTE(review): when the rebuild read or response build fails, this
 * returns NULL (caller reports :missing) but the entry stays in the
 * table with the OLD response bytes — a later call within the recheck
 * window will serve those stale bytes. Confirm that is the intended
 * degradation. Also note the stat()/read() here run while holding both
 * hyp_pc_lock and the GVL, so a slow disk briefly stalls other cache
 * users — acceptable only because rechecks are rate-limited. */
static hyp_page_slot_t *hyp_pc_lookup_locked(const char *path, size_t path_len,
                                             int *was_stale) {
    uint64_t h = hyp_pc_hash(path, path_len);
    hyp_page_slot_t *slot = hyp_pc_find_slot(path, path_len, h);
    if (slot == NULL) {
        return NULL;
    }
    hyp_page_t *p = slot->page;
    /* Case 1: never re-stat immutable or prebuilt (no file) entries. */
    if (p->immutable || p->prebuilt) {
        if (was_stale) *was_stale = 0;
        return slot;
    }
    /* Case 2: recently checked — trust the cached response. */
    double now = hyp_pc_now();
    if (now - p->last_check < hyp_pc_recheck_seconds) {
        if (was_stale) *was_stale = 0;
        return slot;
    }
    /* Time to re-stat. */
    struct stat st;
    if (stat(p->path, &st) != 0 || !S_ISREG(st.st_mode)) {
        /* File vanished underneath us — drop the entry. Walk the chain
         * with a pointer-to-pointer so unlinking works at any position. */
        hyp_page_slot_t **head = &hyp_pc_buckets[h & HYP_PC_BUCKET_MASK];
        while (*head != NULL) {
            if (*head == slot) {
                *head = slot->next;
                hyp_page_destroy(slot->page);
                free(slot);
                hyp_pc_count--;
                break;
            }
            head = &(*head)->next;
        }
        return NULL;
    }
    if (st.st_mtime == p->mtime) {
        /* Unchanged — just push the next recheck out. */
        p->last_check = now;
        if (was_stale) *was_stale = 0;
        return slot;
    }
    /* mtime changed — rebuild. */
    size_t new_body_len = 0;
    time_t new_mtime = 0;
    char *new_body = hyp_pc_read_file(p->path, &new_body_len, &new_mtime);
    if (new_body == NULL) {
        return NULL;
    }
    size_t new_resp_len = 0;
    char *new_resp = hyp_pc_build_response(new_body, new_body_len,
                                           p->content_type, &new_resp_len);
    free(new_body);
    if (new_resp == NULL) {
        return NULL;
    }
    /* Swap the response in place; old buffer is freed under the lock,
     * so no reader can still reference it (readers snapshot under the
     * same lock). */
    free(p->response_buf);
    p->response_buf = new_resp;
    p->response_len = new_resp_len;
    p->body_len = new_body_len;
    p->headers_len = (new_resp_len >= new_body_len) ? (new_resp_len - new_body_len) : new_resp_len;
    p->mtime = new_mtime;
    p->last_check = now;
    if (was_stale) *was_stale = 1;
    return slot;
}
551
+
552
+ /* PageCache.fetch(path) -> :ok | :stale | :missing */
553
+ static VALUE rb_pc_fetch(VALUE self, VALUE rb_path) {
554
+ (void)self;
555
+ Check_Type(rb_path, T_STRING);
556
+ const char *path = RSTRING_PTR(rb_path);
557
+ size_t path_len = (size_t)RSTRING_LEN(rb_path);
558
+ if (path_len == 0 || path_len > HYP_PC_MAX_KEY_LEN) {
559
+ return sym_missing_pc;
560
+ }
561
+
562
+ int was_stale = 0;
563
+ pthread_mutex_lock(&hyp_pc_lock);
564
+ hyp_page_slot_t *slot = hyp_pc_lookup_locked(path, path_len, &was_stale);
565
+ pthread_mutex_unlock(&hyp_pc_lock);
566
+
567
+ if (slot == NULL) {
568
+ return sym_missing_pc;
569
+ }
570
+ return was_stale ? sym_stale_pc : sym_ok_pc;
571
+ }
572
+
573
/* Extract a kernel fd from a Ruby IO-ish object. Same contract as the
 * helper in sendfile.c, copy-pasted intentionally so this translation
 * unit doesn't depend on sendfile.c's internals.
 *
 * Accepted shapes, tried in order:
 *   1. Integer              — treated as a raw fd, returned verbatim.
 *   2. IO (T_FILE)          — rb_io_descriptor().
 *   3. responds to #to_io   — unwrap, then accept an IO or Integer.
 *   4. responds to #fileno  — accept an Integer return only.
 * Anything else raises TypeError, naming `role` (the caller-supplied
 * parameter name) in the message. The trailing `return -1` is
 * unreachable after rb_raise and exists to silence compilers.
 *
 * NOTE(review): rb_io_descriptor is the modern fd accessor (Ruby 3.1+)
 * — confirm the gem's minimum supported Ruby matches. */
static int hyp_pc_extract_fd(VALUE obj, const char *role) {
    if (RB_TYPE_P(obj, T_FIXNUM) || RB_TYPE_P(obj, T_BIGNUM)) {
        return NUM2INT(obj);
    }
    if (RB_TYPE_P(obj, T_FILE)) {
        return rb_io_descriptor(obj);
    }
    if (rb_respond_to(obj, id_to_io_pc)) {
        VALUE io = rb_funcall(obj, id_to_io_pc, 0);
        if (RB_TYPE_P(io, T_FILE)) {
            return rb_io_descriptor(io);
        }
        if (RB_TYPE_P(io, T_FIXNUM) || RB_TYPE_P(io, T_BIGNUM)) {
            return NUM2INT(io);
        }
    }
    if (rb_respond_to(obj, id_fileno_pc)) {
        VALUE fd = rb_funcall(obj, id_fileno_pc, 0);
        if (RB_TYPE_P(fd, T_FIXNUM) || RB_TYPE_P(fd, T_BIGNUM)) {
            return NUM2INT(fd);
        }
    }
    rb_raise(rb_eTypeError,
             "Hyperion::Http::PageCache.write_to: %s argument must be an IO, "
             "an Integer fd, or respond to #to_io / #fileno",
             role);
    return -1;
}
604
+
605
typedef struct {
    int fd;          /* destination socket fd */
    const char *buf; /* bytes to drain */
    size_t len;      /* total bytes to write */
    ssize_t total;   /* out: bytes actually written */
    int err;         /* out: errno-style failure code, 0 on success */
} hyp_pc_write_args_t;

/* Drains the entire response_buf to the socket. Runs without the GVL
 * (rb_thread_call_without_gvl). EAGAIN is handled inline with a bounded
 * select() poll; for the 1 KB / 8 KB row this almost never fires.
 * NOTE(review): select() is undefined for fds >= FD_SETSIZE — confirm
 * the server never routes such a descriptor through this path. */
static void *hyp_pc_write_blocking(void *raw) {
    hyp_pc_write_args_t *args = (hyp_pc_write_args_t *)raw;
    int budget = 5; /* bounded EAGAIN retries before giving up */

    args->total = 0;
    args->err = 0;

    while ((size_t)args->total < args->len) {
        ssize_t wrote = write(args->fd,
                              args->buf + args->total,
                              args->len - (size_t)args->total);
        if (wrote > 0) {
            args->total += wrote;
        } else if (wrote == 0) {
            /* Zero-byte write against a non-zero request — peer gone. */
            args->err = EIO;
            break;
        } else if (errno == EINTR) {
            /* Signal interruption — retry unconditionally. */
        } else if (errno == EAGAIN || errno == EWOULDBLOCK) {
            if (budget-- <= 0) {
                args->err = EAGAIN;
                break;
            }
            /* Wait up to 10 ms for writability, then try again. */
            fd_set writable;
            FD_ZERO(&writable);
            FD_SET(args->fd, &writable);
            struct timeval wait;
            wait.tv_sec = 0;
            wait.tv_usec = 10000;
            (void)select(args->fd + 1, NULL, &writable, NULL, &wait);
        } else {
            args->err = errno;
            break;
        }
    }
    return NULL;
}
655
+
656
/* PageCache.write_to(socket_io, path) -> Integer | :missing
 *
 * Lookup, then write the pre-built response. Lookup grabs the C lock
 * briefly to snapshot (response_buf, response_len) onto the stack; the
 * actual write runs without the GVL and without any Ruby-level lock,
 * so other fibers / threads can run while the socket buffer drains.
 *
 * Error contract: raises SystemCallError only when NOTHING was written
 * (err set and total == 0); a partial write returns the partial byte
 * count without raising, leaving short-count handling to the caller.
 * Raises TypeError when socket_io is not fd-convertible and
 * NoMemoryError when the snapshot allocation fails. */
static VALUE rb_pc_write_to(VALUE self, VALUE socket_io, VALUE rb_path) {
    (void)self;
    Check_Type(rb_path, T_STRING);
    const char *path = RSTRING_PTR(rb_path);
    size_t path_len = (size_t)RSTRING_LEN(rb_path);
    if (path_len == 0 || path_len > HYP_PC_MAX_KEY_LEN) {
        return sym_missing_pc;
    }

    /* May raise TypeError — nothing is locked or allocated yet. */
    int fd = hyp_pc_extract_fd(socket_io, "socket_io");

    /* Snapshot the response under the lock, then release before we do
     * the kernel write — readers MUST NOT hold the C mutex across a
     * blocking syscall. We malloc a transient buffer rather than
     * referencing the slot's response_buf directly because eviction
     * could free that memory mid-write otherwise. */
    pthread_mutex_lock(&hyp_pc_lock);
    int was_stale = 0;
    hyp_page_slot_t *slot = hyp_pc_lookup_locked(path, path_len, &was_stale);
    if (slot == NULL) {
        pthread_mutex_unlock(&hyp_pc_lock);
        return sym_missing_pc;
    }
    size_t resp_len = slot->page->response_len;
    char *snapshot = (char *)malloc(resp_len);
    if (snapshot == NULL) {
        /* Unlock before raising — rb_raise longjmps out of here. */
        pthread_mutex_unlock(&hyp_pc_lock);
        rb_raise(rb_eNoMemError, "Hyperion::Http::PageCache.write_to: "
                 "failed to snapshot response (%zu bytes)", resp_len);
    }
    memcpy(snapshot, slot->page->response_buf, resp_len);
    pthread_mutex_unlock(&hyp_pc_lock);

    hyp_pc_write_args_t args;
    args.fd = fd;
    args.buf = snapshot;
    args.len = resp_len;
    args.total = 0;
    args.err = 0;

    /* RUBY_UBF_IO lets the VM interrupt the blocked write (shutdown,
     * Thread#kill) instead of hanging the process. */
    rb_thread_call_without_gvl(hyp_pc_write_blocking, &args, RUBY_UBF_IO, NULL);

    free(snapshot);

    if (args.err != 0 && args.total == 0) {
        errno = args.err;
        rb_sys_fail("Hyperion::Http::PageCache.write_to");
    }
    return SSIZET2NUM(args.total);
}
712
+
713
/* PageCache.register_prebuilt(path, response_bytes, body_len) -> Integer
 *
 * 2.10-F — register a fully prebuilt HTTP response under a route path
 * (e.g. `/health`). Unlike `cache_file`, the entry has NO on-disk
 * backing — `serve_request` looks it up directly and writes the
 * stored bytes. `body_len` tells `serve_request` where the body
 * starts inside `response_bytes` so HEAD requests can write the
 * headers-only prefix.
 *
 * `response_bytes.bytesize` MUST be >= `body_len`. Returns the
 * stored response byte count on success. Raises ArgError on a bad
 * path/body_len, NoMemoryError when any allocation fails (all
 * partially-built buffers are freed before raising).
 *
 * Used by `Hyperion::Server.handle_static` to fold the prebuilt
 * static-route response into the C fast path so the request hot
 * path is one hash lookup + one `write()` syscall, fully outside
 * Ruby method dispatch.
 *
 * NOTE(review): resp_buf is captured from RSTRING_PTR before
 * NUM2LONG(rb_body_len); if rb_body_len is not a Fixnum, NUM2LONG can
 * invoke Ruby (#to_int) and potentially trigger GC compaction before
 * the memcpy below — confirm callers always pass an Integer, or move
 * the RSTRING_PTR read after the conversions. */
static VALUE rb_pc_register_prebuilt(VALUE self, VALUE rb_path,
                                     VALUE rb_response, VALUE rb_body_len) {
    (void)self;
    Check_Type(rb_path, T_STRING);
    Check_Type(rb_response, T_STRING);

    const char *path = RSTRING_PTR(rb_path);
    size_t path_len = (size_t)RSTRING_LEN(rb_path);
    if (path_len == 0 || path_len > HYP_PC_MAX_KEY_LEN) {
        rb_raise(rb_eArgError, "Hyperion::Http::PageCache.register_prebuilt: "
                 "path empty or > %d bytes", HYP_PC_MAX_KEY_LEN);
    }
    const char *resp_buf = RSTRING_PTR(rb_response);
    size_t resp_len = (size_t)RSTRING_LEN(rb_response);
    long body_len_signed = NUM2LONG(rb_body_len);
    if (body_len_signed < 0) {
        rb_raise(rb_eArgError, "body_len must be >= 0");
    }
    size_t body_len = (size_t)body_len_signed;
    if (body_len > resp_len) {
        rb_raise(rb_eArgError,
                 "body_len (%zu) must be <= response_bytes.bytesize (%zu)",
                 body_len, resp_len);
    }

    /* calloc zero-fills so every field not set below starts at 0/NULL. */
    hyp_page_t *page = (hyp_page_t *)calloc(1, sizeof(*page));
    if (page == NULL) {
        rb_raise(rb_eNoMemError, "register_prebuilt: page alloc");
    }
    page->path = (char *)malloc(path_len + 1);
    if (page->path == NULL) {
        free(page);
        rb_raise(rb_eNoMemError, "register_prebuilt: path alloc");
    }
    memcpy(page->path, path, path_len);
    page->path[path_len] = '\0';
    page->path_len = path_len;

    /* For prebuilt entries we don't attempt mime sniffing — the
     * caller already baked Content-Type into the response. Stash a
     * placeholder so response_bytes() / content_type() helpers stay
     * functional. */
    page->content_type = strdup("__prebuilt__");
    if (page->content_type == NULL) {
        free(page->path);
        free(page);
        rb_raise(rb_eNoMemError, "register_prebuilt: content_type alloc");
    }

    page->response_buf = (char *)malloc(resp_len);
    if (page->response_buf == NULL) {
        free(page->content_type);
        free(page->path);
        free(page);
        rb_raise(rb_eNoMemError, "register_prebuilt: response alloc (%zu bytes)",
                 resp_len);
    }
    memcpy(page->response_buf, resp_buf, resp_len);
    page->response_len = resp_len;
    page->body_len = body_len;
    page->headers_len = resp_len - body_len; /* safe: body_len <= resp_len */
    page->mtime = 0;                         /* no file backing */
    page->last_check = hyp_pc_now();
    page->immutable = 1;                     /* never re-stat */
    page->prebuilt = 1;                      /* never invalidate on missing file */

    uint64_t h = hyp_pc_hash(path, path_len);
    pthread_mutex_lock(&hyp_pc_lock);
    hyp_pc_insert_locked(page, h);
    pthread_mutex_unlock(&hyp_pc_lock);

    return SIZET2NUM(resp_len);
}
802
+
803
+ /* 2.10-F — fast-path lookup-and-write.
804
+ *
805
+ * Method gate: only GET and HEAD are eligible. Anything else returns
806
+ * `:miss` so the Ruby caller falls through to its non-cache path.
807
+ * Comparison is case-insensitive against ASCII bytes (the request
808
+ * line method is parsed verbatim, so callers that already canonical-
809
+ * cased their method gain a single fast path).
810
+ *
811
+ * Returns:
812
+ * * `[:ok, bytes_written]` — hit, response (or headers-only on HEAD)
813
+ * was written in full.
814
+ * * `:miss` — no match (path absent, method not GET/HEAD, or
815
+ * boundary-case empty/oversized path).
816
+ *
817
+ * Concurrency: the C lock is held just long enough to snapshot the
818
+ * response bytes onto the heap; the actual `write()` runs without the
819
+ * GVL via `rb_thread_call_without_gvl`. */
820
/* Request-method classes the page-cache fast path distinguishes. */
typedef enum {
    HYP_PC_METHOD_OTHER = 0,
    HYP_PC_METHOD_GET = 1,
    HYP_PC_METHOD_HEAD = 2
} hyp_pc_method_t;

/* Classify an HTTP method token, case-insensitively, against the two
 * cache-eligible verbs. Any other token (including prefixes such as
 * "GE" or extensions such as "GETX") is OTHER.
 *
 * OR-ing an ASCII byte with 0x20 maps exactly {'G','g'} -> 'g' (and
 * likewise for the other letters), so the folded comparison below is
 * byte-for-byte equivalent to the explicit upper/lower pair checks. */
static hyp_pc_method_t hyp_pc_classify_method(const char *m, size_t len) {
    if (len == 3 &&
        (m[0] | 0x20) == 'g' &&
        (m[1] | 0x20) == 'e' &&
        (m[2] | 0x20) == 't') {
        return HYP_PC_METHOD_GET;
    }
    if (len == 4 &&
        (m[0] | 0x20) == 'h' &&
        (m[1] | 0x20) == 'e' &&
        (m[2] | 0x20) == 'a' &&
        (m[3] | 0x20) == 'd') {
        return HYP_PC_METHOD_HEAD;
    }
    return HYP_PC_METHOD_OTHER;
}
842
+
843
/* PageCache.serve_request(socket_io, method, path) -> [:ok, n] | :miss
 *
 * Fast-path cache hit: looks up `path`, and on a hit writes the
 * pre-built response bytes (headers only for HEAD) directly to the
 * socket's fd, releasing the GVL for the write.
 *
 * Returns :miss for non-GET/HEAD methods, empty/oversized paths, or
 * a cache miss; otherwise [:ok, bytes_written].
 *
 * Raises NoMemError if the snapshot allocation fails, and a SystemCallError
 * if the write fails before any byte goes out. */
static VALUE rb_pc_serve_request(VALUE self, VALUE socket_io,
                                 VALUE rb_method, VALUE rb_path) {
    (void)self;
    Check_Type(rb_method, T_STRING);
    Check_Type(rb_path, T_STRING);

    /* Method gate: only GET/HEAD are cache-eligible. */
    const char *method = RSTRING_PTR(rb_method);
    size_t mlen = (size_t)RSTRING_LEN(rb_method);
    hyp_pc_method_t kind = hyp_pc_classify_method(method, mlen);
    if (kind == HYP_PC_METHOD_OTHER) {
        return sym_miss_pc;
    }

    /* Boundary cases (empty or oversized key) are misses, not errors,
     * matching the lookup helpers elsewhere in this file. */
    const char *path = RSTRING_PTR(rb_path);
    size_t path_len = (size_t)RSTRING_LEN(rb_path);
    if (path_len == 0 || path_len > HYP_PC_MAX_KEY_LEN) {
        return sym_miss_pc;
    }

    /* Resolve the fd up front — extract_fd may raise, and we want
     * the raise to happen BEFORE we acquire the C lock or allocate. */
    int fd = hyp_pc_extract_fd(socket_io, "socket_io");

    pthread_mutex_lock(&hyp_pc_lock);
    int was_stale = 0;
    hyp_page_slot_t *slot = hyp_pc_lookup_locked(path, path_len, &was_stale);
    if (slot == NULL) {
        pthread_mutex_unlock(&hyp_pc_lock);
        return sym_miss_pc;
    }
    /* HEAD writes only the headers prefix; GET writes the full
     * response. Snapshot under the lock so a concurrent eviction
     * can't free the source buffer mid-write. */
    size_t write_len = (kind == HYP_PC_METHOD_HEAD)
        ? slot->page->headers_len
        : slot->page->response_len;
    /* NOTE(review): if write_len were 0, malloc(0) may legally return
     * NULL and this would raise NoMemError spuriously — presumably
     * headers_len/response_len are never 0 for a cached page; confirm. */
    char *snapshot = (char *)malloc(write_len);
    if (snapshot == NULL) {
        /* Unlock BEFORE raising — rb_raise longjmps and would
         * otherwise leave the cache mutex held forever. */
        pthread_mutex_unlock(&hyp_pc_lock);
        rb_raise(rb_eNoMemError, "Hyperion::Http::PageCache.serve_request: "
                 "snapshot alloc (%zu bytes)", write_len);
    }
    memcpy(snapshot, slot->page->response_buf, write_len);
    pthread_mutex_unlock(&hyp_pc_lock);

    hyp_pc_write_args_t args;
    args.fd = fd;
    args.buf = snapshot;
    args.len = write_len;
    args.total = 0;
    args.err = 0;

    /* Blocking write runs without the GVL; RUBY_UBF_IO lets Ruby
     * interrupt it (e.g. Thread#kill) like any other IO wait. */
    rb_thread_call_without_gvl(hyp_pc_write_blocking, &args, RUBY_UBF_IO, NULL);

    free(snapshot);

    /* Only raise when NOTHING was written; a partial write still
     * returns [:ok, total] with the short count. NOTE(review): callers
     * must therefore compare bytes_written themselves if they need to
     * detect truncation — confirm the Ruby side does. */
    if (args.err != 0 && args.total == 0) {
        errno = args.err;
        rb_sys_fail("Hyperion::Http::PageCache.serve_request");
    }

    /* Build the [:ok, bytes_written] return tuple. Two-element
     * Array allocation is the only Ruby-level allocation on this
     * path (the integer auto-fixnums for any reasonable response
     * size). */
    VALUE result = rb_ary_new_capa(2);
    rb_ary_push(result, sym_ok_pc);
    rb_ary_push(result, SSIZET2NUM(args.total));
    return result;
}
913
+
914
+ /* PageCache.set_immutable(path, bool) -> bool */
915
+ static VALUE rb_pc_set_immutable(VALUE self, VALUE rb_path, VALUE rb_flag) {
916
+ (void)self;
917
+ Check_Type(rb_path, T_STRING);
918
+ int flag = RTEST(rb_flag) ? 1 : 0;
919
+ const char *path = RSTRING_PTR(rb_path);
920
+ size_t path_len = (size_t)RSTRING_LEN(rb_path);
921
+ if (path_len == 0 || path_len > HYP_PC_MAX_KEY_LEN) {
922
+ return Qfalse;
923
+ }
924
+ uint64_t h = hyp_pc_hash(path, path_len);
925
+ pthread_mutex_lock(&hyp_pc_lock);
926
+ hyp_page_slot_t *slot = hyp_pc_find_slot(path, path_len, h);
927
+ if (slot != NULL) {
928
+ slot->page->immutable = flag;
929
+ }
930
+ pthread_mutex_unlock(&hyp_pc_lock);
931
+ return slot != NULL ? Qtrue : Qfalse;
932
+ }
933
+
934
+ /* PageCache.size -> Integer */
935
+ static VALUE rb_pc_size(VALUE self) {
936
+ (void)self;
937
+ pthread_mutex_lock(&hyp_pc_lock);
938
+ size_t n = hyp_pc_count;
939
+ pthread_mutex_unlock(&hyp_pc_lock);
940
+ return SIZET2NUM(n);
941
+ }
942
+
943
+ /* PageCache.clear -> nil */
944
+ static VALUE rb_pc_clear(VALUE self) {
945
+ (void)self;
946
+ pthread_mutex_lock(&hyp_pc_lock);
947
+ for (size_t i = 0; i < HYP_PC_BUCKET_SIZE; i++) {
948
+ hyp_page_slot_t *slot = hyp_pc_buckets[i];
949
+ while (slot != NULL) {
950
+ hyp_page_slot_t *next = slot->next;
951
+ hyp_page_destroy(slot->page);
952
+ free(slot);
953
+ slot = next;
954
+ }
955
+ hyp_pc_buckets[i] = NULL;
956
+ }
957
+ hyp_pc_count = 0;
958
+ pthread_mutex_unlock(&hyp_pc_lock);
959
+ return Qnil;
960
+ }
961
+
962
+ /* PageCache.recheck_seconds -> Float */
963
+ static VALUE rb_pc_get_recheck(VALUE self) {
964
+ (void)self;
965
+ pthread_mutex_lock(&hyp_pc_lock);
966
+ double s = hyp_pc_recheck_seconds;
967
+ pthread_mutex_unlock(&hyp_pc_lock);
968
+ return rb_float_new(s);
969
+ }
970
+
971
+ /* PageCache.recheck_seconds=(seconds) */
972
+ static VALUE rb_pc_set_recheck(VALUE self, VALUE rb_seconds) {
973
+ (void)self;
974
+ double s = NUM2DBL(rb_seconds);
975
+ if (s < 0.0) {
976
+ rb_raise(rb_eArgError, "recheck_seconds must be >= 0 (got %f)", s);
977
+ }
978
+ pthread_mutex_lock(&hyp_pc_lock);
979
+ hyp_pc_recheck_seconds = s;
980
+ pthread_mutex_unlock(&hyp_pc_lock);
981
+ return rb_float_new(s);
982
+ }
983
+
984
+ /* PageCache.response_bytes(path) -> String | nil
985
+ *
986
+ * Specs-only helper. Returns a frozen copy of the pre-built response
987
+ * buffer so tests can assert exact wire bytes without running a real
988
+ * socket pair. Always re-reads under the lock. */
989
+ static VALUE rb_pc_response_bytes(VALUE self, VALUE rb_path) {
990
+ (void)self;
991
+ Check_Type(rb_path, T_STRING);
992
+ const char *path = RSTRING_PTR(rb_path);
993
+ size_t path_len = (size_t)RSTRING_LEN(rb_path);
994
+ if (path_len == 0 || path_len > HYP_PC_MAX_KEY_LEN) {
995
+ return Qnil;
996
+ }
997
+ uint64_t h = hyp_pc_hash(path, path_len);
998
+ pthread_mutex_lock(&hyp_pc_lock);
999
+ hyp_page_slot_t *slot = hyp_pc_find_slot(path, path_len, h);
1000
+ VALUE result = Qnil;
1001
+ if (slot != NULL) {
1002
+ result = rb_str_new(slot->page->response_buf,
1003
+ (long)slot->page->response_len);
1004
+ }
1005
+ pthread_mutex_unlock(&hyp_pc_lock);
1006
+ if (!NIL_P(result)) {
1007
+ rb_obj_freeze(result);
1008
+ }
1009
+ return result;
1010
+ }
1011
+
1012
+ /* PageCache.body_bytes(path) -> Integer | nil
1013
+ *
1014
+ * Specs-only helper: returns the body byte count without re-reading the
1015
+ * file. Useful for asserting the cached size matches expectations. */
1016
+ static VALUE rb_pc_body_bytes(VALUE self, VALUE rb_path) {
1017
+ (void)self;
1018
+ Check_Type(rb_path, T_STRING);
1019
+ const char *path = RSTRING_PTR(rb_path);
1020
+ size_t path_len = (size_t)RSTRING_LEN(rb_path);
1021
+ if (path_len == 0 || path_len > HYP_PC_MAX_KEY_LEN) {
1022
+ return Qnil;
1023
+ }
1024
+ uint64_t h = hyp_pc_hash(path, path_len);
1025
+ pthread_mutex_lock(&hyp_pc_lock);
1026
+ hyp_page_slot_t *slot = hyp_pc_find_slot(path, path_len, h);
1027
+ VALUE result = Qnil;
1028
+ if (slot != NULL) {
1029
+ result = SIZET2NUM(slot->page->body_len);
1030
+ }
1031
+ pthread_mutex_unlock(&hyp_pc_lock);
1032
+ return result;
1033
+ }
1034
+
1035
+ /* PageCache.content_type(path) -> String | nil — specs/operator helper. */
1036
+ static VALUE rb_pc_content_type(VALUE self, VALUE rb_path) {
1037
+ (void)self;
1038
+ Check_Type(rb_path, T_STRING);
1039
+ const char *path = RSTRING_PTR(rb_path);
1040
+ size_t path_len = (size_t)RSTRING_LEN(rb_path);
1041
+ if (path_len == 0 || path_len > HYP_PC_MAX_KEY_LEN) {
1042
+ return Qnil;
1043
+ }
1044
+ uint64_t h = hyp_pc_hash(path, path_len);
1045
+ pthread_mutex_lock(&hyp_pc_lock);
1046
+ hyp_page_slot_t *slot = hyp_pc_find_slot(path, path_len, h);
1047
+ VALUE result = Qnil;
1048
+ if (slot != NULL) {
1049
+ result = rb_str_new_cstr(slot->page->content_type);
1050
+ }
1051
+ pthread_mutex_unlock(&hyp_pc_lock);
1052
+ if (!NIL_P(result)) {
1053
+ rb_obj_freeze(result);
1054
+ }
1055
+ return result;
1056
+ }
1057
+
1058
+ /* PageCache.auto_threshold -> Integer */
1059
+ static VALUE rb_pc_auto_threshold(VALUE self) {
1060
+ (void)self;
1061
+ return INT2NUM(HYP_PC_AUTO_THRESHOLD);
1062
+ }
1063
+
1064
+ /* PageCache.max_key_len -> Integer */
1065
+ static VALUE rb_pc_max_key_len(VALUE self) {
1066
+ (void)self;
1067
+ return INT2NUM(HYP_PC_MAX_KEY_LEN);
1068
+ }
1069
+
1070
/* Extension entry point: wires the PageCache module and its singleton
 * methods under Hyperion::Http, then caches the IDs and symbols the
 * hot paths reuse. Called once by Ruby's `require`. */
void Init_hyperion_page_cache(void) {
    /* Hyperion must already be defined by the pure-Ruby side; raise
     * (via rb_const_get) rather than silently creating a duplicate. */
    rb_mHyperion_pc = rb_const_get(rb_cObject, rb_intern("Hyperion"));

    /* Reuse Hyperion::Http if the Ruby code defined it first; only
     * create it when we are loaded ahead of the Ruby definitions. */
    if (rb_const_defined(rb_mHyperion_pc, rb_intern("Http"))) {
        rb_mHyperionHttp_pc = rb_const_get(rb_mHyperion_pc, rb_intern("Http"));
    } else {
        rb_mHyperionHttp_pc = rb_define_module_under(rb_mHyperion_pc, "Http");
    }

    rb_mHyperionHttpPageCache = rb_define_module_under(rb_mHyperionHttp_pc,
                                                       "PageCache");

    /* Singleton-method table; arities here must match the C function
     * signatures above exactly. */
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "fetch",
                               rb_pc_fetch, 1);
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "cache_file",
                               rb_pc_cache_file, 1);
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "write_to",
                               rb_pc_write_to, 2);
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "set_immutable",
                               rb_pc_set_immutable, 2);
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "size",
                               rb_pc_size, 0);
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "clear",
                               rb_pc_clear, 0);
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "recheck_seconds",
                               rb_pc_get_recheck, 0);
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "recheck_seconds=",
                               rb_pc_set_recheck, 1);
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "response_bytes",
                               rb_pc_response_bytes, 1);
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "body_bytes",
                               rb_pc_body_bytes, 1);
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "content_type",
                               rb_pc_content_type, 1);
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "auto_threshold",
                               rb_pc_auto_threshold, 0);
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "max_key_len",
                               rb_pc_max_key_len, 0);
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "register_prebuilt",
                               rb_pc_register_prebuilt, 3);
    rb_define_singleton_method(rb_mHyperionHttpPageCache, "serve_request",
                               rb_pc_serve_request, 3);

    /* IDs cached for duck-typed fd extraction (IO#fileno / #to_io). */
    id_fileno_pc = rb_intern("fileno");
    id_to_io_pc = rb_intern("to_io");

    /* Status symbols returned by the API. */
    sym_ok_pc = ID2SYM(rb_intern("ok"));
    sym_stale_pc = ID2SYM(rb_intern("stale"));
    sym_missing_pc = ID2SYM(rb_intern("missing"));
    sym_miss_pc = ID2SYM(rb_intern("miss"));

    /* Pin the cached symbol VALUEs so GC never moves/collects them out
     * from under the C globals. */
    rb_gc_register_mark_object(sym_ok_pc);
    rb_gc_register_mark_object(sym_stale_pc);
    rb_gc_register_mark_object(sym_missing_pc);
    rb_gc_register_mark_object(sym_miss_pc);
}