itsi-server 0.2.16 → 0.2.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. checksums.yaml +4 -4
  2. data/Cargo.lock +3 -1
  3. data/exe/itsi +6 -1
  4. data/ext/itsi_acme/Cargo.toml +1 -1
  5. data/ext/itsi_scheduler/Cargo.toml +1 -1
  6. data/ext/itsi_server/Cargo.toml +3 -1
  7. data/ext/itsi_server/src/lib.rs +6 -1
  8. data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +2 -0
  9. data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +4 -4
  10. data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +14 -13
  11. data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +64 -33
  12. data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +151 -152
  13. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +6 -15
  14. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +26 -5
  15. data/ext/itsi_server/src/ruby_types/itsi_server.rs +1 -1
  16. data/ext/itsi_server/src/server/binds/listener.rs +45 -7
  17. data/ext/itsi_server/src/server/frame_stream.rs +142 -0
  18. data/ext/itsi_server/src/server/http_message_types.rs +142 -9
  19. data/ext/itsi_server/src/server/io_stream.rs +28 -5
  20. data/ext/itsi_server/src/server/lifecycle_event.rs +1 -1
  21. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +2 -3
  22. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +8 -10
  23. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +2 -3
  24. data/ext/itsi_server/src/server/middleware_stack/middlewares/csp.rs +3 -3
  25. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +54 -56
  26. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +5 -7
  27. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +5 -5
  28. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +7 -10
  29. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +2 -3
  30. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +1 -2
  31. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +4 -6
  32. data/ext/itsi_server/src/server/mod.rs +1 -0
  33. data/ext/itsi_server/src/server/process_worker.rs +3 -4
  34. data/ext/itsi_server/src/server/serve_strategy/acceptor.rs +16 -12
  35. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +87 -31
  36. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +158 -142
  37. data/ext/itsi_server/src/server/signal.rs +37 -9
  38. data/ext/itsi_server/src/server/thread_worker.rs +84 -69
  39. data/ext/itsi_server/src/services/itsi_http_service.rs +43 -43
  40. data/ext/itsi_server/src/services/static_file_server.rs +28 -47
  41. data/lib/itsi/http_request.rb +31 -39
  42. data/lib/itsi/http_response.rb +5 -0
  43. data/lib/itsi/rack_env_pool.rb +59 -0
  44. data/lib/itsi/server/config/dsl.rb +5 -4
  45. data/lib/itsi/server/config/middleware/proxy.rb +1 -1
  46. data/lib/itsi/server/config/middleware/rackup_file.rb +2 -2
  47. data/lib/itsi/server/config/options/auto_reload_config.rb +6 -2
  48. data/lib/itsi/server/config/options/include.rb +5 -2
  49. data/lib/itsi/server/config/options/pipeline_flush.md +16 -0
  50. data/lib/itsi/server/config/options/pipeline_flush.rb +19 -0
  51. data/lib/itsi/server/config/options/writev.md +25 -0
  52. data/lib/itsi/server/config/options/writev.rb +19 -0
  53. data/lib/itsi/server/config.rb +21 -8
  54. data/lib/itsi/server/default_config/Itsi.rb +1 -4
  55. data/lib/itsi/server/grpc/grpc_call.rb +2 -0
  56. data/lib/itsi/server/grpc/grpc_interface.rb +2 -2
  57. data/lib/itsi/server/rack/handler/itsi.rb +3 -1
  58. data/lib/itsi/server/rack_interface.rb +17 -12
  59. data/lib/itsi/server/scheduler_interface.rb +2 -0
  60. data/lib/itsi/server/version.rb +1 -1
  61. data/lib/itsi/server.rb +1 -0
  62. data/lib/ruby_lsp/itsi/addon.rb +12 -13
  63. metadata +7 -1

data/ext/itsi_server/src/server/thread_worker.rs

@@ -3,13 +3,12 @@ use itsi_error::ItsiError;
 use itsi_rb_helpers::{
     call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapValue,
 };
-use itsi_tracing::{debug, error, warn};
+use itsi_tracing::{debug, error};
 use magnus::{
     error::Result,
     value::{InnerValue, Lazy, LazyId, Opaque, ReprValue},
     Module, RClass, Ruby, Thread, Value,
 };
-use nix::unistd::Pid;
 use parking_lot::{Mutex, RwLock};
 use std::{
     ops::Deref,
@@ -17,8 +16,7 @@ use std::{
         atomic::{AtomicBool, AtomicU64, Ordering},
         Arc,
     },
-    thread,
-    time::{Duration, Instant, SystemTime, UNIX_EPOCH},
+    time::{Instant, SystemTime, UNIX_EPOCH},
 };
 use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
 use tracing::instrument;
@@ -35,7 +33,7 @@ use super::request_job::RequestJob;
 pub struct ThreadWorker {
     pub params: Arc<ServerParams>,
     pub id: u8,
-    pub name: String,
+    pub worker_id: usize,
     pub request_id: AtomicU64,
     pub current_request_start: AtomicU64,
     pub receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -64,8 +62,11 @@ type ThreadWorkerBuildResult = Result<(
     Sender<RequestJob>,
 )>;
 
-#[instrument(name = "boot", parent=None, skip(params, pid))]
-pub fn build_thread_workers(params: Arc<ServerParams>, pid: Pid) -> ThreadWorkerBuildResult {
+#[instrument(name = "boot", parent=None, skip(params, worker_id))]
+pub fn build_thread_workers(
+    params: Arc<ServerParams>,
+    worker_id: usize,
+) -> ThreadWorkerBuildResult {
     let blocking_thread_count = params.threads;
     let nonblocking_thread_count = params.scheduler_threads;
     let ruby_thread_request_backlog_size: usize = params
@@ -83,7 +84,7 @@ pub fn build_thread_workers(params: Arc<ServerParams>, pid: Pid) -> ThreadWorker
         ThreadWorker::new(
             params.clone(),
             id,
-            format!("{:?}#{:?}", pid, id),
+            worker_id,
             blocking_receiver_ref.clone(),
             blocking_sender_ref.clone(),
             if nonblocking_thread_count.is_some() {
@@ -106,7 +107,7 @@ pub fn build_thread_workers(params: Arc<ServerParams>, pid: Pid) -> ThreadWorker
         workers.push(ThreadWorker::new(
             params.clone(),
             id,
-            format!("{:?}#{:?}", pid, id),
+            worker_id,
             nonblocking_receiver_ref.clone(),
             nonblocking_sender_ref.clone(),
             Some(scheduler_class),
@@ -141,7 +142,7 @@ impl ThreadWorker {
     pub fn new(
         params: Arc<ServerParams>,
         id: u8,
-        name: String,
+        worker_id: usize,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
         sender: Sender<RequestJob>,
         scheduler_class: Option<Opaque<Value>>,
@@ -149,9 +150,9 @@ impl ThreadWorker {
         let worker = Arc::new(Self {
             params,
             id,
+            worker_id,
             request_id: AtomicU64::new(0),
             current_request_start: AtomicU64::new(0),
-            name,
             receiver,
             sender,
             thread: RwLock::new(None),
@@ -181,24 +182,24 @@ impl ThreadWorker {
     }
 
     pub fn run(self: Arc<Self>) -> Result<()> {
-        let name = self.name.clone();
         let receiver = self.receiver.clone();
         let terminated = self.terminated.clone();
         let scheduler_class = self.scheduler_class;
         let params = self.params.clone();
         let self_ref = self.clone();
-        let id = self.id;
+        let worker_id = self.worker_id;
         call_with_gvl(|_| {
             *self.thread.write() = Some(
                 create_ruby_thread(move || {
                     if params.pin_worker_cores {
-                        core_affinity::set_for_current(CORE_IDS[(id as usize) % CORE_IDS.len()]);
+                        core_affinity::set_for_current(
+                            CORE_IDS[((2 * worker_id) + 1) % CORE_IDS.len()],
+                        );
                     }
                     debug!("Ruby thread worker started");
                     if let Some(scheduler_class) = scheduler_class {
                         if let Err(err) = self_ref.fiber_accept_loop(
                             params,
-                            name,
                             receiver,
                             scheduler_class,
                             terminated,
@@ -206,7 +207,7 @@ impl ThreadWorker {
                             error!("Error in fiber_accept_loop: {:?}", err);
                         }
                     } else {
-                        self_ref.accept_loop(params, name, receiver, terminated);
+                        self_ref.accept_loop(params, receiver, terminated);
                    }
                })
                .ok_or_else(|| {
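
The core-pinning hunk above derives the core index from (2 * worker_id) + 1 rather than the thread id, spreading each worker's Ruby thread across the available cores. A minimal standalone sketch of the core_affinity calls involved; pin_worker_thread and the slot formula are illustrative, not the package's exact layout:

    // Sketch only: pin the calling thread to a core chosen from worker_id.
    // The (2 * worker_id) + 1 slot is an illustrative interleaving, not Itsi's exact scheme.
    fn pin_worker_thread(worker_id: usize) {
        // get_core_ids() enumerates the cores visible to this process.
        if let Some(core_ids) = core_affinity::get_core_ids() {
            let slot = ((2 * worker_id) + 1) % core_ids.len();
            // set_for_current() pins the calling thread; it returns false if pinning failed.
            let _pinned = core_affinity::set_for_current(core_ids[slot]);
        }
    }

    fn main() {
        pin_worker_thread(0);
        println!("thread pinned (if the platform supports it)");
    }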
@@ -262,9 +263,14 @@ impl ThreadWorker {
                 }
             }
         }
+
         for _ in 0..MAX_BATCH_SIZE {
             if let Ok(req) = receiver.try_recv() {
+                let should_break = matches!(req, RequestJob::Shutdown);
                 batch.push(req);
+                if should_break {
+                    break;
+                }
             } else {
                 break;
             }
@@ -307,7 +313,9 @@ impl ThreadWorker {
                     ItsiGrpcCall::internal_error(ruby, response, err)
                 }
             }
-            RequestJob::Shutdown => return true,
+            RequestJob::Shutdown => {
+                return true;
+            }
         }
     }
     false
@@ -339,15 +347,14 @@ impl ThreadWorker {
                 if yield_result.is_err() {
                     break;
                 }
-            })
+            });
         })
     }
 
-    #[instrument(skip_all, fields(thread_worker=name))]
+    #[instrument(skip_all, fields(thread_worker=format!("{}:{}", self.id, self.worker_id)))]
     pub fn fiber_accept_loop(
         self: Arc<Self>,
         params: Arc<ServerParams>,
-        name: String,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
         scheduler_class: Opaque<Value>,
         terminated: Arc<AtomicBool>,
@@ -422,68 +429,76 @@ impl ThreadWorker {
         });
     }
 
-    #[instrument(skip_all, fields(thread_worker=id))]
+    #[instrument(skip_all, fields(thread_worker=format!("{}:{}", self.id, self.worker_id)))]
     pub fn accept_loop(
         self: Arc<Self>,
         params: Arc<ServerParams>,
-        id: String,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
         terminated: Arc<AtomicBool>,
     ) {
-        let ruby = Ruby::get().unwrap();
         let mut idle_counter = 0;
-        let self_ref = self.clone();
         call_without_gvl(|| loop {
-            if receiver.is_empty() {
-                if let Some(oob_gc_threshold) = params.oob_gc_responses_threshold {
-                    idle_counter = (idle_counter + 1) % oob_gc_threshold;
-                    if idle_counter == 0 {
-                        call_with_gvl(|_ruby| {
-                            ruby.gc_start();
-                        });
-                    }
-                };
-            }
             match receiver.recv_blocking() {
-                Ok(RequestJob::ProcessHttpRequest(request, app_proc)) => {
-                    self_ref.request_id.fetch_add(1, Ordering::Relaxed);
-                    self_ref.current_request_start.store(
-                        SystemTime::now()
-                            .duration_since(UNIX_EPOCH)
-                            .unwrap()
-                            .as_secs(),
-                        Ordering::Relaxed,
-                    );
-                    call_with_gvl(|_ruby| {
-                        request.process(&ruby, app_proc).ok();
-                    });
-                    if terminated.load(Ordering::Relaxed) {
-                        break;
+                Err(_) => break,
+                Ok(RequestJob::Shutdown) => break,
+                Ok(request_job) => call_with_gvl(|ruby| {
+                    self.process_one(&ruby, request_job, &terminated);
+                    while let Ok(request_job) = receiver.try_recv() {
+                        if matches!(request_job, RequestJob::Shutdown) {
+                            terminated.store(true, Ordering::Relaxed);
+                            break;
+                        }
+                        self.process_one(&ruby, request_job, &terminated);
                     }
-                }
-                Ok(RequestJob::ProcessGrpcRequest(request, app_proc)) => {
-                    self_ref.request_id.fetch_add(1, Ordering::Relaxed);
-                    self_ref.current_request_start.store(
-                        SystemTime::now()
-                            .duration_since(UNIX_EPOCH)
-                            .unwrap()
-                            .as_secs(),
-                        Ordering::Relaxed,
-                    );
-                    call_with_gvl(|_ruby| {
-                        request.process(&ruby, app_proc).ok();
-                    });
-                    if terminated.load(Ordering::Relaxed) {
-                        break;
+                    if let Some(thresh) = params.oob_gc_responses_threshold {
+                        idle_counter = (idle_counter + 1) % thresh;
+                        if idle_counter == 0 {
+                            ruby.gc_start();
+                        }
                     }
+                }),
+            };
+            if terminated.load(Ordering::Relaxed) {
+                break;
+            }
+        });
+    }
+
+    fn process_one(self: &Arc<Self>, ruby: &Ruby, job: RequestJob, terminated: &Arc<AtomicBool>) {
+        match job {
+            RequestJob::ProcessHttpRequest(request, app_proc) => {
+                if terminated.load(Ordering::Relaxed) {
+                    request.response().unwrap().service_unavailable();
+                    return;
                 }
-                Ok(RequestJob::Shutdown) => {
-                    break;
-                }
-                Err(_) => {
-                    thread::sleep(Duration::from_micros(1));
+                self.request_id.fetch_add(1, Ordering::Relaxed);
+                self.current_request_start.store(
+                    SystemTime::now()
+                        .duration_since(UNIX_EPOCH)
+                        .unwrap()
+                        .as_secs(),
+                    Ordering::Relaxed,
+                );
+                request.process(ruby, app_proc).ok();
+            }
+
+            RequestJob::ProcessGrpcRequest(request, app_proc) => {
+                if terminated.load(Ordering::Relaxed) {
+                    request.stream().unwrap().close().ok();
+                    return;
                 }
+                self.request_id.fetch_add(1, Ordering::Relaxed);
+                self.current_request_start.store(
+                    SystemTime::now()
+                        .duration_since(UNIX_EPOCH)
+                        .unwrap()
+                        .as_secs(),
+                    Ordering::Relaxed,
+                );
+                request.process(ruby, app_proc).ok();
            }
-        });
+
+            RequestJob::Shutdown => unreachable!(),
+        }
    }
 }
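
The rewritten accept_loop above blocks on recv_blocking, drains any backlog with try_recv, and treats RequestJob::Shutdown as a sentinel that ends the loop. A minimal standalone sketch of that pattern over async_channel, with a generic Job enum in place of RequestJob and no GVL handling:

    // Sketch only: block for the next job, then drain the backlog without blocking,
    // treating Shutdown as a sentinel that ends the loop.
    enum Job {
        Work(u64),
        Shutdown,
    }

    fn process(n: u64) {
        println!("processed job {n}");
    }

    fn drain_loop(rx: &async_channel::Receiver<Job>) {
        loop {
            match rx.recv_blocking() {
                Err(_) | Ok(Job::Shutdown) => break, // channel closed or explicit shutdown
                Ok(Job::Work(n)) => {
                    process(n);
                    // Opportunistically drain whatever is already queued.
                    while let Ok(job) = rx.try_recv() {
                        match job {
                            Job::Shutdown => return,
                            Job::Work(n) => process(n),
                        }
                    }
                }
            }
        }
    }

    fn main() {
        let (tx, rx) = async_channel::unbounded();
        for n in 0..5 {
            tx.send_blocking(Job::Work(n)).unwrap();
        }
        tx.send_blocking(Job::Shutdown).unwrap();
        drain_loop(&rx);
    }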

data/ext/itsi_server/src/services/itsi_http_service.rs

@@ -6,22 +6,20 @@ use crate::server::http_message_types::{
 use crate::server::lifecycle_event::LifecycleEvent;
 use crate::server::middleware_stack::MiddlewareLayer;
 use crate::server::serve_strategy::acceptor::AcceptorArgs;
-use crate::server::signal::send_lifecycle_event;
+use crate::server::signal::{send_lifecycle_event, SHUTDOWN_REQUESTED};
 use chrono::{self, DateTime, Local};
 use either::Either;
 use http::header::ACCEPT_ENCODING;
 use http::{HeaderValue, Request};
 use hyper::body::Incoming;
-use hyper::service::Service;
-use itsi_error::ItsiError;
 use regex::Regex;
+use smallvec::SmallVec;
+use std::ops::Deref;
 use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::OnceLock;
+use std::sync::{Arc, OnceLock};
 use std::time::{Duration, Instant};
-use tracing::error;
-
-use std::{future::Future, ops::Deref, pin::Pin, sync::Arc};
 use tokio::time::timeout;
+use tracing::error;
 
 #[derive(Clone)]
 pub struct ItsiHttpService {
@@ -80,12 +78,14 @@ pub struct RequestContextInner {
     pub request_start_time: OnceLock<DateTime<Local>>,
     pub start_instant: Instant,
     pub if_none_match: OnceLock<Option<String>>,
-    pub supported_encoding_set: OnceLock<Vec<HeaderValue>>,
+    pub supported_encoding_set: OnceLock<AcceptEncodingSet>,
     pub is_ruby_request: Arc<AtomicBool>,
 }
 
+type AcceptEncodingSet = SmallVec<[HeaderValue; 2]>;
+
 impl HttpRequestContext {
-    fn new(
+    pub fn new(
         service: ItsiHttpService,
         matching_pattern: Option<Arc<Regex>>,
         accept: ResponseFormat,
@@ -109,12 +109,14 @@ impl HttpRequestContext {
     }
 
     pub fn set_supported_encoding_set(&self, req: &HttpRequest) {
-        self.inner.supported_encoding_set.get_or_init(move || {
-            req.headers()
-                .get_all(ACCEPT_ENCODING)
-                .into_iter()
-                .cloned()
-                .collect::<Vec<_>>()
+        self.inner.supported_encoding_set.get_or_init(|| {
+            let mut set: AcceptEncodingSet = SmallVec::new();
+
+            for hv in req.headers().get_all(ACCEPT_ENCODING) {
+                set.push(hv.clone()); // clone ≈ 16 B struct copy
+            }
+
+            set
         });
     }
 
@@ -164,7 +166,7 @@ impl HttpRequestContext {
         self.inner.response_format.get().unwrap()
     }
 
-    pub fn supported_encoding_set(&self) -> Option<&Vec<HeaderValue>> {
+    pub fn supported_encoding_set(&self) -> Option<&AcceptEncodingSet> {
         self.inner.supported_encoding_set.get()
     }
 }
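
The supported_encoding_set hunks above replace Vec<HeaderValue> with SmallVec<[HeaderValue; 2]>, so the common case of at most two Accept-Encoding values stays on the stack. A minimal standalone sketch of SmallVec's inline-versus-spilled behaviour (illustrative string values, not the server's HeaderValue type):

    use smallvec::SmallVec;

    fn main() {
        // Inline capacity of 2: the first two pushes need no heap allocation.
        let mut set: SmallVec<[&str; 2]> = SmallVec::new();
        set.push("gzip");
        set.push("br");
        assert!(!set.spilled()); // still stored inline

        // A third element exceeds the inline capacity and spills to the heap.
        set.push("zstd");
        assert!(set.spilled());
        println!("{} encodings, spilled = {}", set.len(), set.spilled());
    }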
@@ -173,13 +175,8 @@ const SERVER_TOKEN_VERSION: HeaderValue =
     HeaderValue::from_static(concat!("Itsi/", env!("CARGO_PKG_VERSION")));
 const SERVER_TOKEN_NAME: HeaderValue = HeaderValue::from_static("Itsi");
 
-impl Service<Request<Incoming>> for ItsiHttpService {
-    type Response = HttpResponse;
-    type Error = ItsiError;
-    type Future = Pin<Box<dyn Future<Output = itsi_error::Result<HttpResponse>> + Send>>;
-
-    fn call(&self, req: Request<Incoming>) -> Self::Future {
-        let self_clone = self.clone();
+impl ItsiHttpService {
+    pub async fn handle_request(&self, req: Request<Incoming>) -> itsi_error::Result<HttpResponse> {
         let mut req = req.limit();
         let accept: ResponseFormat = req.accept().into();
         let is_single_mode = self.server_params.workers == 1;
@@ -191,7 +188,7 @@ impl Service<Request<Incoming>> for ItsiHttpService {
         let token_preference = self.server_params.itsi_server_token_preference;
 
         let service_future = async move {
-            let middleware_stack = self_clone
+            let middleware_stack = self
                 .server_params
                 .middleware
                 .get()
@@ -202,7 +199,7 @@ impl Service<Request<Incoming>> for ItsiHttpService {
             let mut resp: Option<HttpResponse> = None;
 
             let mut context =
-                HttpRequestContext::new(self_clone.clone(), matching_pattern, accept, irr_clone);
+                HttpRequestContext::new(self.clone(), matching_pattern, accept, irr_clone);
             let mut depth = 0;
 
             for (index, elm) in stack.iter().enumerate() {
@@ -243,28 +240,31 @@ impl Service<Request<Incoming>> for ItsiHttpService {
         };
 
         if let Some(timeout_duration) = request_timeout {
-            Box::pin(async move {
-                match timeout(timeout_duration, service_future).await {
-                    Ok(result) => result,
-                    Err(_) => {
-                        // If we're still running Ruby at this point, we can't just kill the
-                        // thread as it might be in a critical section.
-                        // Instead we must ask the worker to hot restart.
-                        if is_ruby_request.load(Ordering::Relaxed) {
-                            if is_single_mode {
-                                // If we're in single mode, re-exec the whole process
-                                send_lifecycle_event(LifecycleEvent::Restart);
-                            } else {
-                                // Otherwise we can shutdown the worker and rely on the master to restart it
-                                send_lifecycle_event(LifecycleEvent::Shutdown);
-                            }
+            match timeout(timeout_duration, service_future).await {
+                Ok(result) => result,
+                Err(_) => {
+                    // If we're still running Ruby at this point, we can't just kill the
+                    // thread as it might be in a critical section.
+                    // Instead we must ask the worker to hot restart.
+                    // But only if we're not already shutting down
+                    if is_ruby_request.load(Ordering::Relaxed)
+                        && !SHUTDOWN_REQUESTED.load(Ordering::SeqCst)
+                    {
+                        // When we've detected a timeout, use the safer send_lifecycle_event
+                        // which will properly handle signal-safe state transitions
+                        if is_single_mode {
+                            // If we're in single mode, re-exec the whole process
+                            send_lifecycle_event(LifecycleEvent::Restart);
+                        } else {
+                            // Otherwise we can shutdown the worker and rely on the master to restart it
+                            send_lifecycle_event(LifecycleEvent::Shutdown);
                        }
-                        Ok(TIMEOUT_RESPONSE.to_http_response(accept).await)
                    }
+                    Ok(TIMEOUT_RESPONSE.to_http_response(accept).await)
                }
-            })
+            }
         } else {
-            Box::pin(service_future)
+            service_future.await
        }
    }
 }
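
The last hunk above drops the boxed Service future: handle_request now awaits tokio::time::timeout directly and substitutes a canned response when the deadline elapses. A minimal standalone sketch of that shape, with a placeholder handler and fallback value rather than the server's types:

    use std::time::Duration;
    use tokio::time::timeout;

    // Placeholder for the real request-handling future.
    async fn handle() -> Result<String, std::io::Error> {
        tokio::time::sleep(Duration::from_millis(50)).await;
        Ok("hello".to_string())
    }

    #[tokio::main]
    async fn main() -> Result<(), std::io::Error> {
        let response = match timeout(Duration::from_millis(10), handle()).await {
            Ok(result) => result?,                    // the future finished in time
            Err(_elapsed) => "timed out".to_string(), // deadline hit: serve a fallback
        };
        println!("{response}");
        Ok(())
    }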

data/ext/itsi_server/src/services/static_file_server.rs

@@ -2,7 +2,7 @@ use crate::{
     default_responses::NOT_FOUND_RESPONSE,
     prelude::*,
     server::{
-        http_message_types::{HttpRequest, HttpResponse, RequestExt, ResponseFormat},
+        http_message_types::{HttpBody, HttpRequest, HttpResponse, RequestExt, ResponseFormat},
         middleware_stack::ErrorResponse,
         redirect_type::RedirectType,
     },
@@ -16,7 +16,6 @@ use http::{
     },
     HeaderName, HeaderValue, Response, StatusCode,
 };
-use http_body_util::{combinators::BoxBody, Full};
 use itsi_error::Result;
 use parking_lot::{Mutex, RwLock};
 use percent_encoding::percent_decode_str;
@@ -28,7 +27,6 @@ use std::{
     borrow::Cow,
     cmp::Ordering,
     collections::HashMap,
-    convert::Infallible,
     fs::Metadata,
     ops::Deref,
     path::{Path, PathBuf},
@@ -324,7 +322,7 @@ impl StaticFileServer {
             }) => Response::builder()
                 .status(StatusCode::MOVED_PERMANENTLY)
                 .header(header::LOCATION, redirect_to)
-                .body(BoxBody::new(Full::new(Bytes::new())))
+                .body(HttpBody::empty())
                 .unwrap(),
             Err(not_found_behavior) => match not_found_behavior {
                 NotFoundBehavior::Error(error_response) => {
@@ -340,7 +338,7 @@ impl StaticFileServer {
                 NotFoundBehavior::Redirect(redirect) => Response::builder()
                     .status(redirect.r#type.status_code())
                     .header(header::LOCATION, redirect.to)
-                    .body(BoxBody::new(Full::new(Bytes::new())))
+                    .body(HttpBody::empty())
                     .unwrap(),
             },
         })
@@ -407,7 +405,7 @@ impl StaticFileServer {
 
         Response::builder()
             .status(StatusCode::NOT_FOUND)
-            .body(BoxBody::new(Full::new(Bytes::new())))
+            .body(HttpBody::empty())
             .unwrap()
     }
 
@@ -648,15 +646,8 @@ impl StaticFileServer {
         Err(nf)
     }
 
-    async fn stream_file_range(
-        &self,
-        path: PathBuf,
-        start: u64,
-        end: u64,
-    ) -> Option<BoxBody<Bytes, Infallible>> {
+    async fn stream_file_range(&self, path: PathBuf, start: u64, end: u64) -> Option<HttpBody> {
         use futures::TryStreamExt;
-        use http_body_util::StreamBody;
-        use hyper::body::Frame;
         use tokio::io::AsyncSeekExt;
         use tokio_util::io::ReaderStream;
 
@@ -687,32 +678,25 @@ impl StaticFileServer {
         let range_length = end - start + 1;
         let limited_reader = tokio::io::AsyncReadExt::take(file, range_length);
         let path_clone = path.clone();
-        let stream = ReaderStream::with_capacity(limited_reader, 64 * 1024)
-            .map_ok(Frame::data)
-            .map_err(move |e| {
-                warn!("Error streaming file {}: {}", path_clone.display(), e);
-                unreachable!("We handle IO errors above")
-            });
-
-        Some(BoxBody::new(StreamBody::new(stream)))
+        let stream = ReaderStream::with_capacity(limited_reader, 64 * 1024).map_err(move |e| {
+            warn!("Error streaming file {}: {}", path_clone.display(), e);
+            unreachable!("We handle IO errors above")
+        });
+        Some(HttpBody::stream(stream))
     }
 
-    async fn stream_file(&self, path: PathBuf) -> Option<BoxBody<Bytes, Infallible>> {
+    async fn stream_file(&self, path: PathBuf) -> Option<HttpBody> {
         use futures::TryStreamExt;
-        use http_body_util::StreamBody;
-        use hyper::body::Frame;
         use tokio_util::io::ReaderStream;
 
         match File::open(&path).await {
             Ok(file) => {
                 let path_clone = path.clone();
-                let stream = ReaderStream::with_capacity(file, 64 * 1024)
-                    .map_ok(Frame::data)
-                    .map_err(move |e| {
-                        warn!("Error streaming file {}: {}", path_clone.display(), e);
-                        unreachable!("We handle IO errors above")
-                    });
-                Some(BoxBody::new(StreamBody::new(stream)))
+                let stream = ReaderStream::with_capacity(file, 64 * 1024).map_err(move |e| {
+                    warn!("Error streaming file {}: {}", path_clone.display(), e);
+                    unreachable!("We handle IO errors above")
+                });
+                Some(HttpBody::stream(stream))
             }
             Err(e) => {
                warn!(
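
The streaming hunks above fold the ReaderStream/StreamBody plumbing behind the new HttpBody::stream and HttpBody::full constructors (added in http_message_types.rs). A minimal standalone sketch of the underlying pattern, wrapping a ReaderStream in a StreamBody and boxing it; the file_body helper is illustrative, not part of the package:

    use bytes::Bytes;
    use futures::TryStreamExt;
    use http_body_util::{combinators::BoxBody, StreamBody};
    use hyper::body::Frame;
    use tokio::fs::File;
    use tokio_util::io::ReaderStream;

    // Stream a file from disk as an HTTP body, 64 KiB at a time.
    async fn file_body(path: &str) -> std::io::Result<BoxBody<Bytes, std::io::Error>> {
        let file = File::open(path).await?;
        let stream = ReaderStream::with_capacity(file, 64 * 1024)
            .map_ok(Frame::data); // wrap each chunk in a DATA frame
        Ok(BoxBody::new(StreamBody::new(stream)))
    }

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        let _body = file_body("Cargo.toml").await?; // any readable file works here
        println!("streaming body constructed");
        Ok(())
    }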
@@ -749,7 +733,7 @@ impl StaticFileServer {
             return Response::builder()
                 .status(StatusCode::RANGE_NOT_SATISFIABLE)
                 .header("Content-Range", format!("bytes */{}", content_length))
-                .body(BoxBody::new(Full::new(Bytes::new())))
+                .body(HttpBody::empty())
                 .unwrap();
         }
 
@@ -795,7 +779,7 @@ impl StaticFileServer {
             builder = builder.header("Content-Range", range);
         }
 
-        return builder.body(BoxBody::new(Full::new(Bytes::new()))).unwrap();
+        return builder.body(HttpBody::empty()).unwrap();
     }
 
     // For GET requests, prepare the actual content
@@ -829,10 +813,7 @@ impl StaticFileServer {
         }
     }
 
-    fn serve_cached_content(
-        &self,
-        serve_cache_args: ServeCacheArgs,
-    ) -> http::Response<BoxBody<Bytes, Infallible>> {
+    fn serve_cached_content(&self, serve_cache_args: ServeCacheArgs) -> HttpResponse {
         let ServeCacheArgs(
             cache_entry,
             start,
@@ -855,7 +836,7 @@ impl StaticFileServer {
             return Response::builder()
                 .status(StatusCode::RANGE_NOT_SATISFIABLE)
                 .header("Content-Range", format!("bytes */{}", content_length))
-                .body(BoxBody::new(Full::new(Bytes::new())))
+                .body(HttpBody::empty())
                 .unwrap();
         }
 
@@ -904,7 +885,7 @@ impl StaticFileServer {
             builder = builder.header("Content-Range", range);
         }
 
-        return builder.body(BoxBody::new(Full::new(Bytes::new()))).unwrap();
+        return builder.body(HttpBody::empty()).unwrap();
     }
 
     if is_range_request {
@@ -920,7 +901,7 @@ impl StaticFileServer {
             cache_entry.last_modified_http_date.clone(),
             content_range,
             &self.headers,
-            BoxBody::new(Full::new(range_bytes)),
+            HttpBody::full(range_bytes),
         )
     } else {
         // Return the full content
@@ -987,15 +968,15 @@ fn format_http_date_header(time: SystemTime) -> HeaderValue {
         .unwrap()
 }
 
-fn build_ok_body(bytes: Arc<Bytes>) -> BoxBody<Bytes, Infallible> {
-    BoxBody::new(Full::new(bytes.as_ref().clone()))
+fn build_ok_body(bytes: Arc<Bytes>) -> HttpBody {
+    HttpBody::full(bytes.as_ref().clone())
 }
 
 // Helper function to handle not modified responses
-fn build_not_modified_response() -> http::Response<BoxBody<Bytes, Infallible>> {
+fn build_not_modified_response() -> HttpResponse {
     Response::builder()
         .status(StatusCode::NOT_MODIFIED)
-        .body(BoxBody::new(Full::new(Bytes::new())))
+        .body(HttpBody::empty())
         .unwrap()
 }
 
@@ -1009,8 +990,8 @@ fn build_file_response(
     last_modified_http_date: HeaderValue,
     range_header: Option<String>,
     headers: &Option<HashMap<String, String>>,
-    body: BoxBody<Bytes, Infallible>,
-) -> http::Response<BoxBody<Bytes, Infallible>> {
+    body: HttpBody,
+) -> HttpResponse {
     let mut response = Response::new(body);
 
     *response.status_mut() = status;