itsi-server 0.1.9 → 0.1.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,4 @@
-use super::itsi_server::RequestJob;
+use super::itsi_server::{RequestJob, Server};
 use crate::{request::itsi_request::ItsiRequest, ITSI_SERVER};
 use itsi_rb_helpers::{
     call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapVal, HeapValue,
@@ -24,6 +24,7 @@ use std::{
 use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
 use tracing::instrument;
 pub struct ThreadWorker {
+    pub server: Arc<Server>,
     pub id: String,
     pub app: Opaque<Value>,
     pub receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -48,8 +49,9 @@ static CLASS_FIBER: Lazy<RClass> = Lazy::new(|ruby| {

 pub struct TerminateWakerSignal(bool);

-#[instrument(name = "Boot", parent=None, skip(threads, app, pid, scheduler_class))]
+#[instrument(name = "Boot", parent=None, skip(server, threads, app, pid, scheduler_class))]
 pub fn build_thread_workers(
+    server: Arc<Server>,
     pid: Pid,
     threads: NonZeroU8,
     app: HeapVal,
@@ -65,6 +67,7 @@ pub fn build_thread_workers(
         .map(|id| {
             info!(pid = pid.as_raw(), id, "Thread");
             ThreadWorker::new(
+                server.clone(),
                 format!("{:?}#{:?}", pid, id),
                 app,
                 receiver_ref.clone(),
@@ -83,10 +86,7 @@ pub fn load_app(
     scheduler_class: Option<String>,
 ) -> Result<(Opaque<Value>, Option<Opaque<Value>>)> {
     call_with_gvl(|ruby| {
-        let app = Opaque::from(
-            app.funcall::<_, _, Value>(*ID_CALL, ())
-                .expect("Couldn't load app"),
-        );
+        let app = Opaque::from(app.funcall::<_, _, Value>(*ID_CALL, ())?);
         let scheduler_class = if let Some(scheduler_class) = scheduler_class {
             Some(Opaque::from(
                 ruby.module_kernel()
@@ -100,6 +100,7 @@ pub fn load_app(
 }
 impl ThreadWorker {
     pub fn new(
+        server: Arc<Server>,
         id: String,
         app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -107,6 +108,7 @@ impl ThreadWorker {
         scheduler_class: Option<Opaque<Value>>,
     ) -> Result<Self> {
         let mut worker = Self {
+            server,
             id,
             app,
             receiver,
@@ -125,7 +127,7 @@ impl ThreadWorker {
             Ok(_) => {}
             Err(err) => error!("Failed to send shutdown request: {}", err),
         };
-        info!("Requesting shutdown");
+        debug!("Requesting shutdown");
     }

     #[instrument(skip(self, deadline), fields(id = self.id))]
@@ -140,7 +142,7 @@ impl ThreadWorker {
             if thread.funcall::<_, _, bool>(*ID_ALIVE, ()).unwrap_or(false) {
                 return true;
             }
-            info!("Thread has shut down");
+            debug!("Thread has shut down");
         }
         self.thread.write().take();

@@ -154,17 +156,23 @@ impl ThreadWorker {
         let receiver = self.receiver.clone();
         let terminated = self.terminated.clone();
         let scheduler_class = self.scheduler_class;
+        let server = self.server.clone();
         call_with_gvl(|_| {
             *self.thread.write() = Some(
                 create_ruby_thread(move || {
                     if let Some(scheduler_class) = scheduler_class {
-                        if let Err(err) =
-                            Self::fiber_accept_loop(id, app, receiver, scheduler_class, terminated)
-                        {
+                        if let Err(err) = Self::fiber_accept_loop(
+                            server,
+                            id,
+                            app,
+                            receiver,
+                            scheduler_class,
+                            terminated,
+                        ) {
                             error!("Error in fiber_accept_loop: {:?}", err);
                         }
                     } else {
-                        Self::accept_loop(id, app, receiver, terminated);
+                        Self::accept_loop(server, id, app, receiver, terminated);
                     }
                 })
                 .into(),
@@ -180,6 +188,7 @@ impl ThreadWorker {
         receiver: &Arc<async_channel::Receiver<RequestJob>>,
         terminated: &Arc<AtomicBool>,
         waker_sender: &watch::Sender<TerminateWakerSignal>,
+        oob_gc_responses_threshold: Option<u64>,
     ) -> magnus::block::Proc {
         let leader = leader.clone();
         let receiver = receiver.clone();
@@ -243,10 +252,15 @@ impl ThreadWorker {
             }

             let yield_result = if receiver.is_empty() {
+                let should_gc = if let Some(oob_gc_threshold) = oob_gc_responses_threshold {
+                    idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                    idle_counter == 0
+                } else {
+                    false
+                };
                 waker_sender.send(TerminateWakerSignal(false)).unwrap();
-                idle_counter = (idle_counter + 1) % 100;
                 call_with_gvl(|ruby| {
-                    if idle_counter == 0 {
+                    if should_gc {
                         ruby.gc_start();
                     }
                     scheduler.funcall::<_, _, Value>(*ID_BLOCK, (thread_current, None::<u8>))
@@ -264,6 +278,8 @@ impl ThreadWorker {

     #[instrument(skip_all, fields(thread_worker=id))]
     pub fn fiber_accept_loop(
+        server: Arc<Server>,
+
         id: String,
         app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -273,10 +289,16 @@ impl ThreadWorker {
         let ruby = Ruby::get().unwrap();
         let (waker_sender, waker_receiver) = watch::channel(TerminateWakerSignal(false));
         let leader: Arc<Mutex<Option<RequestJob>>> = Arc::new(Mutex::new(None));
-        let server = ruby.get_inner(&ITSI_SERVER);
-        let scheduler_proc =
-            Self::build_scheduler_proc(app, &leader, &receiver, &terminated, &waker_sender);
-        let (scheduler, scheduler_fiber) = server.funcall::<_, _, (Value, Value)>(
+        let server_class = ruby.get_inner(&ITSI_SERVER);
+        let scheduler_proc = Self::build_scheduler_proc(
+            app,
+            &leader,
+            &receiver,
+            &terminated,
+            &waker_sender,
+            server.oob_gc_responses_threshold,
+        );
+        let (scheduler, scheduler_fiber) = server_class.funcall::<_, _, (Value, Value)>(
             "start_scheduler_loop",
             (scheduler_class, scheduler_proc),
         )?;
@@ -337,21 +359,31 @@ impl ThreadWorker {

     #[instrument(skip_all, fields(thread_worker=id))]
     pub fn accept_loop(
+        server: Arc<Server>,
         id: String,
         app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
         terminated: Arc<AtomicBool>,
     ) {
         let ruby = Ruby::get().unwrap();
-        let server = ruby.get_inner(&ITSI_SERVER);
+        let server_class = ruby.get_inner(&ITSI_SERVER);
+        let mut idle_counter = 0;
         call_without_gvl(|| loop {
+            if receiver.is_empty() {
+                if let Some(oob_gc_threshold) = server.oob_gc_responses_threshold {
+                    idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                    if idle_counter == 0 {
+                        ruby.gc_start();
+                    }
+                };
+            }
             match receiver.recv_blocking() {
                 Ok(RequestJob::ProcessRequest(request)) => {
                     if terminated.load(Ordering::Relaxed) {
                         break;
                     }
                     call_with_gvl(|_ruby| {
-                        request.process(&ruby, server, app).ok();
+                        request.process(&ruby, server_class, app).ok();
                     })
                 }
                 Ok(RequestJob::Shutdown) => {
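
The new accept_loop (and the matching change in build_scheduler_proc) gates out-of-band GC behind an idle counter: whenever the request queue is empty the counter advances modulo the configured threshold, and GC fires only when it wraps to zero. A minimal Ruby sketch of that cadence, using an illustrative threshold and poll count rather than values taken from this diff:

    # One GC run per `oob_gc_responses_threshold` idle polls (sketch, not Itsi code).
    threshold = 512
    idle_counter = 0
    gc_runs = 0

    10_000.times do
      # assume the request queue is empty on every poll for this illustration
      idle_counter = (idle_counter + 1) % threshold
      gc_runs += 1 if idle_counter.zero?
    end

    puts gc_runs # => 19 (GC fires on polls 512, 1024, ..., 9728)
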
@@ -63,7 +63,7 @@ pub fn configure_tls(
         .map(|s| s.to_string())
         .or_else(|| (*ITSI_ACME_CONTACT_EMAIL).as_ref().ok().map(|s| s.to_string()))
         .ok_or_else(|| itsi_error::ItsiError::ArgumentError(
-            "acme_cert query param or ITSI_ACME_CONTACT_EMAIL must be set before you can auto-generate let's encrypt certificates".to_string(),
+            "acme_email query param or ITSI_ACME_CONTACT_EMAIL must be set before you can auto-generate let's encrypt certificates".to_string(),
         ))?;

     let acme_config = AcmeConfig::new(domains)
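
The reworded error message points at the two ways to supply an ACME contact address. As a hedged illustration (hypothetical domain and email) inside an Itsi.rb config file, either of the following would satisfy it:

    # Option 1: pass acme_email directly in the bind string
    bind "https://example.com?cert=acme&acme_email=admin@example.com"

    # Option 2: omit acme_email and export ITSI_ACME_CONTACT_EMAIL in the
    # server environment before starting Itsi
    bind "https://example.com?cert=acme"
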
@@ -0,0 +1,127 @@
+# frozen_string_literal: true
+env = ENV.fetch('APP_ENV') { ENV.fetch('RACK_ENV', 'development') }
+
+# This is the default Itsi configuration file, installed when you run `itsi init`.
+# It contains a sane starting point for configuring your Itsi server.
+# You can use this file in both development and production environments.
+# Most of the options in this file can be overridden by command line options.
+# Check out `itsi -h` to learn more about the command line options available to you.
+
+# Number of worker processes to spawn
+# If more than 1, Itsi will be booted in Cluster mode
+workers ENV.fetch('ITSI_WORKERS') {
+  require 'etc'
+  env == 'development' ? 1 : Etc.nprocessors
+}
+
+# Number of threads to spawn per worker process
+# For pure CPU-bound applications, you'll get the best results keeping this number low.
+# Setting a value of 1 is great for superficial benchmarks, but in reality
+# it's better to set this a bit higher to allow expensive requests to be overtaken and minimize head-of-line blocking.
+threads ENV.fetch('ITSI_THREADS', 3)
+
+# If your application is IO-bound (e.g. performing a lot of proxied HTTP requests, heavy queries, etc.)
+# you can see *substantial* benefits from enabling this option.
+# To set this option, pass a string, not a class (as we will not have loaded the class yet).
+# E.g.
+# `fiber_scheduler "Itsi::Scheduler"` - The default fast and light-weight scheduler that comes with Itsi
+# `fiber_scheduler "Async::Scheduler"` - Bring your own scheduler!
+fiber_scheduler nil
+
+# By default Itsi will run the Rack app from config.ru.
+# You can provide an alternative Rack app file name here,
+# or you can inline the app directly inside Itsi.rb.
+# Only one of `run` and `rackup_file` can be used.
+# E.g.
+# require 'rack'
+# run(Rack::Builder.app do
+#   use Rack::CommonLogger
+#   run ->(env) { [200, { 'content-type' => 'text/plain' }, ['OK']] }
+# end)
+rackup_file 'config.ru'
+
+# If you bind to https without specifying a certificate, Itsi will use a self-signed certificate.
+# The self-signed certificate will use a CA generated for your host and stored inside `ITSI_LOCAL_CA_DIR` (defaults to ~/.itsi).
+# bind "https://localhost:3000"
+# bind "https://localhost:3000?domains=dev.itsi.fyi"
+#
+# If you want to use Let's Encrypt to generate a real certificate, pass cert=acme and an acme_email address.
+# bind "https://itsi.fyi?cert=acme&acme_email=admin@itsi.fyi"
+# You can generate certificates for multiple domains at once by passing a comma-separated list of domains:
+# bind "https://0.0.0.0?domains=foo.itsi.fyi,bar.itsi.fyi&cert=acme&acme_email=admin@itsi.fyi"
+#
+# If you already have a certificate, you can specify it using the cert and key parameters:
+# bind "https://itsi.fyi?cert=/path/to/cert.pem&key=/path/to/key.pem"
+#
+# You can also bind to a unix socket or a tls unix socket. E.g.
+# bind "unix:///tmp/itsi.sock"
+# bind "tls:///tmp/itsi.secure.sock"
+
+if env == 'development'
+  bind 'http://localhost:3000'
+else
+  bind "https://0.0.0.0?domains=#{ENV['PRODUCTION_DOMAINS']}&cert=acme&acme_email=admin@itsi.fyi"
+end
+
+# If you want to preload the application, set preload to true
+# to load the entire rack app defined in rackup_file before forking.
+# Alternatively, you can preload just a specific group of gems from your Gemfile
+# by providing the group name here.
+# E.g.
+#
+# preload :preload # Load gems inside the preload group
+# preload false    # Don't preload.
+#
+# If you want to be able to perform zero-downtime deploys using a single itsi process,
+# you should disable preloads, so that the application is loaded fresh each time a new worker boots.
+preload true
+
+# Set the maximum memory limit for each worker process in bytes.
+# When this limit is reached, the worker will be gracefully restarted.
+# Only one worker is restarted at a time to ensure we don't take down
+# all of them at once, if they reach the threshold simultaneously.
+worker_memory_limit 48 * 1024 * 1024
+
+# You can provide an optional block of code to run when a worker hits its memory threshold
+# (use this to send yourself an alert, write metrics to disk, etc.).
+after_memory_threshold_reached do |pid|
+  puts "Worker #{pid} has reached its memory threshold and will restart"
+end
+
+# Do clean-up of any non-threadsafe resources before forking a new worker here.
+before_fork {}
+
+# Reinitialize any non-threadsafe resources after forking a new worker here.
+after_fork {}
+
+# Shutdown timeout
+# Number of seconds to wait for workers to gracefully shut down before killing them.
+shutdown_timeout 5
+
+# Set this to false for application environments that require rack.input to be a rewindable body
+# (like Rails). For rack applications that can stream inputs, you can set this to true for a more memory-efficient approach.
+stream_body false
+
+# OOB GC responses threshold
+# Specifies how frequently OOB GC should be triggered during periods where there is a gap in queued requests.
+# Setting this too low can substantially worsen performance.
+oob_gc_responses_threshold 512
+
+# Log level
+# Set this to one of the following values: debug, info, warn, error, fatal
+# Can also be set using the ITSI_LOG environment variable
+log_level :info
+
+# Log Format
+# Set this to either :ansi or :json. If you leave it blank, Itsi will try
+# to auto-detect the format based on the TTY environment.
+log_format :auto
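
The before_fork/after_fork hooks in the default file above ship as empty blocks. A hedged example of what commonly goes in them for an ActiveRecord-backed app (a conventional pattern, not something the default file prescribes):

    # Release DB connections before forking; reconnect in each new worker.
    before_fork { ActiveRecord::Base.connection_pool.disconnect! if defined?(ActiveRecord::Base) }
    after_fork  { ActiveRecord::Base.establish_connection if defined?(ActiveRecord::Base) }
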
@@ -0,0 +1,36 @@
+module Itsi
+  class Server
+    module Config
+      module_function
+
+      ITSI_DEFAULT_CONFIG_FILE = "Itsi.rb"
+
+      def load(options)
+        options[:config_file] ||= \
+          if File.exist?(ITSI_DEFAULT_CONFIG_FILE)
+            ITSI_DEFAULT_CONFIG_FILE
+          elsif File.exist?("config/#{ITSI_DEFAULT_CONFIG_FILE}")
+            "config/#{ITSI_DEFAULT_CONFIG_FILE}"
+          end
+
+        # Options simply pass through unless we've specified a config file
+        return options unless options[:config_file]
+
+        require_relative "options_dsl"
+        OptionsDSL.evaluate(options[:config_file]).merge(options)
+      end
+
+      def write_default
+        if File.exist?(ITSI_DEFAULT_CONFIG_FILE)
+          puts "#{ITSI_DEFAULT_CONFIG_FILE} already exists."
+          return
+        end
+
+        puts "Writing default configuration..."
+        File.open(ITSI_DEFAULT_CONFIG_FILE, "w") do |file|
+          file.write(IO.read("#{__dir__}/Itsi.rb"))
+        end
+      end
+    end
+  end
+end
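
Taken together, Config.load resolves the config file (./Itsi.rb, then ./config/Itsi.rb), evaluates it through OptionsDSL, and merges the explicitly passed options last so they win over file-derived values. A hedged usage sketch; the require path is assumed from the gem layout rather than taken from this diff:

    require "itsi/server"  # assumed entry point that exposes Itsi::Server::Config

    Itsi::Server::Config.write_default          # writes Itsi.rb unless one already exists
    options = Itsi::Server::Config.load({ threads: 2 })
    # When an Itsi.rb is found, its DSL values are evaluated first and then
    # overridden by the passed hash, so options[:threads] stays 2 either way.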