@nmtjs/proxy 0.15.0-beta.1 → 0.15.0-beta.10

package/src/proxy.rs CHANGED
@@ -1,147 +1,687 @@
-use crate::{
-    config::{ApplicationConfig, ProxyConfig, ProxyOptions},
-    router::{RouterAssembly, build_router},
-};
-use log::{debug, info};
-use napi::Error as NapiError;
+use crate::{errors, lb, options, router, server};
+use napi::bindgen_prelude::*;
 use napi_derive::napi;
-use pingora::{
-    prelude::{Opt, Server, http_proxy_service},
-    server::{RunArgs, ShutdownSignal, ShutdownSignalWatch, configuration::ServerConf},
-};
-use std::{collections::HashMap, sync::Mutex, thread};
-use tokio::sync::oneshot;
+use pingora::lb::{LoadBalancer, health_check::TcpHealthCheck, selection::RoundRobin};
+use std::collections::{HashMap, HashSet};
+use std::net::ToSocketAddrs;
+#[cfg(unix)]
+use std::os::unix::io::IntoRawFd;
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+use tokio::sync::{Mutex as TokioMutex, watch};
+use tokio_util::sync::CancellationToken;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+#[allow(dead_code)]
+enum ProxyState {
+    Stopped,
+    Starting,
+    Running,
+    Stopping,
+}
+
+struct ProxyInner {
+    options: options::ProxyOptionsParsed,
+    state: ProxyState,
+    server: server::Server,
+    router: Arc<router::Router>,
+    // Upstreams are mutable; app definitions are immutable (from `options`).
+    upstreams_by_app: HashMap<String, HashSet<PortUpstream>>,
+    // One pool per (app, transport).
+    pools: HashMap<(String, lb::TransportKind), router::PoolConfig>,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+struct PortUpstream {
+    transport: lb::TransportKind,
+    secure: bool,
+    hostname: String,
+    port: u16,
+}
+
+#[napi(object)]
+pub struct PortUpstreamOptions {
+    pub r#type: String,
+    pub transport: String,
+    pub secure: bool,
+    pub hostname: String,
+    pub port: u32,
+}
+
+#[napi(object)]
+pub struct UnixSocketUpstreamOptions {
+    pub r#type: String,
+    pub transport: String,
+    pub secure: bool,
+    pub path: String,
+}
+
+pub type UpstreamOptions = Either<PortUpstreamOptions, UnixSocketUpstreamOptions>;
 
 #[napi]
-pub struct NeemataProxy {
-    server: Option<Server>,
-    shutdown_tx: Option<oneshot::Sender<ShutdownSignal>>,
-    runner: Option<thread::JoinHandle<()>>,
+pub struct Proxy {
+    inner: Arc<Mutex<ProxyInner>>,
 }
 
 #[napi]
-impl NeemataProxy {
+impl Proxy {
     #[napi(constructor)]
-    pub fn new(
-        apps: HashMap<String, ApplicationConfig>,
-        options: Option<ProxyOptions>,
-    ) -> napi::Result<Self> {
-        env_logger::try_init().ok();
-        const DEFAULT_GRACEFUL_SHUTDOWN_TIMEOUT_SECONDS: u64 = 1;
-        let config = ProxyConfig::from_inputs(apps, options)?;
-
-        let mut server_conf = ServerConf::default();
-        let server_opts = Opt {
-            daemon: false,
-            ..Default::default()
-        };
+    pub fn new(env: Env, options: options::ProxyOptions) -> Result<Self> {
+        let parsed = options::parse_proxy_options(&env, options)?;
 
-        server_conf.grace_period_seconds = Some(DEFAULT_GRACEFUL_SHUTDOWN_TIMEOUT_SECONDS - 1);
-        server_conf.graceful_shutdown_timeout_seconds =
-            Some(DEFAULT_GRACEFUL_SHUTDOWN_TIMEOUT_SECONDS);
-        server_conf.threads = config.threads.unwrap_or(1) as usize;
-        server_conf.work_stealing = true;
+        let router = Arc::new(router::Router::new(router::RouterConfig::default()));
 
-        let mut server = Server::new_with_opt_and_conf(server_opts, server_conf);
+        let mut upstreams_by_app = HashMap::new();
+        for app in &parsed.applications {
+            upstreams_by_app.insert(app.name.clone(), HashSet::new());
+        }
 
-        let RouterAssembly {
+        let inner = ProxyInner {
+            options: parsed,
+            state: ProxyState::Stopped,
+            server: server::Server::new(),
             router,
-            background_services,
-        } = build_router(&config)?;
+            upstreams_by_app,
+            pools: HashMap::new(),
+        };
 
-        let mut proxy_service = http_proxy_service(&server.configuration, router);
+        // Initial router config: routes known, pools empty.
+        let initial_config = build_router_config(&inner.options, &inner.pools);
+        inner.router.update(initial_config);
 
-        if config.tls {
-            // TODO: support TLS options
-            panic!("TLS is not supported yet");
-        } else {
-            proxy_service.add_tcp(config.listener());
-        }
+        Ok(Self {
+            inner: Arc::new(Mutex::new(inner)),
+        })
+    }
 
-        server.add_service(proxy_service);
+    #[napi]
+    pub fn start<'env>(&self, env: &'env Env) -> Result<PromiseRaw<'env, ()>> {
+        // Requirement: `start()` resolves only after the bind attempt completes.
+        // We do the bind synchronously here so we can throw a JS Error with a stable `code`.
+        let mut guard = self
+            .inner
+            .lock()
+            .map_err(|_| napi::Error::from_reason("Proxy mutex poisoned"))?;
 
-        for service in background_services {
-            server.add_service(service);
+        match guard.state {
+            ProxyState::Stopped => {
+                guard.state = ProxyState::Starting;
+            }
+            ProxyState::Starting => {
+                // Block until the other start() finishes binding by waiting for the mutex.
+                // By the time we get here, we already hold the mutex, so just decide based on
+                // the current state.
+                return env.spawn_future(async { Ok(()) });
+            }
+            ProxyState::Running => {
+                return Err(errors::generic_error(
+                    env,
+                    errors::codes::ALREADY_STARTED,
+                    "Proxy is already started",
+                ));
+            }
+            ProxyState::Stopping => {
+                return env.spawn_future(async { Ok(()) });
+            }
         }
 
-        Ok(Self {
-            server: Some(server),
-            shutdown_tx: None,
-            runner: None,
+        let listen = guard.options.listen.clone();
+        let tls = guard.options.tls.clone();
+        let server = guard.server.clone();
+        let shared_router = router::SharedRouter::new(guard.router.clone());
+        let initial_lb_tasks = guard
+            .pools
+            .iter()
+            .map(|((app, transport), pool)| (lb_service_name(app, *transport), pool.lb.clone()))
+            .collect::<Vec<_>>();
+
+        let std_listener = std::net::TcpListener::bind(&listen).map_err(|e| {
+            // Transition back to Stopped on bind failure.
+            guard.state = ProxyState::Stopped;
+            errors::generic_error(
+                env,
+                errors::codes::LISTEN_BIND_FAILED,
+                format!("Failed to bind listener on '{listen}': {e}"),
+            )
+        })?;
+        std_listener.set_nonblocking(true).map_err(|e| {
+            napi::Error::from_reason(format!("Failed to set listener nonblocking: {e}"))
+        })?;
+
+        let tls_settings = if let Some(tls) = tls {
+            let mut settings =
+                pingora::listeners::tls::TlsSettings::intermediate(&tls.cert_path, &tls.key_path)
+                    .map_err(|e| {
+                        // Transition back to Stopped on TLS config failure.
+                        guard.state = ProxyState::Stopped;
+                        errors::generic_error(
+                            env,
+                            errors::codes::INVALID_PROXY_OPTIONS,
+                            format!(
+                                "Failed to configure TLS using cert '{}' and key '{}': {e}",
+                                tls.cert_path, tls.key_path
+                            ),
+                        )
+                    })?;
+            if tls.enable_h2 {
+                settings.enable_h2();
+            }
+            Some(settings)
+        } else {
+            None
+        };
+
+        // At this point, bind + TLS config are done.
+        // Mark Running before releasing the mutex so concurrent start/stop behave deterministically.
+        guard.state = ProxyState::Running;
+        drop(guard);
+
+        // Move listener into tokio, start accept loop, then resolve.
+        env.spawn_future(async move {
+            let cancel = CancellationToken::new();
+            let cancel_child = cancel.clone();
+
+            // Start LB health tasks first so pools become ready.
+            for (svc_name, lb) in initial_lb_tasks {
+                server
+                    .upsert_service(svc_name, lb::spawn_lb_health_task(lb))
+                    .await;
+            }
+
+            // Hand the already-bound socket over to Pingora via the `ListenFds` table.
+            // This ensures the bind attempt (and any bind errors) happen before Pingora
+            // starts, avoiding its internal `expect("Failed to build listeners")` panic.
+            #[cfg(unix)]
+            let fd = std_listener.into_raw_fd();
+
+            #[cfg(unix)]
+            let mut fds = pingora::server::Fds::new();
+            #[cfg(unix)]
+            fds.add(listen.clone(), fd);
+
+            #[cfg(unix)]
+            let listen_fds: pingora::server::ListenFds = Arc::new(TokioMutex::new(fds));
+
+            let conf = Arc::new(pingora::server::configuration::ServerConf::default());
+            let mut svc = pingora::proxy::http_proxy_service_with_name(
+                &conf,
+                shared_router,
+                "Neemata Proxy (HTTP)",
+            );
+            if let Some(settings) = tls_settings {
+                svc.add_tls_with_settings(&listen, None, settings);
+            } else {
+                svc.add_tcp(&listen);
+            }
+
+            let task = tokio::spawn(async move {
+                let (shutdown_tx, shutdown_rx) = watch::channel(false);
+                let mut service_fut = Box::pin(async move {
+                    use pingora::services::Service as _;
+                    #[cfg(unix)]
+                    svc.start_service(Some(listen_fds), shutdown_rx, conf.listener_tasks_per_fd)
+                        .await;
+
+                    #[cfg(windows)]
+                    svc.start_service(shutdown_rx, conf.listener_tasks_per_fd)
+                        .await;
+                });
+
+                tokio::select! {
+                    _ = &mut service_fut => {}
+                    _ = cancel_child.cancelled() => {
+                        let _ = shutdown_tx.send(true);
+                        let _ = service_fut.await;
+                    }
+                }
+            });
+
+            server
+                .upsert_service(
+                    "main_http".to_string(),
+                    server::ServiceHandle { cancel, task },
+                )
+                .await;
+
+            Ok(())
         })
     }
 
     #[napi]
-    pub fn run(&mut self) -> napi::Result<()> {
-        if self.runner.is_some() {
-            return Err(NapiError::from_reason("server already running"));
-        }
+    pub fn stop<'env>(&self, env: &'env Env) -> Result<PromiseRaw<'env, ()>> {
+        let (server, inner) = {
+            let mut guard = self
+                .inner
+                .lock()
+                .map_err(|_| napi::Error::from_reason("Proxy mutex poisoned"))?;
+
+            match guard.state {
+                ProxyState::Stopped => {
+                    return env.spawn_future(async { Ok(()) });
+                }
+                ProxyState::Stopping => {
+                    return env.spawn_future(async { Ok(()) });
+                }
+                ProxyState::Starting | ProxyState::Running => {
+                    guard.state = ProxyState::Stopping;
+                }
+            }
+
+            (guard.server.clone(), self.inner.clone())
+        };
+
+        env.spawn_future(async move {
+            server.stop_all().await;
+            if let Ok(mut guard) = inner.lock() {
+                guard.state = ProxyState::Stopped;
+            }
+            Ok(())
+        })
+    }
 
-        let server = self
-            .server
-            .take()
-            .ok_or_else(|| NapiError::from_reason("server already running"))?;
+    #[napi]
+    pub fn add_upstream<'env>(
+        &self,
+        env: &'env Env,
+        app_name: String,
+        upstream: UpstreamOptions,
+    ) -> Result<PromiseRaw<'env, ()>> {
+        let upstream = parse_port_upstream(env, upstream)?;
 
-        let (tx, rx) = oneshot::channel();
-        self.shutdown_tx = Some(tx);
+        let (server, is_running, to_upsert, to_remove) = {
+            let mut guard = self
+                .inner
+                .lock()
+                .map_err(|_| napi::Error::from_reason("Proxy mutex poisoned"))?;
 
-        let handle = thread::spawn(move || {
-            let args = RunArgs {
-                shutdown_signal: Box::new(JsShutdownWatch::new(rx)),
+            let app_def = guard
+                .options
+                .applications
+                .iter()
+                .find(|a| a.name == app_name)
+                .cloned();
+            let health_check_interval_ms = guard.options.health_check_interval_ms;
+            let Some(app_def) = app_def else {
+                return Err(errors::generic_error(
+                    env,
+                    errors::codes::UNKNOWN_APPLICATION,
+                    format!("Unknown application '{app_name}'"),
+                ));
             };
-            server.run(args);
-        });
 
-        self.runner = Some(handle);
-        Ok(())
+            let upstreams = guard
+                .upstreams_by_app
+                .get_mut(&app_name)
+                .expect("apps are initialized at construction");
+
+            if upstreams.contains(&upstream) {
+                return Err(errors::generic_error(
+                    env,
+                    errors::codes::UPSTREAM_ALREADY_EXISTS,
+                    "Upstream already exists",
+                ));
+            }
+
+            // TLS policy: if this upstream is secure, we must be able to derive verify hostname.
+            if upstream.secure {
+                let _ = verify_hostname_for_secure_upstream(env, &app_def)?;
+            }
+
+            upstreams.insert(upstream);
+
+            let (new_pools, removed_transports) =
+                rebuild_app_pools(env, &app_def, upstreams, health_check_interval_ms)?;
+
+            let mut to_upsert = Vec::new();
+            let mut to_remove = Vec::new();
+
+            for (t, p) in new_pools {
+                to_upsert.push((lb_service_name(&app_name, t), p.lb.clone()));
+                guard.pools.insert((app_name.clone(), t), p);
+            }
+            for t in removed_transports {
+                if guard.pools.remove(&(app_name.clone(), t)).is_some() {
+                    to_remove.push(lb_service_name(&app_name, t));
+                }
+            }
+
+            // Always update router config (even when stopped) so it is ready for `start()`.
+            let cfg = build_router_config(&guard.options, &guard.pools);
+            guard.router.update(cfg);
+
+            (
+                guard.server.clone(),
+                guard.state == ProxyState::Running,
+                to_upsert,
+                to_remove,
+            )
+        };
+
+        if !is_running {
+            return env.spawn_future(async { Ok(()) });
+        }
+
+        env.spawn_future(async move {
+            for (name, lb) in to_upsert {
+                server
+                    .upsert_service(name, lb::spawn_lb_health_task(lb))
+                    .await;
+            }
+            for name in to_remove {
+                server.remove_service(&name).await;
+            }
+            Ok(())
+        })
     }
 
     #[napi]
-    pub fn shutdown(&mut self) -> napi::Result<()> {
-        info!("Shutting down server. Sending shutdown signal...");
-        let tx = self.shutdown_tx.take().unwrap();
-        let _ = tx.send(ShutdownSignal::GracefulTerminate);
-
-        info!("Joining server thread...");
-        let handle = self.runner.take().unwrap();
-        let _ = handle.join();
-        info!("Server shut down successfully.");
-        Ok(())
+    pub fn remove_upstream<'env>(
+        &self,
+        env: &'env Env,
+        app_name: String,
+        upstream: UpstreamOptions,
+    ) -> Result<PromiseRaw<'env, ()>> {
+        let upstream = parse_port_upstream(env, upstream)?;
+
+        let (server, is_running, to_upsert, to_remove) = {
+            let mut guard = self
+                .inner
+                .lock()
+                .map_err(|_| napi::Error::from_reason("Proxy mutex poisoned"))?;
+
+            let app_def = guard
+                .options
+                .applications
+                .iter()
+                .find(|a| a.name == app_name)
+                .cloned();
+            let health_check_interval_ms = guard.options.health_check_interval_ms;
+            let Some(app_def) = app_def else {
+                return Err(errors::generic_error(
+                    env,
+                    errors::codes::UNKNOWN_APPLICATION,
+                    format!("Unknown application '{app_name}'"),
+                ));
+            };
+
+            let upstreams = guard
+                .upstreams_by_app
+                .get_mut(&app_name)
+                .expect("apps are initialized at construction");
+
+            if !upstreams.remove(&upstream) {
+                return Err(errors::generic_error(
+                    env,
+                    errors::codes::UPSTREAM_NOT_FOUND,
+                    "Upstream not found",
+                ));
+            }
+
+            let (new_pools, removed_transports) =
+                rebuild_app_pools(env, &app_def, upstreams, health_check_interval_ms)?;
+
+            let mut to_upsert = Vec::new();
+            let mut to_remove = Vec::new();
+
+            for (t, p) in new_pools {
+                to_upsert.push((lb_service_name(&app_name, t), p.lb.clone()));
+                guard.pools.insert((app_name.clone(), t), p);
+            }
+            for t in removed_transports {
+                if guard.pools.remove(&(app_name.clone(), t)).is_some() {
+                    to_remove.push(lb_service_name(&app_name, t));
+                }
+            }
+
+            let cfg = build_router_config(&guard.options, &guard.pools);
+            guard.router.update(cfg);
+
+            (
+                guard.server.clone(),
+                guard.state == ProxyState::Running,
+                to_upsert,
+                to_remove,
+            )
+        };
+
+        if !is_running {
+            return env.spawn_future(async { Ok(()) });
+        }
+
+        env.spawn_future(async move {
+            for (name, lb) in to_upsert {
+                server
+                    .upsert_service(name, lb::spawn_lb_health_task(lb))
+                    .await;
+            }
+            for name in to_remove {
+                server.remove_service(&name).await;
+            }
+            Ok(())
+        })
     }
 }
 
-struct JsShutdownWatch {
-    receiver: Mutex<Option<oneshot::Receiver<ShutdownSignal>>>,
+fn build_router_config(
+    options: &options::ProxyOptionsParsed,
+    pools: &HashMap<(String, lb::TransportKind), router::PoolConfig>,
+) -> router::RouterConfig {
+    let mut cfg = router::RouterConfig::default();
+
+    for app in &options.applications {
+        match &app.routing {
+            options::ApplicationRoutingParsed::Subdomain { name } => {
+                cfg.subdomain_routes
+                    .insert(name.to_ascii_lowercase(), app.name.clone());
+            }
+            options::ApplicationRoutingParsed::Path { name } => {
+                cfg.path_routes.insert(name.clone(), app.name.clone());
+            }
+            options::ApplicationRoutingParsed::Default => {
+                cfg.default_app = Some(app.name.clone());
+            }
+        }
+
+        let http1 = pools
+            .get(&(app.name.clone(), lb::TransportKind::Http1))
+            .cloned();
+        let http2 = pools
+            .get(&(app.name.clone(), lb::TransportKind::Http2))
+            .cloned();
+        cfg.apps
+            .insert(app.name.clone(), router::AppPools { http1, http2 });
+    }
+
+    cfg
+}
+
+fn lb_service_name(app: &str, transport: lb::TransportKind) -> String {
+    match transport {
+        lb::TransportKind::Http1 => format!("lb:{app}:http"),
+        lb::TransportKind::Http2 => format!("lb:{app}:http2"),
+    }
 }
 
-impl JsShutdownWatch {
-    fn new(receiver: oneshot::Receiver<ShutdownSignal>) -> Self {
-        Self {
-            receiver: Mutex::new(Some(receiver)),
+fn parse_port_upstream(env: &Env, upstream: UpstreamOptions) -> Result<PortUpstream> {
+    match upstream {
+        Either::A(port) => {
+            if port.r#type != "port" {
+                return Err(errors::generic_error(
+                    env,
+                    errors::codes::UNSUPPORTED_UPSTREAM_TYPE,
+                    format!("Unsupported upstream type '{}'", port.r#type),
+                ));
+            }
+
+            let transport = match port.transport.as_str() {
+                "http" => lb::TransportKind::Http1,
+                "http2" => lb::TransportKind::Http2,
+                other => {
+                    return errors::throw_type_error(
+                        env,
+                        errors::codes::INVALID_PROXY_OPTIONS,
+                        format!("Unknown upstream transport '{other}'"),
+                    );
+                }
+            };
+
+            if port.port == 0 || port.port > u16::MAX as u32 {
+                return errors::throw_type_error(
+                    env,
+                    errors::codes::INVALID_PROXY_OPTIONS,
+                    "PortUpstreamOptions.port must be in range 1..=65535",
+                );
+            }
+
+            if port.hostname.is_empty() {
+                return errors::throw_type_error(
+                    env,
+                    errors::codes::INVALID_PROXY_OPTIONS,
+                    "PortUpstreamOptions.hostname must be non-empty",
+                );
+            }
+
+            Ok(PortUpstream {
+                transport,
+                secure: port.secure,
+                hostname: port.hostname,
+                port: port.port as u16,
+            })
+        }
+        Either::B(unix) => {
+            if unix.r#type != "unix_socket" {
+                return Err(errors::generic_error(
+                    env,
+                    errors::codes::UNSUPPORTED_UPSTREAM_TYPE,
+                    format!("Unsupported upstream type '{}'", unix.r#type),
+                ));
+            }
+            Err(errors::generic_error(
+                env,
+                errors::codes::UNSUPPORTED_UPSTREAM_TYPE,
+                "unix_socket upstreams are not supported",
+            ))
         }
     }
 }
 
-#[async_trait::async_trait]
-impl ShutdownSignalWatch for JsShutdownWatch {
-    async fn recv(&self) -> ShutdownSignal {
-        let receiver = {
-            let mut guard = match self.receiver.lock() {
-                Ok(guard) => guard,
-                Err(poisoned) => poisoned.into_inner(),
+fn verify_hostname_for_secure_upstream(
+    env: &Env,
+    app: &options::ApplicationOptionsParsed,
+) -> Result<String> {
+    match &app.routing {
+        options::ApplicationRoutingParsed::Subdomain { name } => Ok(name.clone()),
+        options::ApplicationRoutingParsed::Path { .. }
+        | options::ApplicationRoutingParsed::Default => {
+            let Some(sni) = &app.sni else {
+                return Err(errors::type_error(
+                    env,
+                    errors::codes::INVALID_APPLICATION_OPTIONS,
+                    "ApplicationOptions.sni is required when using secure upstreams for path/default routing",
+                ));
             };
+            Ok(sni.clone())
+        }
+    }
+}
+
+fn rebuild_app_pools(
+    env: &Env,
+    app: &options::ApplicationOptionsParsed,
+    upstreams: &HashSet<PortUpstream>,
+    health_check_interval_ms: u32,
+) -> Result<(
+    HashMap<lb::TransportKind, router::PoolConfig>,
+    Vec<lb::TransportKind>,
+)> {
+    let mut by_transport: HashMap<lb::TransportKind, Vec<&PortUpstream>> = HashMap::new();
+    for u in upstreams {
+        by_transport.entry(u.transport).or_default().push(u);
+    }
+
+    let mut new_pools = HashMap::new();
+
+    for (transport, list) in by_transport {
+        if list.is_empty() {
+            continue;
+        }
+
+        let secure = list[0].secure;
+        if list.iter().any(|u| u.secure != secure) {
+            return Err(errors::type_error(
+                env,
+                errors::codes::INVALID_PROXY_OPTIONS,
+                "Mixing secure and insecure upstreams within the same (app, transport) pool is not supported",
+            ));
+        }
 
-            guard.take()
+        let verify_hostname = if secure {
+            verify_hostname_for_secure_upstream(env, app)?
+        } else {
+            String::new()
         };
 
-        match receiver {
-            Some(rx) => {
-                debug!("Waiting for shutdown signal from JS...");
-                let signal = rx.await.unwrap_or(ShutdownSignal::FastShutdown);
-                info!("Received shutdown signal from JS: {:?}", signal);
-                signal
-            }
-            None => ShutdownSignal::FastShutdown,
+        let mut addrs = Vec::new();
+        for u in &list {
+            let iter = (u.hostname.as_str(), u.port)
+                .to_socket_addrs()
+                .map_err(|e| {
+                    errors::type_error(
+                        env,
+                        errors::codes::INVALID_PROXY_OPTIONS,
+                        format!(
+                            "Failed to resolve upstream '{}:{}': {e}",
+                            u.hostname, u.port
+                        ),
+                    )
+                })?;
+            addrs.extend(iter);
         }
+
+        if addrs.is_empty() {
+            continue;
+        }
+
+        let mut lb = LoadBalancer::<RoundRobin>::try_from_iter(addrs).map_err(|e| {
+            errors::type_error(
+                env,
+                errors::codes::INVALID_PROXY_OPTIONS,
+                format!("Failed to build load balancer: {e}"),
+            )
+        })?;
+
+        if secure {
+            lb.set_health_check(TcpHealthCheck::new_tls(&verify_hostname));
+        } else {
+            lb.set_health_check(TcpHealthCheck::new());
+        }
+
+        // TODO(vNext): Define dynamic-upstream readiness semantics.
+        // Currently, a newly added upstream becomes eligible only after the LB health-check loop
+        // runs and marks it healthy. This means `addUpstream()` does not imply "immediately routable".
+        // Options:
+        // - optimistic: treat newly-added backends as healthy until the first failed check/connection
+        // - eager: run an initial health check synchronously during addUpstream/start, then serve
+        // - explicit: expose readiness state/events to JS so callers can await health convergence
+        // Acceptance: behavior is documented, deterministic, and covered by integration tests.
+        lb.health_check_frequency = Some(Duration::from_millis(health_check_interval_ms as u64));
+
+        new_pools.insert(
+            transport,
+            router::PoolConfig {
+                lb: Arc::new(lb),
+                secure,
+                verify_hostname,
+            },
+        );
     }
+
+    let mut removed = Vec::new();
+    for t in [lb::TransportKind::Http1, lb::TransportKind::Http2] {
+        if !new_pools.contains_key(&t) {
+            removed.push(t);
+        }
+    }
+
+    Ok((new_pools, removed))
 }
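
For context on the load-balancing side of this change: each (app, transport) pair now gets its own `LoadBalancer<RoundRobin>` with a TCP health check, rebuilt whenever an upstream is added or removed. Below is a minimal standalone sketch of that pool setup using the same Pingora APIs the new code imports; the helper names, backend addresses, and interval are illustrative only, and (as the in-code TODO notes) the health check only takes effect once a background task drives it, which in this package is the job of `lb::spawn_lb_health_task`.

```rust
use std::{sync::Arc, time::Duration};

use pingora::lb::{LoadBalancer, health_check::TcpHealthCheck, selection::RoundRobin};

// Hypothetical helper mirroring the pool construction in `rebuild_app_pools`.
fn build_pool() -> Arc<LoadBalancer<RoundRobin>> {
    // Static backends for illustration; the real code resolves
    // hostname:port pairs through `to_socket_addrs()` first.
    let mut lb = LoadBalancer::<RoundRobin>::try_from_iter(["127.0.0.1:3001", "127.0.0.1:3002"])
        .expect("static addresses parse");

    // Plain TCP checks on a fixed interval; secure pools use
    // `TcpHealthCheck::new_tls(verify_hostname)` instead.
    lb.set_health_check(TcpHealthCheck::new());
    lb.health_check_frequency = Some(Duration::from_millis(5_000));

    Arc::new(lb)
}

// Selection roughly as a router would perform it per request.
fn pick_backend(pool: &LoadBalancer<RoundRobin>) {
    // Round-robin ignores the hash key; 256 bounds the attempts to skip
    // backends the health checker has marked down.
    if let Some(backend) = pool.select(b"", 256) {
        println!("routing to {}", backend.addr);
    }
}
```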