@assetsart/nylon-mesh 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/.github/workflows/release.yml +98 -0
  2. package/Cargo.lock +2965 -0
  3. package/Cargo.toml +33 -0
  4. package/README.md +104 -0
  5. package/bin/nylon-mesh.js +213 -0
  6. package/bun.lock +360 -0
  7. package/docs/content/docs/caching.mdx +85 -0
  8. package/docs/content/docs/configuration.mdx +115 -0
  9. package/docs/content/docs/index.mdx +58 -0
  10. package/docs/content/docs/load-balancing.mdx +69 -0
  11. package/docs/content/docs/meta.json +9 -0
  12. package/docs/next.config.mjs +11 -0
  13. package/docs/package-lock.json +6099 -0
  14. package/docs/package.json +32 -0
  15. package/docs/postcss.config.mjs +7 -0
  16. package/docs/source.config.ts +23 -0
  17. package/docs/src/app/(home)/layout.tsx +6 -0
  18. package/docs/src/app/(home)/page.tsx +125 -0
  19. package/docs/src/app/api/search/route.ts +9 -0
  20. package/docs/src/app/docs/[[...slug]]/page.tsx +46 -0
  21. package/docs/src/app/docs/layout.tsx +11 -0
  22. package/docs/src/app/global.css +7 -0
  23. package/docs/src/app/layout.tsx +31 -0
  24. package/docs/src/app/llms-full.txt/route.ts +10 -0
  25. package/docs/src/app/llms.txt/route.ts +13 -0
  26. package/docs/src/app/og/docs/[...slug]/route.tsx +27 -0
  27. package/docs/src/components/ai/page-actions.tsx +240 -0
  28. package/docs/src/components/architecture-diagram.tsx +88 -0
  29. package/docs/src/components/benchmark.tsx +129 -0
  30. package/docs/src/components/configuration.tsx +107 -0
  31. package/docs/src/components/copy-button.tsx +29 -0
  32. package/docs/src/components/footer.tsx +37 -0
  33. package/docs/src/components/framework-logos.tsx +35 -0
  34. package/docs/src/lib/cn.ts +1 -0
  35. package/docs/src/lib/layout.shared.tsx +23 -0
  36. package/docs/src/lib/source.ts +27 -0
  37. package/docs/src/mdx-components.tsx +9 -0
  38. package/docs/tsconfig.json +46 -0
  39. package/nylon-mesh.yaml +41 -0
  40. package/package.json +23 -0
  41. package/scripts/publish.mjs +18 -0
  42. package/scripts/release.mjs +52 -0
  43. package/src/config.rs +91 -0
  44. package/src/main.rs +214 -0
  45. package/src/proxy/cache.rs +304 -0
  46. package/src/proxy/handlers.rs +76 -0
  47. package/src/proxy/load_balancer.rs +23 -0
  48. package/src/proxy/mod.rs +232 -0
  49. package/src/tls_accept.rs +119 -0
@@ -0,0 +1,52 @@
1
+ import { readFileSync, writeFileSync } from 'fs';
2
+ import { execSync } from 'child_process';
3
+ import { join } from 'path';
4
+
5
+ // Get version from standard arguments
6
+ const newVersion = process.argv[2];
7
+
8
+ if (!newVersion) {
9
+ console.error("āŒ Please specify a version. Example: bun run release 1.0.0");
10
+ process.exit(1);
11
+ }
12
+
13
+ // Clean and validate semantic version tag
14
+ const cleanVersion = newVersion.replace(/^v/, '');
15
+
16
+ if (!/^[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$/.test(cleanVersion)) {
17
+ console.error(`āŒ Invalid semantic version: ${newVersion}`);
18
+ process.exit(1);
19
+ }
20
+
21
+ const tagVersion = `v${cleanVersion}`;
22
+
23
+ console.log(`šŸ“¦ Bumping version to: ${cleanVersion} (Tag: ${tagVersion})`);
24
+
25
+ try {
26
+ // Update package.json
27
+ console.log("šŸ“ Updating package.json...");
28
+ const pkgPath = join(process.cwd(), 'package.json');
29
+ const pkg = JSON.parse(readFileSync(pkgPath, 'utf8'));
30
+ pkg.version = cleanVersion;
31
+ writeFileSync(pkgPath, JSON.stringify(pkg, null, 2) + '\n');
32
+
33
+ // Update Cargo.toml
34
+ console.log("šŸ“ Updating Cargo.toml...");
35
+ const processCargoPath = join(process.cwd(), 'Cargo.toml');
36
+ let cargoToml = readFileSync(processCargoPath, 'utf8');
37
+ cargoToml = cargoToml.replace(/^version\s*=\s*"[^"]+"/m, `version = "${cleanVersion}"`);
38
+ writeFileSync(processCargoPath, cargoToml);
39
+
40
+ // Run Git Commands
41
+ console.log("šŸ”– Creating git commit and tag...");
42
+ execSync('git add package.json Cargo.toml', { stdio: 'inherit' });
43
+ execSync(`git commit -m "chore: release ${tagVersion}"`, { stdio: 'inherit' });
44
+ execSync(`git tag ${tagVersion}`, { stdio: 'inherit' });
45
+
46
+ console.log("\nāœ… Release prepared successfully!");
47
+ console.log(`\nšŸš€ To push the release, run:\n git push origin main && git push origin ${tagVersion}`);
48
+
49
+ } catch (err) {
50
+ console.error("\nāŒ Error during release:", err.message);
51
+ process.exit(1);
52
+ }
package/src/config.rs ADDED
@@ -0,0 +1,91 @@
1
+ use serde::Deserialize;
2
+
3
+ #[derive(Debug, Deserialize, Clone)]
4
+ pub struct Config {
5
+ pub threads: Option<usize>,
6
+ pub grace_period_seconds: Option<u64>,
7
+ pub graceful_shutdown_timeout_seconds: Option<u64>,
8
+ pub liveness_path: Option<String>,
9
+ pub readiness_path: Option<String>,
10
+ pub load_balancer_algo: Option<LoadBalancerAlgorithm>,
11
+ pub listen: Option<String>,
12
+ pub tls: Option<TlsConfig>,
13
+ pub upstreams: Vec<UpstreamConfig>,
14
+ pub redis_url: Option<String>,
15
+ pub cache: Option<CacheConfig>,
16
+ pub bypass: Option<BypassConfig>,
17
+ pub cache_control: Option<Vec<CacheControlConfig>>,
18
+ }
19
+
20
+ #[derive(Debug, Deserialize, Clone, PartialEq)]
21
+ #[serde(rename_all = "snake_case")]
22
+ pub enum LoadBalancerAlgorithm {
23
+ RoundRobin,
24
+ Random,
25
+ }
26
+
27
+ #[derive(Debug, Deserialize, Clone)]
28
+ #[serde(untagged)]
29
+ pub enum UpstreamConfig {
30
+ Simple(String),
31
+ Weighted { address: String, weight: usize },
32
+ }
33
+
34
+ impl UpstreamConfig {
35
+ pub fn address(&self) -> &str {
36
+ match self {
37
+ Self::Simple(addr) => addr,
38
+ Self::Weighted { address, .. } => address,
39
+ }
40
+ }
41
+
42
+ pub fn weight(&self) -> usize {
43
+ match self {
44
+ Self::Simple(_) => 1,
45
+ Self::Weighted { weight, .. } => *weight,
46
+ }
47
+ }
48
+ }
49
+
50
+ #[derive(Debug, Deserialize, Clone)]
51
+ pub struct TlsConfig {
52
+ pub listen: String,
53
+ pub certs: Vec<CertificateConfig>,
54
+ }
55
+
56
+ #[derive(Debug, Deserialize, Clone)]
57
+ pub struct CertificateConfig {
58
+ pub host: String, // e.g., "*.example.com" or "example.com" or "default"
59
+ pub cert_path: String,
60
+ pub key_path: String,
61
+ }
62
+
63
+ #[derive(Debug, Deserialize, Clone)]
64
+ pub struct CacheConfig {
65
+ pub tier1_capacity: Option<u64>,
66
+ pub tier1_ttl_seconds: Option<u64>,
67
+ pub tier2_ttl_seconds: Option<u64>,
68
+ pub status: Option<Vec<u16>>,
69
+ pub content_types: Option<Vec<String>>,
70
+ }
71
+
72
+ #[derive(Debug, Deserialize, Clone)]
73
+ pub struct BypassConfig {
74
+ pub paths: Option<Vec<String>>,
75
+ pub extensions: Option<Vec<String>>,
76
+ }
77
+
78
+ #[derive(Debug, Deserialize, Clone)]
79
+ pub struct CacheControlConfig {
80
+ pub value: String,
81
+ pub paths: Option<Vec<String>>,
82
+ pub extensions: Option<Vec<String>>,
83
+ }
84
+
85
+ impl Config {
86
+ pub fn load(path: &str) -> Result<Self, Box<dyn std::error::Error>> {
87
+ let f = std::fs::File::open(path)?;
88
+ let config: Config = serde_yaml::from_reader(f)?;
89
+ Ok(config)
90
+ }
91
+ }
package/src/main.rs ADDED
@@ -0,0 +1,214 @@
1
+ #[cfg(not(debug_assertions))]
2
+ use mimalloc::MiMalloc;
3
+
4
+ #[cfg(not(debug_assertions))]
5
+ #[global_allocator]
6
+ static GLOBAL: MiMalloc = MiMalloc;
7
+
8
+ pub mod config;
9
+ pub mod proxy;
10
+ pub mod tls_accept;
11
+
12
+ use pingora_core::server::Server;
13
+ use pingora_core::server::configuration::{Opt, ServerConf};
14
+ use pingora_load_balancing::LoadBalancer;
15
+ use pingora_proxy::http_proxy_service;
16
+ use std::env;
17
+ use std::sync::Arc;
18
+ use tracing::{error, info};
19
+
20
+ use config::Config;
21
+ use proxy::MeshProxy;
22
+
23
+ use std::sync::atomic::{AtomicBool, Ordering};
24
+ use tokio::signal;
25
+
26
+ pub static IS_SHUTTING_DOWN: AtomicBool = AtomicBool::new(false);
27
+
28
+ pub fn is_shutting_down() -> bool {
29
+ IS_SHUTTING_DOWN.load(Ordering::SeqCst)
30
+ }
31
+
32
+ // SIGTERM
33
+ async fn try_shutdown() -> std::io::Result<()> {
34
+ #[cfg(unix)]
35
+ if let Ok(mut stream) = signal::unix::signal(signal::unix::SignalKind::terminate()) {
36
+ stream.recv().await;
37
+ }
38
+ Ok(())
39
+ }
40
+
41
+ async fn shutdown(shutdown_timeout: u64) {
42
+ println!("\nšŸ›‘ Shutting down...");
43
+ IS_SHUTTING_DOWN.store(true, Ordering::SeqCst);
44
+ // countdown
45
+ for i in (0..shutdown_timeout).rev() {
46
+ println!("Shutting down in {} seconds", i);
47
+ tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
48
+ }
49
+ }
50
+
51
+ fn main() {
52
+ tracing_subscriber::fmt().init();
53
+
54
+ let args: Vec<String> = env::args().collect();
55
+ let config_path = if args.len() > 1 {
56
+ &args[1]
57
+ } else {
58
+ "nylon-mesh.yaml"
59
+ };
60
+
61
+ info!("Loading config from: {}", config_path);
62
+ let config = match Config::load(config_path) {
63
+ Ok(c) => Arc::new(c),
64
+ Err(e) => {
65
+ error!("Failed to load config: {}", e);
66
+ std::process::exit(1);
67
+ }
68
+ };
69
+
70
+ let mut pingora_upstreams = Vec::new();
71
+ for u in config.upstreams.iter() {
72
+ match pingora_load_balancing::Backend::new_with_weight(u.address(), u.weight()) {
73
+ Ok(b) => pingora_upstreams.push(b),
74
+ Err(e) => error!("Failed to parse upstream {}: {}", u.address(), e),
75
+ }
76
+ }
77
+
78
+ if pingora_upstreams.is_empty() {
79
+ error!("No upstreams configured in YAML.");
80
+ std::process::exit(1);
81
+ }
82
+
83
+ // Prepare discovery and backend manager
84
+ let discovery = pingora_load_balancing::discovery::Static::new(
85
+ pingora_upstreams
86
+ .into_iter()
87
+ .collect::<std::collections::BTreeSet<_>>(),
88
+ );
89
+ let backends = pingora_load_balancing::Backends::new(discovery);
90
+
91
+ // print!("config: {:?}", config);
92
+ let load_balancer = match config
93
+ .load_balancer_algo
94
+ .as_ref()
95
+ .unwrap_or(&crate::config::LoadBalancerAlgorithm::RoundRobin)
96
+ {
97
+ crate::config::LoadBalancerAlgorithm::RoundRobin => {
98
+ let lb = LoadBalancer::<pingora_load_balancing::selection::RoundRobin>::from_backends(
99
+ backends,
100
+ );
101
+ let rt = tokio::runtime::Runtime::new().unwrap();
102
+ rt.block_on(lb.update()).unwrap();
103
+ proxy::MeshLoadBalancer::RoundRobin(Arc::new(lb))
104
+ }
105
+ crate::config::LoadBalancerAlgorithm::Random => {
106
+ let lb =
107
+ LoadBalancer::<pingora_load_balancing::selection::Random>::from_backends(backends);
108
+ let rt = tokio::runtime::Runtime::new().unwrap();
109
+ rt.block_on(lb.update()).unwrap();
110
+ proxy::MeshLoadBalancer::Random(Arc::new(lb))
111
+ }
112
+ };
113
+
114
+ let tier1_capacity = config
115
+ .cache
116
+ .as_ref()
117
+ .and_then(|c| c.tier1_capacity)
118
+ .unwrap_or(10000);
119
+ let tier1_ttl = config
120
+ .cache
121
+ .as_ref()
122
+ .and_then(|c| c.tier1_ttl_seconds)
123
+ .unwrap_or(3);
124
+ let tier1_cache = moka::future::Cache::builder()
125
+ .max_capacity(tier1_capacity as u64)
126
+ .time_to_live(std::time::Duration::from_secs(tier1_ttl as u64))
127
+ .build();
128
+
129
+ let mut encoding_hits = std::collections::HashMap::new();
130
+ encoding_hits.insert("zstd", std::sync::atomic::AtomicU64::new(0));
131
+ encoding_hits.insert("br", std::sync::atomic::AtomicU64::new(0));
132
+ encoding_hits.insert("gzip", std::sync::atomic::AtomicU64::new(0));
133
+ encoding_hits.insert("deflate", std::sync::atomic::AtomicU64::new(0));
134
+
135
+ let proxy = MeshProxy {
136
+ config: config.clone(),
137
+ load_balancer: Arc::new(load_balancer),
138
+ tier1_cache,
139
+ encoding_hits: Arc::new(encoding_hits),
140
+ };
141
+
142
+ let opt = Opt::parse_args();
143
+ let mut server = Server::new(Some(opt)).unwrap_or_else(|e| {
144
+ error!("Failed to initialize Pingora Server: {}", e);
145
+ std::process::exit(1);
146
+ });
147
+
148
+ let threads = config.threads.unwrap_or_else(|| {
149
+ std::thread::available_parallelism()
150
+ .map(|n| n.get())
151
+ .unwrap_or(1)
152
+ });
153
+
154
+ let grace_period_seconds = config.grace_period_seconds.unwrap_or(0);
155
+ let graceful_shutdown_timeout_seconds = config.graceful_shutdown_timeout_seconds.unwrap_or(0);
156
+
157
+ server.configuration = Arc::new(ServerConf {
158
+ daemon: false,
159
+ grace_period_seconds: Some(grace_period_seconds),
160
+ graceful_shutdown_timeout_seconds: Some(graceful_shutdown_timeout_seconds),
161
+ threads,
162
+ ..Default::default()
163
+ });
164
+ server.bootstrap();
165
+
166
+ let mut proxy_service = http_proxy_service(&server.configuration, proxy);
167
+
168
+ // Setup Listeners
169
+ if let Some(listen_addr) = &config.listen {
170
+ info!("Adding plain HTTP listener on {}", listen_addr);
171
+ proxy_service.add_tcp(listen_addr);
172
+ }
173
+
174
+ if let Some(tls) = &config.tls {
175
+ info!("Adding TLS listener on {}", tls.listen);
176
+ match tls_accept::new_tls_settings(tls.certs.clone()) {
177
+ Ok(settings) => {
178
+ proxy_service.add_tls_with_settings(&tls.listen, None, settings);
179
+ }
180
+ Err(e) => {
181
+ error!("Failed to create TLS settings: {:?}", e);
182
+ std::process::exit(1);
183
+ }
184
+ }
185
+ }
186
+
187
+ server.add_service(proxy_service);
188
+
189
+ let shutdown_timeout = config.graceful_shutdown_timeout_seconds.unwrap_or(0);
190
+ std::thread::spawn(move || {
191
+ let rt = tokio::runtime::Builder::new_current_thread()
192
+ .enable_all()
193
+ .build()
194
+ .unwrap();
195
+
196
+ rt.block_on(async move {
197
+ // Wait for shutdown signal
198
+ tokio::select! {
199
+ _ = signal::ctrl_c() => {
200
+ println!("From ctrl_c");
201
+ shutdown(shutdown_timeout).await;
202
+ },
203
+ _ = try_shutdown() => {
204
+ println!("From try_shutdown");
205
+ shutdown(shutdown_timeout).await;
206
+ },
207
+ }
208
+ println!("āœ… Shutdown complete");
209
+ });
210
+ });
211
+
212
+ info!("Starting nylon-mesh proxy server...");
213
+ server.run_forever();
214
+ }
@@ -0,0 +1,304 @@
1
+ use bytes::Bytes;
2
+ use http::StatusCode;
3
+ use pingora::Result;
4
+ use pingora::http::ResponseHeader;
5
+ use pingora_proxy::Session;
6
+ use serde::{Deserialize, Serialize};
7
+ use tracing::{debug, error};
8
+
9
+ use super::{MeshProxy, ProxyCtx};
10
+
11
+ static REDIS_CONN: tokio::sync::OnceCell<redis::aio::MultiplexedConnection> =
12
+ tokio::sync::OnceCell::const_new();
13
+
14
+ pub async fn get_redis_conn(redis_url: &str) -> Option<redis::aio::MultiplexedConnection> {
15
+ if redis_url.is_empty() {
16
+ return None;
17
+ }
18
+ match REDIS_CONN
19
+ .get_or_try_init(|| async {
20
+ let client =
21
+ redis::Client::open(redis_url).map_err(|e| format!("URL {}: {}", redis_url, e))?;
22
+ client
23
+ .get_multiplexed_async_connection()
24
+ .await
25
+ .map_err(|e| e.to_string())
26
+ })
27
+ .await
28
+ {
29
+ Ok(c) => Some(c.clone()),
30
+ Err(e) => {
31
+ error!("Failed to init Redis: {}", e);
32
+ None
33
+ }
34
+ }
35
+ }
36
+
37
+ #[derive(Serialize, Deserialize)]
38
+ pub struct CachedHeaders {
39
+ pub status: u16,
40
+ pub headers: Vec<(String, String)>,
41
+ #[serde(default)]
42
+ pub expires_at: u64,
43
+ }
44
+
45
+ impl MeshProxy {
46
+ pub fn determine_encodings_to_check(&self, accept_encoding: &str) -> Vec<&'static str> {
47
+ let mut check = Vec::new();
48
+ if accept_encoding.contains("gzip") {
49
+ check.push("gzip");
50
+ }
51
+ if accept_encoding.contains("zstd") {
52
+ check.push("zstd");
53
+ }
54
+ if accept_encoding.contains("br") {
55
+ check.push("br");
56
+ }
57
+ if accept_encoding.contains("deflate") {
58
+ check.push("deflate");
59
+ }
60
+ check.push(""); // Fallback
61
+
62
+ check.sort_by(|a, b| {
63
+ let hit_a = self
64
+ .encoding_hits
65
+ .get(a)
66
+ .map(|v| v.load(std::sync::atomic::Ordering::Relaxed))
67
+ .unwrap_or(0);
68
+ let hit_b = self
69
+ .encoding_hits
70
+ .get(b)
71
+ .map(|v| v.load(std::sync::atomic::Ordering::Relaxed))
72
+ .unwrap_or(0);
73
+ hit_b.cmp(&hit_a) // Descending
74
+ });
75
+ check
76
+ }
77
+
78
+ pub async fn fetch_from_redis(
79
+ &self,
80
+ session: &mut Session,
81
+ enc: &str,
82
+ host: &str,
83
+ cache_key: &str,
84
+ redis_url: &str,
85
+ now_secs: u64,
86
+ ) -> Result<bool> {
87
+ if let Some(mut conn) = get_redis_conn(redis_url).await {
88
+ let sub_key = cache_key.strip_prefix(host).unwrap_or(cache_key);
89
+ let headers_key = format!("{}:headers", sub_key);
90
+
91
+ let mut pipe = redis::pipe();
92
+ pipe.cmd("HGET")
93
+ .arg(host)
94
+ .arg(sub_key)
95
+ .cmd("HGET")
96
+ .arg(host)
97
+ .arg(&headers_key);
98
+
99
+ let query_result: redis::RedisResult<(Option<Vec<u8>>, Option<Vec<u8>>)> =
100
+ pipe.query_async(&mut conn).await;
101
+
102
+ if let Ok((Some(cached_payload), cached_headers_vec)) = query_result {
103
+ let mut status_code = StatusCode::OK;
104
+ let mut parsed_headers = Vec::new();
105
+ let mut is_new_format = false;
106
+
107
+ if let Some(headers_bytes) = cached_headers_vec {
108
+ if let Ok(headers_json) = String::from_utf8(headers_bytes) {
109
+ if let Ok(ch) = serde_json::from_str::<CachedHeaders>(&headers_json) {
110
+ if ch.expires_at > 0 && now_secs > ch.expires_at {
111
+ let mut del_pipe = redis::pipe();
112
+ del_pipe
113
+ .cmd("HDEL")
114
+ .arg(host)
115
+ .arg(sub_key)
116
+ .cmd("HDEL")
117
+ .arg(host)
118
+ .arg(&headers_key);
119
+ let _: redis::RedisResult<()> =
120
+ del_pipe.query_async(&mut conn).await;
121
+ return Ok(false);
122
+ }
123
+ if let Ok(sc) = StatusCode::from_u16(ch.status) {
124
+ status_code = sc;
125
+ }
126
+ parsed_headers = ch.headers;
127
+ is_new_format = true;
128
+ }
129
+ }
130
+ }
131
+
132
+ debug!("Tier 2 HIT: {}", cache_key);
133
+ if let Some(timestamp) = self.encoding_hits.get(enc) {
134
+ timestamp.store(now_secs, std::sync::atomic::Ordering::Relaxed);
135
+ }
136
+
137
+ let bytes = Bytes::from(cached_payload);
138
+
139
+ if let Ok(mut header) = ResponseHeader::build(status_code, None) {
140
+ if is_new_format {
141
+ for (name, value) in parsed_headers {
142
+ if let (Ok(hname), Ok(hval)) = (
143
+ http::header::HeaderName::try_from(name.as_str()),
144
+ http::header::HeaderValue::try_from(value.as_str()),
145
+ ) {
146
+ let _ = header.insert_header(hname, hval);
147
+ }
148
+ }
149
+ } else {
150
+ let _ = header.insert_header("Content-Type", "text/html; charset=utf-8");
151
+ let _ = header.insert_header("Content-Length", bytes.len().to_string());
152
+ }
153
+
154
+ if !enc.is_empty() {
155
+ let _ = header.insert_header("Content-Encoding", enc);
156
+ }
157
+
158
+ self.tier1_cache
159
+ .insert(cache_key.to_string(), (header.clone(), bytes.clone()))
160
+ .await;
161
+ let _ = header.insert_header("X-Cache-Tier", "2");
162
+
163
+ let _ = session.write_response_header(Box::new(header), true).await;
164
+ let _ = session.write_response_body(Some(bytes), true).await;
165
+ return Ok(true);
166
+ }
167
+ }
168
+ }
169
+ Ok(false)
170
+ }
171
+
172
+ pub async fn fetch_from_cache(
173
+ &self,
174
+ session: &mut Session,
175
+ ctx: &mut ProxyCtx,
176
+ host: &str,
177
+ uri: &str,
178
+ query: &str,
179
+ encodings_to_check: &[&'static str],
180
+ now_secs: u64,
181
+ ) -> Result<bool> {
182
+ let base_cache_key = format!("{}{}{}", host, uri, query);
183
+
184
+ for enc in encodings_to_check.iter() {
185
+ let mut cache_key = base_cache_key.clone();
186
+ if !enc.is_empty() {
187
+ cache_key.push_str(&format!(":{}", enc));
188
+ }
189
+ ctx.cache_key = cache_key.clone();
190
+
191
+ // let now = std::time::Instant::now();
192
+ // Tier 1
193
+ if let Some((mut header, body)) = self.tier1_cache.get(&cache_key).await {
194
+ // println!("Tier 1 HIT: {:?}", now.elapsed());
195
+ debug!("Tier 1 HIT: {}", cache_key);
196
+ if let Some(timestamp) = self.encoding_hits.get(*enc) {
197
+ timestamp.store(now_secs, std::sync::atomic::Ordering::Relaxed);
198
+ }
199
+
200
+ let _ = header.insert_header("X-Cache-Tier", "1");
201
+ session
202
+ .write_response_header(Box::new(header), true)
203
+ .await?;
204
+ session.write_response_body(Some(body), true).await?;
205
+ return Ok(true);
206
+ }
207
+
208
+ // Tier 2
209
+ if let Some(redis_url) = &self.config.redis_url {
210
+ if !redis_url.is_empty() {
211
+ if self
212
+ .fetch_from_redis(session, enc, host, &cache_key, redis_url, now_secs)
213
+ .await?
214
+ {
215
+ return Ok(true);
216
+ }
217
+ }
218
+ }
219
+ }
220
+
221
+ Ok(false)
222
+ }
223
+
224
+ pub fn spawn_cache_save(
225
+ &self,
226
+ host: String,
227
+ cache_key: String,
228
+ mut header: ResponseHeader,
229
+ html_bytes: Bytes,
230
+ redis_url_opt: Option<String>,
231
+ t2_ttl: u64,
232
+ ) {
233
+ let _ = header.remove_header("Transfer-Encoding");
234
+ let _ = header.insert_header("Content-Length", html_bytes.len().to_string());
235
+ let _ = header.remove_header("Cache-Control");
236
+
237
+ let content_encoding = header
238
+ .headers
239
+ .get("Content-Encoding")
240
+ .map(|hv| hv.to_str().unwrap_or(""))
241
+ .unwrap_or("")
242
+ .to_string();
243
+
244
+ let mut final_cache_key = cache_key;
245
+ if !content_encoding.is_empty() {
246
+ final_cache_key.push_str(&format!(":{}", content_encoding));
247
+ }
248
+
249
+ let sub_key = final_cache_key
250
+ .strip_prefix(&host)
251
+ .unwrap_or(&final_cache_key)
252
+ .to_string();
253
+
254
+ let tier1_cache = self.tier1_cache.clone();
255
+ tokio::spawn(async move {
256
+ tier1_cache
257
+ .insert(
258
+ final_cache_key.clone(),
259
+ (header.clone(), html_bytes.clone()),
260
+ )
261
+ .await;
262
+
263
+ if let Some(redis_url) = redis_url_opt {
264
+ if !redis_url.is_empty() {
265
+ if let Some(mut conn) = get_redis_conn(&redis_url).await {
266
+ let mut headers_vec = Vec::new();
267
+ for (name, value) in header.headers.iter() {
268
+ if let Ok(value_str) = value.to_str() {
269
+ headers_vec
270
+ .push((name.as_str().to_string(), value_str.to_string()));
271
+ }
272
+ }
273
+
274
+ let now_secs = std::time::SystemTime::now()
275
+ .duration_since(std::time::UNIX_EPOCH)
276
+ .unwrap_or(std::time::Duration::from_secs(0))
277
+ .as_secs();
278
+
279
+ let cached_headers = CachedHeaders {
280
+ status: header.status.as_u16(),
281
+ headers: headers_vec,
282
+ expires_at: now_secs + t2_ttl,
283
+ };
284
+
285
+ let html_vec: Vec<u8> = html_bytes.into();
286
+ if let Ok(headers_json) = serde_json::to_string(&cached_headers) {
287
+ let headers_key = format!("{}:headers", sub_key);
288
+ let mut pipe = redis::pipe();
289
+ pipe.cmd("HSET")
290
+ .arg(&host)
291
+ .arg(&sub_key)
292
+ .arg(html_vec)
293
+ .cmd("HSET")
294
+ .arg(&host)
295
+ .arg(&headers_key)
296
+ .arg(headers_json);
297
+ let _: redis::RedisResult<()> = pipe.query_async(&mut conn).await;
298
+ }
299
+ }
300
+ }
301
+ }
302
+ });
303
+ }
304
+ }