spikard 0.8.3 → 0.10.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +19 -10
- data/ext/spikard_rb/Cargo.lock +234 -162
- data/ext/spikard_rb/Cargo.toml +3 -3
- data/ext/spikard_rb/extconf.rb +4 -3
- data/lib/spikard/config.rb +88 -12
- data/lib/spikard/testing.rb +3 -1
- data/lib/spikard/version.rb +1 -1
- data/lib/spikard.rb +11 -0
- data/vendor/crates/spikard-bindings-shared/Cargo.toml +3 -6
- data/vendor/crates/spikard-bindings-shared/examples/config_extraction.rs +8 -8
- data/vendor/crates/spikard-bindings-shared/src/config_extractor.rs +2 -2
- data/vendor/crates/spikard-bindings-shared/src/conversion_traits.rs +4 -4
- data/vendor/crates/spikard-bindings-shared/src/di_traits.rs +10 -4
- data/vendor/crates/spikard-bindings-shared/src/error_response.rs +3 -3
- data/vendor/crates/spikard-bindings-shared/src/handler_base.rs +10 -5
- data/vendor/crates/spikard-bindings-shared/src/json_conversion.rs +829 -0
- data/vendor/crates/spikard-bindings-shared/src/lazy_cache.rs +587 -0
- data/vendor/crates/spikard-bindings-shared/src/lib.rs +7 -0
- data/vendor/crates/spikard-bindings-shared/src/lifecycle_base.rs +11 -11
- data/vendor/crates/spikard-bindings-shared/src/lifecycle_executor.rs +9 -37
- data/vendor/crates/spikard-bindings-shared/src/response_builder.rs +436 -3
- data/vendor/crates/spikard-bindings-shared/src/response_interpreter.rs +944 -0
- data/vendor/crates/spikard-bindings-shared/src/test_client_base.rs +4 -4
- data/vendor/crates/spikard-bindings-shared/tests/config_extractor_behavior.rs +3 -2
- data/vendor/crates/spikard-bindings-shared/tests/error_response_edge_cases.rs +13 -13
- data/vendor/crates/spikard-bindings-shared/tests/{comprehensive_coverage.rs → full_coverage.rs} +10 -5
- data/vendor/crates/spikard-bindings-shared/tests/handler_base_integration.rs +14 -14
- data/vendor/crates/spikard-bindings-shared/tests/integration_tests.rs +669 -0
- data/vendor/crates/spikard-core/Cargo.toml +3 -3
- data/vendor/crates/spikard-core/src/di/container.rs +1 -1
- data/vendor/crates/spikard-core/src/di/factory.rs +2 -2
- data/vendor/crates/spikard-core/src/di/resolved.rs +2 -2
- data/vendor/crates/spikard-core/src/di/value.rs +1 -1
- data/vendor/crates/spikard-core/src/http.rs +75 -0
- data/vendor/crates/spikard-core/src/lifecycle.rs +43 -43
- data/vendor/crates/spikard-core/src/parameters.rs +14 -19
- data/vendor/crates/spikard-core/src/problem.rs +1 -1
- data/vendor/crates/spikard-core/src/request_data.rs +7 -16
- data/vendor/crates/spikard-core/src/router.rs +6 -0
- data/vendor/crates/spikard-core/src/schema_registry.rs +2 -3
- data/vendor/crates/spikard-core/src/type_hints.rs +3 -2
- data/vendor/crates/spikard-core/src/validation/error_mapper.rs +1 -1
- data/vendor/crates/spikard-core/src/validation/mod.rs +1 -1
- data/vendor/crates/spikard-core/tests/di_dependency_defaults.rs +1 -1
- data/vendor/crates/spikard-core/tests/error_mapper.rs +2 -2
- data/vendor/crates/spikard-core/tests/parameters_edge_cases.rs +1 -1
- data/vendor/crates/spikard-core/tests/parameters_full.rs +1 -1
- data/vendor/crates/spikard-core/tests/parameters_schema_and_formats.rs +1 -1
- data/vendor/crates/spikard-core/tests/validation_coverage.rs +4 -4
- data/vendor/crates/spikard-http/Cargo.toml +4 -2
- data/vendor/crates/spikard-http/src/cors.rs +32 -11
- data/vendor/crates/spikard-http/src/di_handler.rs +12 -8
- data/vendor/crates/spikard-http/src/grpc/framing.rs +469 -0
- data/vendor/crates/spikard-http/src/grpc/handler.rs +887 -25
- data/vendor/crates/spikard-http/src/grpc/mod.rs +114 -22
- data/vendor/crates/spikard-http/src/grpc/service.rs +232 -2
- data/vendor/crates/spikard-http/src/grpc/streaming.rs +80 -2
- data/vendor/crates/spikard-http/src/handler_trait.rs +204 -27
- data/vendor/crates/spikard-http/src/handler_trait_tests.rs +15 -15
- data/vendor/crates/spikard-http/src/jsonrpc/http_handler.rs +2 -2
- data/vendor/crates/spikard-http/src/jsonrpc/router.rs +2 -2
- data/vendor/crates/spikard-http/src/lib.rs +1 -1
- data/vendor/crates/spikard-http/src/lifecycle/adapter.rs +2 -2
- data/vendor/crates/spikard-http/src/lifecycle.rs +4 -4
- data/vendor/crates/spikard-http/src/openapi/spec_generation.rs +2 -0
- data/vendor/crates/spikard-http/src/server/fast_router.rs +186 -0
- data/vendor/crates/spikard-http/src/server/grpc_routing.rs +324 -23
- data/vendor/crates/spikard-http/src/server/handler.rs +33 -22
- data/vendor/crates/spikard-http/src/server/lifecycle_execution.rs +21 -2
- data/vendor/crates/spikard-http/src/server/mod.rs +125 -20
- data/vendor/crates/spikard-http/src/server/request_extraction.rs +126 -44
- data/vendor/crates/spikard-http/src/server/routing_factory.rs +80 -69
- data/vendor/crates/spikard-http/tests/common/handlers.rs +2 -2
- data/vendor/crates/spikard-http/tests/common/test_builders.rs +12 -12
- data/vendor/crates/spikard-http/tests/di_handler_error_responses.rs +2 -2
- data/vendor/crates/spikard-http/tests/di_integration.rs +6 -6
- data/vendor/crates/spikard-http/tests/grpc_bidirectional_streaming.rs +430 -0
- data/vendor/crates/spikard-http/tests/grpc_client_streaming.rs +738 -0
- data/vendor/crates/spikard-http/tests/grpc_integration_test.rs +13 -9
- data/vendor/crates/spikard-http/tests/grpc_server_streaming.rs +974 -0
- data/vendor/crates/spikard-http/tests/lifecycle_execution.rs +2 -2
- data/vendor/crates/spikard-http/tests/request_extraction_full.rs +4 -4
- data/vendor/crates/spikard-http/tests/server_config_builder.rs +2 -2
- data/vendor/crates/spikard-http/tests/server_cors_preflight.rs +1 -0
- data/vendor/crates/spikard-http/tests/server_openapi_jsonrpc_static.rs +140 -0
- data/vendor/crates/spikard-rb/Cargo.toml +3 -1
- data/vendor/crates/spikard-rb/src/conversion.rs +138 -4
- data/vendor/crates/spikard-rb/src/grpc/handler.rs +706 -229
- data/vendor/crates/spikard-rb/src/grpc/mod.rs +6 -2
- data/vendor/crates/spikard-rb/src/gvl.rs +2 -2
- data/vendor/crates/spikard-rb/src/handler.rs +169 -91
- data/vendor/crates/spikard-rb/src/lib.rs +444 -62
- data/vendor/crates/spikard-rb/src/lifecycle.rs +29 -1
- data/vendor/crates/spikard-rb/src/metadata/route_extraction.rs +108 -43
- data/vendor/crates/spikard-rb/src/request.rs +117 -20
- data/vendor/crates/spikard-rb/src/runtime/server_runner.rs +52 -25
- data/vendor/crates/spikard-rb/src/server.rs +23 -14
- data/vendor/crates/spikard-rb/src/testing/client.rs +5 -4
- data/vendor/crates/spikard-rb/src/testing/sse.rs +1 -36
- data/vendor/crates/spikard-rb/src/testing/websocket.rs +3 -38
- data/vendor/crates/spikard-rb/src/websocket.rs +32 -23
- data/vendor/crates/spikard-rb-macros/Cargo.toml +1 -1
- metadata +14 -4
- data/vendor/bundle/ruby/3.4.0/gems/diff-lcs-1.6.2/mise.toml +0 -5
- data/vendor/bundle/ruby/3.4.0/gems/rake-compiler-dock-1.10.0/build/buildkitd.toml +0 -2
```diff
@@ -42,12 +42,14 @@
 //! let config = GrpcConfig::default();
 //! ```
 
+pub mod framing;
 pub mod handler;
 pub mod service;
 pub mod streaming;
 
 // Re-export main types
-pub use
+pub use framing::parse_grpc_client_stream;
+pub use handler::{GrpcHandler, GrpcHandlerResult, GrpcRequestData, GrpcResponseData, RpcMode};
 pub use service::{GenericGrpcService, copy_metadata, is_grpc_request, parse_grpc_path};
 pub use streaming::{MessageStream, StreamingRequest, StreamingResponse};
 
```
```diff
@@ -59,6 +61,35 @@ use std::sync::Arc;
 ///
 /// Controls how the server handles gRPC requests, including compression,
 /// timeouts, and protocol settings.
+///
+/// # Stream Limits
+///
+/// This configuration enforces message-level size limits but delegates
+/// concurrent stream limiting to the HTTP/2 transport layer:
+///
+/// - **Message Size Limits**: The `max_message_size` field is enforced per
+///   individual message (request or response) in both unary and streaming RPCs.
+///   When a single message exceeds this limit, the request is rejected with
+///   `PAYLOAD_TOO_LARGE` (HTTP 413).
+///
+/// - **Concurrent Stream Limits**: The `max_concurrent_streams` is an advisory
+///   configuration passed to the HTTP/2 layer for connection-level stream
+///   negotiation. The HTTP/2 transport automatically enforces this limit and
+///   returns GOAWAY frames when exceeded. Applications should not rely on
+///   custom enforcement of this limit.
+///
+/// - **Stream Length Limits**: There is currently no built-in limit on the
+///   total number of messages in a stream. Handlers should implement their own
+///   message counting if needed. Future versions may add a `max_stream_response_bytes`
+///   field to limit total response size per stream.
+///
+/// # Example
+///
+/// ```ignore
+/// let mut config = GrpcConfig::default();
+/// config.max_message_size = 10 * 1024 * 1024; // 10MB per message
+/// config.max_concurrent_streams = 50; // Advised to HTTP/2 layer
+/// ```
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct GrpcConfig {
     /// Enable gRPC support
@@ -66,6 +97,17 @@ pub struct GrpcConfig {
     pub enabled: bool,
 
     /// Maximum message size in bytes (for both sending and receiving)
+    ///
+    /// This limit applies to individual messages in both unary and streaming RPCs.
+    /// When a single message exceeds this size, the request is rejected with HTTP 413
+    /// (Payload Too Large).
+    ///
+    /// Default: 4MB (4194304 bytes)
+    ///
+    /// # Note
+    /// This limit does NOT apply to the total response size in streaming RPCs.
+    /// For multi-message streams, the total response can exceed this limit as long
+    /// as each individual message stays within the limit.
     #[serde(default = "default_max_message_size")]
     pub max_message_size: usize,
 
@@ -77,7 +119,25 @@ pub struct GrpcConfig {
     #[serde(default)]
     pub request_timeout: Option<u64>,
 
-    /// Maximum number of concurrent streams per connection
+    /// Maximum number of concurrent streams per connection (HTTP/2 advisory)
+    ///
+    /// This value is communicated to HTTP/2 clients as the server's flow control limit.
+    /// The HTTP/2 transport layer enforces this limit automatically via SETTINGS frames
+    /// and GOAWAY responses. Applications should NOT implement custom enforcement.
+    ///
+    /// Default: 100 streams per connection
+    ///
+    /// # Stream Limiting Strategy
+    /// - **Per Connection**: This limit applies per HTTP/2 connection, not globally
+    /// - **Transport Enforcement**: HTTP/2 handles all stream limiting; applications
+    ///   need not implement custom checks
+    /// - **Streaming Requests**: In server streaming or bidi streaming, each logical
+    ///   RPC consumes one stream slot. Message ordering within a stream follows
+    ///   HTTP/2 frame ordering.
+    ///
+    /// # Future Enhancement
+    /// A future `max_stream_response_bytes` field may be added to limit the total
+    /// response size in streaming RPCs (separate from per-message limits).
     #[serde(default = "default_max_concurrent_streams")]
     pub max_concurrent_streams: u32,
 
@@ -92,6 +152,8 @@ pub struct GrpcConfig {
     /// HTTP/2 keepalive timeout in seconds
     #[serde(default = "default_keepalive_timeout")]
     pub keepalive_timeout: u64,
+    // TODO: Consider adding in future versions:
+    // pub max_stream_response_bytes: Option<usize>, // Total bytes per streaming response
 }
 
 impl Default for GrpcConfig {
```
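The new `GrpcConfig` docs above pin down where each limit is enforced: `max_message_size` per individual message, `max_concurrent_streams` at the HTTP/2 transport. Below is a minimal sketch of the per-message check those docs describe, using only the `bytes` and `tonic` crates; the helper name `check_message_size` is illustrative and not the crate's actual API.

```rust
use bytes::Bytes;
use tonic::Status;

/// Hypothetical helper mirroring the documented behavior: any single message
/// larger than `max_message_size` is rejected (ResourceExhausted, which the
/// HTTP layer surfaces as 413 Payload Too Large per the docs above).
fn check_message_size(message: &Bytes, max_message_size: usize) -> Result<(), Status> {
    if message.len() > max_message_size {
        return Err(Status::resource_exhausted(format!(
            "message of {} bytes exceeds limit of {} bytes",
            message.len(),
            max_message_size
        )));
    }
    Ok(())
}

fn main() {
    let limit = 4 * 1024 * 1024; // 4MB, the documented default
    let small = Bytes::from(vec![0u8; 1024]);
    let large = Bytes::from(vec![0u8; limit + 1]);

    assert!(check_message_size(&small, limit).is_ok());
    assert!(check_message_size(&large, limit).is_err());
}
```

Note that, per the struct docs, this check is per message only; a multi-message stream may legitimately exceed the limit in aggregate.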
```diff
@@ -131,22 +193,24 @@ const fn default_keepalive_timeout() -> u64 {
 
 /// Registry for gRPC handlers
 ///
-/// Maps service names to their handlers. Used by the server to route
-/// incoming gRPC requests to the appropriate handler.
+/// Maps service names to their handlers and RPC modes. Used by the server to route
+/// incoming gRPC requests to the appropriate handler method based on RPC mode.
 ///
 /// # Example
 ///
 /// ```ignore
-/// use spikard_http::grpc::GrpcRegistry;
+/// use spikard_http::grpc::{GrpcRegistry, RpcMode};
 /// use std::sync::Arc;
 ///
 /// let mut registry = GrpcRegistry::new();
-/// registry.register("mypackage.UserService", Arc::new(user_handler));
-/// registry.register("mypackage.
+/// registry.register("mypackage.UserService", Arc::new(user_handler), RpcMode::Unary);
+/// registry.register("mypackage.StreamService", Arc::new(stream_handler), RpcMode::ServerStreaming);
 /// ```
+type GrpcHandlerEntry = (Arc<dyn GrpcHandler>, RpcMode);
+
 #[derive(Clone)]
 pub struct GrpcRegistry {
-    handlers: Arc<HashMap<String,
+    handlers: Arc<HashMap<String, GrpcHandlerEntry>>,
 }
 
 impl GrpcRegistry {
@@ -163,13 +227,17 @@ impl GrpcRegistry {
     ///
     /// * `service_name` - Fully qualified service name (e.g., "mypackage.MyService")
     /// * `handler` - Handler implementation for this service
-
+    /// * `rpc_mode` - The RPC mode this handler supports (Unary, ServerStreaming, etc.)
+    pub fn register(&mut self, service_name: impl Into<String>, handler: Arc<dyn GrpcHandler>, rpc_mode: RpcMode) {
         let handlers = Arc::make_mut(&mut self.handlers);
-        handlers.insert(service_name.into(), handler);
+        handlers.insert(service_name.into(), (handler, rpc_mode));
     }
 
-    /// Get a handler by service name
-
+    /// Get a handler and its RPC mode by service name
+    ///
+    /// Returns both the handler and the RPC mode it was registered with,
+    /// allowing the router to dispatch to the appropriate handler method.
+    pub fn get(&self, service_name: &str) -> Option<(Arc<dyn GrpcHandler>, RpcMode)> {
        self.handlers.get(service_name).cloned()
     }
 
```
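Because `GrpcRegistry::get` now returns the `RpcMode` alongside the handler, the routing layer can branch on the mode to pick a call path. A rough sketch of that dispatch follows, with a local stand-in enum: the real `RpcMode` lives in `spikard_http::grpc`, the streaming method names come from the `service.rs` hunks further down, and the unary path is left as a generic label since its method name is not shown in these hunks.

```rust
/// Local stand-in for the crate's RpcMode; the four variants match the ones
/// exercised in the new registry tests below.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum RpcMode {
    Unary,
    ServerStreaming,
    ClientStreaming,
    BidirectionalStreaming,
}

/// Illustrative routing decision: with `registry.get()` returning
/// `(handler, mode)`, the server can branch like this to choose which
/// handler method to invoke.
fn dispatch_path(mode: RpcMode) -> &'static str {
    match mode {
        RpcMode::Unary => "unary call path",
        RpcMode::ServerStreaming => "handler.call_server_stream(request)",
        RpcMode::ClientStreaming => "handler.call_client_stream(streaming_request)",
        RpcMode::BidirectionalStreaming => "handler.call_bidi_stream(streaming_request)",
    }
}

fn main() {
    assert_eq!(dispatch_path(RpcMode::Unary), "unary call path");
    println!("{}", dispatch_path(RpcMode::ServerStreaming));
}
```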
```diff
@@ -262,7 +330,7 @@ mod tests {
         let mut registry = GrpcRegistry::new();
         let handler = Arc::new(TestHandler);
 
-        registry.register("test.Service", handler);
+        registry.register("test.Service", handler, RpcMode::Unary);
 
         assert!(!registry.is_empty());
         assert_eq!(registry.len(), 1);
@@ -274,11 +342,13 @@ mod tests {
         let mut registry = GrpcRegistry::new();
         let handler = Arc::new(TestHandler);
 
-        registry.register("test.Service", handler);
+        registry.register("test.Service", handler, RpcMode::Unary);
 
         let retrieved = registry.get("test.Service");
         assert!(retrieved.is_some());
-
+        let (handler, rpc_mode) = retrieved.unwrap();
+        assert_eq!(handler.service_name(), "test.Service");
+        assert_eq!(rpc_mode, RpcMode::Unary);
     }
 
     #[test]
@@ -292,9 +362,9 @@ mod tests {
     fn test_grpc_registry_service_names() {
         let mut registry = GrpcRegistry::new();
 
-        registry.register("service1", Arc::new(TestHandler));
-        registry.register("service2", Arc::new(TestHandler));
-        registry.register("service3", Arc::new(TestHandler));
+        registry.register("service1", Arc::new(TestHandler), RpcMode::Unary);
+        registry.register("service2", Arc::new(TestHandler), RpcMode::ServerStreaming);
+        registry.register("service3", Arc::new(TestHandler), RpcMode::Unary);
 
         let mut names = registry.service_names();
         names.sort();
@@ -305,7 +375,7 @@ mod tests {
     #[test]
     fn test_grpc_registry_contains() {
         let mut registry = GrpcRegistry::new();
-        registry.register("test.Service", Arc::new(TestHandler));
+        registry.register("test.Service", Arc::new(TestHandler), RpcMode::Unary);
 
         assert!(registry.contains("test.Service"));
         assert!(!registry.contains("other.Service"));
@@ -315,8 +385,8 @@ mod tests {
     fn test_grpc_registry_multiple_services() {
         let mut registry = GrpcRegistry::new();
 
-        registry.register("user.Service", Arc::new(TestHandler));
-        registry.register("post.Service", Arc::new(TestHandler));
+        registry.register("user.Service", Arc::new(TestHandler), RpcMode::Unary);
+        registry.register("post.Service", Arc::new(TestHandler), RpcMode::ServerStreaming);
 
         assert_eq!(registry.len(), 2);
         assert!(registry.contains("user.Service"));
@@ -326,7 +396,7 @@ mod tests {
     #[test]
     fn test_grpc_registry_clone() {
         let mut registry = GrpcRegistry::new();
-        registry.register("test.Service", Arc::new(TestHandler));
+        registry.register("test.Service", Arc::new(TestHandler), RpcMode::Unary);
 
         let cloned = registry.clone();
 
@@ -339,4 +409,26 @@ mod tests {
         let registry = GrpcRegistry::default();
         assert!(registry.is_empty());
     }
+
+    #[test]
+    fn test_grpc_registry_rpc_mode_storage() {
+        let mut registry = GrpcRegistry::new();
+
+        registry.register("unary.Service", Arc::new(TestHandler), RpcMode::Unary);
+        registry.register("server_stream.Service", Arc::new(TestHandler), RpcMode::ServerStreaming);
+        registry.register("client_stream.Service", Arc::new(TestHandler), RpcMode::ClientStreaming);
+        registry.register("bidi.Service", Arc::new(TestHandler), RpcMode::BidirectionalStreaming);
+
+        let (_, mode) = registry.get("unary.Service").unwrap();
+        assert_eq!(mode, RpcMode::Unary);
+
+        let (_, mode) = registry.get("server_stream.Service").unwrap();
+        assert_eq!(mode, RpcMode::ServerStreaming);
+
+        let (_, mode) = registry.get("client_stream.Service").unwrap();
+        assert_eq!(mode, RpcMode::ClientStreaming);
+
+        let (_, mode) = registry.get("bidi.Service").unwrap();
+        assert_eq!(mode, RpcMode::BidirectionalStreaming);
+    }
 }
```
```diff
@@ -5,7 +5,9 @@
 //! enabling language-agnostic gRPC handling.
 
 use crate::grpc::handler::{GrpcHandler, GrpcHandlerResult, GrpcRequestData, GrpcResponseData};
+use crate::grpc::streaming::MessageStream;
 use bytes::Bytes;
+use futures_util::StreamExt;
 use std::sync::Arc;
 use tonic::{Request, Response, Status};
 
@@ -75,6 +77,234 @@ impl GenericGrpcService {
         }
     }
 
+    /// Handle a server streaming RPC call
+    ///
+    /// Takes a single request and returns a stream of response messages.
+    /// Converts the Tonic Request into our GrpcRequestData format, calls the
+    /// handler's call_server_stream method, and converts the MessageStream
+    /// into a Tonic streaming response body.
+    ///
+    /// # Arguments
+    ///
+    /// * `service_name` - Fully qualified service name
+    /// * `method_name` - Method name
+    /// * `request` - Tonic request containing the serialized protobuf message
+    ///
+    /// # Returns
+    ///
+    /// A Response with a streaming body containing the message stream
+    ///
+    /// # Error Propagation Limitations
+    ///
+    /// When a stream returns an error mid-stream (after messages have begun
+    /// being sent), the error may not be perfectly transmitted to the client
+    /// as a gRPC trailer. This is due to limitations in Axum's `Body::from_stream`:
+    ///
+    /// - **Pre-stream errors** (before any messages): Properly converted to
+    ///   HTTP status codes and returned to the client
+    /// - **Mid-stream errors** (after messages have begun): The error is converted
+    ///   to a generic `BoxError`, and the stream terminates. The connection is
+    ///   properly closed, but the gRPC status code metadata is lost.
+    ///
+    /// For robust error handling in streaming RPCs:
+    /// - Prefer detecting errors early (before sending messages) when possible
+    /// - Include error information in the message stream itself if critical
+    ///   (application-level error messages in the protobuf)
+    /// - For true gRPC trailer support, consider implementing a custom Axum
+    ///   body type that wraps the stream and can inject trailers on error
+    ///
+    /// See: <https://github.com/tokio-rs/axum/discussions/2043>
+    pub async fn handle_server_stream(
+        &self,
+        service_name: String,
+        method_name: String,
+        request: Request<Bytes>,
+    ) -> Result<Response<axum::body::Body>, Status> {
+        // Extract metadata and payload from Tonic request
+        let (metadata, _extensions, payload) = request.into_parts();
+
+        // Create our internal request representation
+        let grpc_request = GrpcRequestData {
+            service_name,
+            method_name,
+            payload,
+            metadata,
+        };
+
+        // Call the handler's server streaming method
+        let message_stream: MessageStream = self.handler.call_server_stream(grpc_request).await?;
+
+        // Convert MessageStream to axum Body
+        //
+        // LIMITATION: When converting tonic::Status errors from the stream,
+        // we lose the gRPC status metadata. The Status is converted to a
+        // generic Box<dyn Error>, and Axum's Body::from_stream doesn't have
+        // special handling for gRPC error semantics.
+        //
+        // Current behavior:
+        // - Stream errors are converted to BoxError
+        // - Body stream terminates on the first error
+        // - Connection is properly closed
+        // - Error metadata (status code, message) is not transmitted to client
+        //
+        // TODO: Implement custom Body wrapper that can:
+        // 1. Capture tonic::Status errors
+        // 2. Extract status code and message
+        // 3. Inject gRPC trailers (grpc-status, grpc-message) when stream ends
+        // 4. Properly signal error to client while preserving partial messages
+        //
+        // This would require implementing a custom StreamBody or similar that
+        // understands gRPC error semantics.
+        let byte_stream =
+            message_stream.map(|result| result.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>));
+
+        let body = axum::body::Body::from_stream(byte_stream);
+
+        // Create response with streaming body
+        let response = Response::new(body);
+
+        Ok(response)
+    }
+
+    /// Handle a client streaming RPC call
+    ///
+    /// Takes a request body stream of protobuf messages and returns a single response.
+    /// Parses the HTTP/2 body stream using gRPC frame parser, creates a MessageStream,
+    /// calls the handler's call_client_stream method, and converts the GrpcResponseData
+    /// back to a Tonic Response.
+    ///
+    /// # Arguments
+    ///
+    /// * `service_name` - Fully qualified service name
+    /// * `method_name` - Method name
+    /// * `request` - Axum request with streaming body containing HTTP/2 framed protobuf messages
+    /// * `max_message_size` - Maximum size per message (bytes)
+    ///
+    /// # Returns
+    ///
+    /// A Response with a single message body
+    ///
+    /// # Stream Handling
+    ///
+    /// The request body stream contains framed protobuf messages. Each frame is parsed
+    /// and validated for size:
+    /// - Messages within `max_message_size` are passed to the handler
+    /// - Messages exceeding the limit result in a ResourceExhausted error
+    /// - Invalid frames result in InvalidArgument errors
+    /// - The stream terminates when the client closes the write side
+    ///
+    /// # Frame Format
+    ///
+    /// Frames follow the gRPC HTTP/2 protocol format:
+    /// - 1 byte: compression flag (0 = uncompressed)
+    /// - 4 bytes: message size (big-endian)
+    /// - N bytes: message payload
+    ///
+    /// # Metadata and Trailers
+    ///
+    /// - Request metadata (headers) from the Tonic request is passed to the handler
+    /// - Response metadata from the handler is included in the response headers
+    /// - gRPC trailers (like grpc-status) should be handled by the caller
+    pub async fn handle_client_stream(
+        &self,
+        service_name: String,
+        method_name: String,
+        request: Request<axum::body::Body>,
+        max_message_size: usize,
+    ) -> Result<Response<Bytes>, Status> {
+        // Extract metadata and body from Tonic request
+        let (metadata, _extensions, body) = request.into_parts();
+
+        // Parse HTTP/2 body into stream of gRPC frames with size validation
+        let message_stream = crate::grpc::framing::parse_grpc_client_stream(body, max_message_size).await?;
+
+        // Create our internal streaming request representation
+        let streaming_request = crate::grpc::streaming::StreamingRequest {
+            service_name,
+            method_name,
+            message_stream,
+            metadata,
+        };
+
+        // Call the handler's client streaming method
+        let response: crate::grpc::handler::GrpcHandlerResult =
+            self.handler.call_client_stream(streaming_request).await;
+
+        // Convert result to Tonic response
+        match response {
+            Ok(grpc_response) => {
+                let mut tonic_response = Response::new(grpc_response.payload);
+                copy_metadata(&grpc_response.metadata, tonic_response.metadata_mut());
+                Ok(tonic_response)
+            }
+            Err(status) => Err(status),
+        }
+    }
+
+    /// Handle a bidirectional streaming RPC call
+    ///
+    /// Takes a request body stream and returns a stream of response messages.
+    /// Parses the HTTP/2 body stream using gRPC frame parser, creates a StreamingRequest,
+    /// calls the handler's call_bidi_stream method, and converts the MessageStream
+    /// back to an Axum streaming response body.
+    ///
+    /// # Arguments
+    ///
+    /// * `service_name` - Fully qualified service name
+    /// * `method_name` - Method name
+    /// * `request` - Axum request with streaming body containing HTTP/2 framed protobuf messages
+    /// * `max_message_size` - Maximum size per message (bytes)
+    ///
+    /// # Returns
+    ///
+    /// A Response with a streaming body containing response messages
+    ///
+    /// # Stream Handling
+    ///
+    /// - Request stream: Parsed from HTTP/2 body using frame parser
+    /// - Response stream: Converted from MessageStream to Axum Body
+    /// - Both streams are independent (full-duplex)
+    /// - Errors in either stream are propagated appropriately
+    ///
+    /// # Error Propagation
+    ///
+    /// Similar to server streaming, mid-stream errors in the response may not be
+    /// perfectly transmitted as gRPC trailers due to Axum Body::from_stream limitations.
+    /// See handle_server_stream() documentation for details.
+    pub async fn handle_bidi_stream(
+        &self,
+        service_name: String,
+        method_name: String,
+        request: Request<axum::body::Body>,
+        max_message_size: usize,
+    ) -> Result<Response<axum::body::Body>, Status> {
+        // Extract metadata and body from Tonic request
+        let (metadata, _extensions, body) = request.into_parts();
+
+        // Parse HTTP/2 body into stream of gRPC frames with size validation
+        let message_stream = crate::grpc::framing::parse_grpc_client_stream(body, max_message_size).await?;
+
+        // Create our internal streaming request representation
+        let streaming_request = crate::grpc::streaming::StreamingRequest {
+            service_name,
+            method_name,
+            message_stream,
+            metadata,
+        };
+
+        // Call the handler's bidirectional streaming method
+        let response_stream: MessageStream = self.handler.call_bidi_stream(streaming_request).await?;
+
+        // Convert MessageStream to axum Body (same as server streaming)
+        let byte_stream =
+            response_stream.map(|result| result.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>));
+
+        let body = axum::body::Body::from_stream(byte_stream);
+        let response = Response::new(body);
+
+        Ok(response)
+    }
+
     /// Get the service name from the handler
     pub fn service_name(&self) -> &str {
         self.handler.service_name()
```
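The `handle_client_stream` docs above spell out the gRPC length-prefixed frame layout: a 1-byte compression flag, a 4-byte big-endian length, then the payload. Presumably `framing::parse_grpc_client_stream` applies this incrementally over the HTTP/2 body with the size checks described; the standalone sketch below only demonstrates the wire layout itself, and both function names here are illustrative rather than the crate's API.

```rust
use bytes::{BufMut, Bytes, BytesMut};

/// Encode one message as a gRPC frame:
/// 1 byte compression flag (0 = uncompressed), 4-byte big-endian length, payload.
fn encode_grpc_frame(payload: &[u8]) -> Bytes {
    let mut buf = BytesMut::with_capacity(5 + payload.len());
    buf.put_u8(0); // compression flag: uncompressed
    buf.put_u32(payload.len() as u32); // big-endian length prefix
    buf.put_slice(payload);
    buf.freeze()
}

/// Decode a single frame from the front of `buf`, returning (payload, bytes consumed).
/// Returns None if the buffer does not yet hold a complete frame.
fn decode_grpc_frame(buf: &[u8]) -> Option<(Bytes, usize)> {
    if buf.len() < 5 {
        return None; // need flag + length prefix first
    }
    let len = u32::from_be_bytes([buf[1], buf[2], buf[3], buf[4]]) as usize;
    if buf.len() < 5 + len {
        return None; // payload not fully buffered yet
    }
    Some((Bytes::copy_from_slice(&buf[5..5 + len]), 5 + len))
}

fn main() {
    let frame = encode_grpc_frame(b"hello");
    let (payload, consumed) = decode_grpc_frame(&frame).expect("complete frame");
    assert_eq!(payload.as_ref(), b"hello");
    assert_eq!(consumed, frame.len());
}
```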
```diff
@@ -189,7 +419,7 @@ mod tests {
             })
         }
 
-        fn service_name(&self) -> &
+        fn service_name(&self) -> &str {
             "test.TestService"
         }
     }
@@ -371,7 +601,7 @@ mod tests {
             Box::pin(async { Err(Status::not_found("Resource not found")) })
         }
 
-        fn service_name(&self) -> &
+        fn service_name(&self) -> &str {
             "test.ErrorService"
         }
     }
```
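The error-propagation caveat documented on `handle_server_stream` suggests embedding critical errors in the message stream itself when they occur mid-stream, since the gRPC status metadata is lost once Axum converts the error to a `BoxError`. A sketch of that workaround is below, assuming a plain string "error marker" stands in for what would be a protobuf error message in a real service.

```rust
use bytes::Bytes;
use futures_util::stream::{self, Stream, StreamExt};
use std::pin::Pin;
use tonic::Status;

// Same shape as the crate's MessageStream alias, mirrored locally.
type MessageStream = Pin<Box<dyn Stream<Item = Result<Bytes, Status>> + Send>>;

/// Instead of ending the stream with Err(Status) (whose status metadata is lost
/// downstream), emit one final application-level error marker message the client
/// can decode, then end the stream cleanly.
fn stream_with_inline_error(items: Vec<Result<Bytes, String>>) -> MessageStream {
    let mapped = stream::iter(items).map(|item| match item {
        Ok(bytes) => Ok(bytes),
        // Hypothetical encoding of the error into the payload itself.
        Err(msg) => Ok(Bytes::from(format!("ERROR:{msg}"))),
    });
    Box::pin(mapped)
}

#[tokio::main]
async fn main() {
    let stream = stream_with_inline_error(vec![
        Ok(Bytes::from_static(b"chunk-1")),
        Err("backend unavailable".to_string()),
    ]);
    let collected: Vec<_> = stream.collect().await;
    assert_eq!(collected.len(), 2);
    assert!(collected.iter().all(|r| r.is_ok())); // error travels as a payload
}
```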
```diff
@@ -16,6 +16,53 @@ use tonic::Status;
 /// Each item in the stream is either:
 /// - Ok(Bytes): A serialized protobuf message
 /// - Err(Status): A gRPC error
+///
+/// # Backpressure Considerations
+///
+/// Streaming responses should implement backpressure handling to avoid memory buildup with slow clients:
+///
+/// - **Problem**: If a client reads slowly but the handler produces messages quickly, messages will
+///   queue in memory, potentially causing high memory usage or OOM errors.
+/// - **Solution**: The gRPC layer (Tonic) handles backpressure automatically via the underlying TCP/HTTP/2
+///   connection. However, handlers should be aware of this behavior.
+/// - **Best Practice**: For long-running or high-volume streams, implement rate limiting or flow control
+///   in the handler to avoid overwhelming the network buffer.
+///
+/// # Example: Rate-limited streaming
+///
+/// ```ignore
+/// use spikard_http::grpc::streaming::MessageStream;
+/// use bytes::Bytes;
+/// use std::pin::Pin;
+/// use std::time::Duration;
+/// use tokio::time::sleep;
+/// use futures_util::stream::{self, StreamExt};
+///
+/// // Handler that sends 1000 messages with rate limiting
+/// fn create_rate_limited_stream() -> MessageStream {
+///     let messages = (0..1000).map(|i| {
+///         Ok(Bytes::from(format!("message_{}", i)))
+///     });
+///
+///     // Stream with delay between messages to avoid overwhelming the client
+///     let stream = stream::iter(messages)
+///         .then(|msg| async {
+///             sleep(Duration::from_millis(1)).await; // 1ms between messages
+///             msg
+///         });
+///
+///     Box::pin(stream)
+/// }
+/// ```
+///
+/// # Memory Management
+///
+/// Keep the following in mind when implementing large streams:
+///
+/// - Messages are buffered in the gRPC transport layer's internal queue
+/// - Slow clients will cause the queue to grow, increasing memory usage
+/// - Very large individual messages may cause buffer allocation spikes
+/// - Consider implementing stream chunking for very large responses (split one large message into many small ones)
 pub type MessageStream = Pin<Box<dyn Stream<Item = Result<Bytes, Status>> + Send>>;
 
 /// Request for client streaming RPC
```
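The memory-management notes above recommend chunking very large responses into many small messages. A sketch of that approach is shown here under the same `MessageStream` alias (mirrored locally so the snippet runs on its own); the `chunked_stream` helper and the chunk size are illustrative, not part of the crate.

```rust
use bytes::Bytes;
use futures_util::stream::{self, Stream, StreamExt};
use std::pin::Pin;
use tonic::Status;

// Local mirror of the crate's MessageStream alias.
type MessageStream = Pin<Box<dyn Stream<Item = Result<Bytes, Status>> + Send>>;

/// Split one large payload into fixed-size chunks and expose them as a
/// MessageStream, so no single message approaches max_message_size and the
/// transport can apply backpressure between chunks.
fn chunked_stream(payload: Bytes, chunk_size: usize) -> MessageStream {
    let chunks: Vec<Result<Bytes, Status>> = payload
        .chunks(chunk_size)
        .map(|c| Ok(Bytes::copy_from_slice(c)))
        .collect();
    Box::pin(stream::iter(chunks))
}

#[tokio::main]
async fn main() {
    let payload = Bytes::from(vec![7u8; 10_000]);
    let mut stream = chunked_stream(payload, 4096);
    let mut total = 0usize;
    while let Some(chunk) = stream.next().await {
        total += chunk.expect("chunking never fails here").len();
    }
    assert_eq!(total, 10_000); // 4096 + 4096 + 1808 bytes across three messages
}
```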
```diff
@@ -34,12 +81,18 @@ pub struct StreamingRequest {
 
 /// Response for server streaming RPC
 ///
-/// Contains metadata
+/// Contains metadata, a stream of outgoing messages, and optional trailers.
+/// Trailers are metadata sent after the stream completes (after all messages).
 pub struct StreamingResponse {
     /// Stream of outgoing protobuf messages
     pub message_stream: MessageStream,
-    /// Response metadata
+    /// Response metadata (sent before messages)
     pub metadata: tonic::metadata::MetadataMap,
+    /// Optional trailers (sent after stream completes)
+    ///
+    /// Trailers are useful for sending status information or metrics
+    /// after all messages have been sent.
+    pub trailers: Option<tonic::metadata::MetadataMap>,
 }
 
 /// Helper to create a message stream from a vector of bytes
@@ -234,8 +287,33 @@ mod tests {
         let response = StreamingResponse {
             message_stream: stream,
             metadata: tonic::metadata::MetadataMap::new(),
+            trailers: None,
+        };
+
+        assert!(response.metadata.is_empty());
+        assert!(response.trailers.is_none());
+    }
+
+    #[test]
+    fn test_streaming_response_with_trailers() {
+        let stream = empty_message_stream();
+        let mut trailers = tonic::metadata::MetadataMap::new();
+        trailers.insert(
+            "x-request-id",
+            "test-123"
+                .parse::<tonic::metadata::MetadataValue<tonic::metadata::Ascii>>()
+                .unwrap(),
+        );
+
+        let response = StreamingResponse {
+            message_stream: stream,
+            metadata: tonic::metadata::MetadataMap::new(),
+            trailers: Some(trailers),
         };
 
         assert!(response.metadata.is_empty());
+        assert!(response.trailers.is_some());
+        let trailers = response.trailers.unwrap();
+        assert_eq!(trailers.len(), 1);
     }
 }
```
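To close the loop on the new `trailers` field: a small sketch of how a server-streaming handler might report a per-stream metric in trailers after all messages are sent. `StreamingResponse` and `MessageStream` are mirrored locally (field-for-field from the hunk above) so the snippet runs without the crate itself; the `x-messages-sent` key is an arbitrary illustrative choice.

```rust
use bytes::Bytes;
use futures_util::stream::{self, Stream};
use std::pin::Pin;
use tonic::metadata::{Ascii, MetadataMap, MetadataValue};
use tonic::Status;

// Local mirrors of the alias and struct added in this diff.
type MessageStream = Pin<Box<dyn Stream<Item = Result<Bytes, Status>> + Send>>;

#[allow(dead_code)]
struct StreamingResponse {
    message_stream: MessageStream,
    metadata: MetadataMap,
    trailers: Option<MetadataMap>,
}

fn main() {
    let messages: Vec<Result<Bytes, Status>> =
        vec![Ok(Bytes::from_static(b"a")), Ok(Bytes::from_static(b"b"))];
    let sent = messages.len();

    // Report a per-stream metric in the trailers, after all messages are sent.
    let mut trailers = MetadataMap::new();
    trailers.insert(
        "x-messages-sent",
        sent.to_string().parse::<MetadataValue<Ascii>>().unwrap(),
    );

    let response = StreamingResponse {
        message_stream: Box::pin(stream::iter(messages)),
        metadata: MetadataMap::new(),
        trailers: Some(trailers),
    };
    assert!(response.trailers.is_some());
}
```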