hypern-0.3.1-cp312-cp312-win32.whl → hypern-0.3.2-cp312-cp312-win32.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hypern/application.py +47 -8
- hypern/args_parser.py +7 -0
- hypern/caching/__init__.py +6 -0
- hypern/caching/backend.py +31 -0
- hypern/caching/redis_backend.py +200 -2
- hypern/caching/strategies.py +164 -71
- hypern/gateway/__init__.py +6 -0
- hypern/gateway/aggregator.py +32 -0
- hypern/gateway/gateway.py +41 -0
- hypern/gateway/proxy.py +60 -0
- hypern/gateway/service.py +52 -0
- hypern/hypern.cp312-win32.pyd +0 -0
- hypern/hypern.pyi +20 -18
- hypern/middleware/__init__.py +14 -2
- hypern/middleware/base.py +9 -14
- hypern/middleware/cache.py +177 -0
- hypern/middleware/compress.py +78 -0
- hypern/middleware/cors.py +6 -3
- hypern/middleware/limit.py +5 -4
- hypern/middleware/security.py +21 -16
- hypern/processpool.py +41 -2
- hypern/routing/__init__.py +2 -1
- hypern/routing/queue.py +175 -0
- {hypern-0.3.1.dist-info → hypern-0.3.2.dist-info}/METADATA +1 -1
- {hypern-0.3.1.dist-info → hypern-0.3.2.dist-info}/RECORD +27 -24
- hypern/caching/base/__init__.py +0 -8
- hypern/caching/base/backend.py +0 -3
- hypern/caching/base/key_maker.py +0 -8
- hypern/caching/cache_manager.py +0 -56
- hypern/caching/cache_tag.py +0 -10
- hypern/caching/custom_key_maker.py +0 -11
- {hypern-0.3.1.dist-info → hypern-0.3.2.dist-info}/WHEEL +0 -0
- {hypern-0.3.1.dist-info → hypern-0.3.2.dist-info}/licenses/LICENSE +0 -0
hypern/gateway/aggregator.py
ADDED
@@ -0,0 +1,32 @@
+import asyncio
+from typing import Any, Dict, List
+
+from hypern.response import JSONResponse
+
+from .proxy import Proxy
+from .service import ServiceRegistry
+
+
+class Aggregator:
+    def __init__(self, registry: ServiceRegistry, proxy: Proxy):
+        self._registry = registry
+        self._proxy = proxy
+
+    async def aggregate_responses(self, requests: List[Dict[str, Any]]) -> JSONResponse:
+        tasks = []
+        for req in requests:
+            service = self._registry.get_service(req["service"])
+            if service:
+                tasks.append(self._proxy.forward_request(service, req["request"]))
+
+        responses = await asyncio.gather(*tasks, return_exceptions=True)
+
+        aggregated = {}
+        for i, response in enumerate(responses):
+            service_name = requests[i]["service"]
+            if isinstance(response, Exception):
+                aggregated[service_name] = {"status": "error", "error": str(response)}
+            else:
+                aggregated[service_name] = {"status": "success", "data": response.body}
+
+        return JSONResponse(content=aggregated)
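The batch passed to Aggregator.aggregate_responses is a list of {"service", "request"} mappings; unknown service names are skipped, and per-service failures are reported in the body rather than raised. A minimal sketch, assuming a registry, a proxy, and two already-built request objects (all names illustrative):

aggregator = Aggregator(registry, proxy)
batch = [
    {"service": "users", "request": users_request},
    {"service": "orders", "request": orders_request},
]
combined = await aggregator.aggregate_responses(batch)
# The JSONResponse body maps each service name to
# {"status": "success", "data": ...} or {"status": "error", "error": ...}.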
hypern/gateway/gateway.py
ADDED
@@ -0,0 +1,41 @@
+from typing import Any, Dict, List, Optional
+
+from hypern import Hypern
+from hypern.hypern import Request
+from hypern.response import JSONResponse
+
+from .aggregator import Aggregator
+from .proxy import Proxy
+from .service import ServiceConfig, ServiceRegistry
+
+
+class APIGateway:
+    def __init__(self, app: Hypern):
+        self.app = app
+        self.registry = ServiceRegistry()
+        self.proxy = Proxy(self.registry)
+        self.aggregator = Aggregator(self.registry, self.proxy)
+
+    def register_service(self, config: ServiceConfig, metadata: Optional[Dict[str, Any]] = None):
+        """Register a new service with the gateway"""
+        self.registry.register(config, metadata)
+
+    async def startup(self):
+        """Initialize the gateway components"""
+        await self.proxy.startup()
+
+    async def shutdown(self):
+        """Cleanup gateway resources"""
+        await self.proxy.shutdown()
+
+    async def handle_request(self, request: Request) -> Any:
+        """Main request handler"""
+        service = self.registry.get_service_by_prefix(request.path)
+        if not service:
+            return JSONResponse(content={"error": "Service not found"}, status_code=404)
+
+        return await self.proxy.forward_request(service, request)
+
+    async def aggregate(self, requests: List[Dict[str, Any]]) -> Any:
+        """Handle aggregated requests"""
+        return await self.aggregator.aggregate_responses(requests)
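A rough wiring sketch for the new gateway, importing directly from the added submodules; the service values are illustrative, and how gateway_handler gets mounted as a catch-all route is not shown in this diff:

from hypern import Hypern
from hypern.gateway.gateway import APIGateway
from hypern.gateway.service import ServiceConfig

app = Hypern()
gateway = APIGateway(app)
gateway.register_service(
    ServiceConfig(name="users", url="http://users.internal:8000", prefix="/users", timeout=10.0)
)

async def gateway_handler(request):
    # Resolves the target service by path prefix, then proxies the call upstream.
    return await gateway.handle_request(request)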
hypern/gateway/proxy.py
ADDED
@@ -0,0 +1,60 @@
+import asyncio
+from typing import Any, Dict, Optional
+
+import aiohttp
+import orjson
+import traceback
+
+from hypern.hypern import Request
+from hypern.response import JSONResponse
+
+from .service import ServiceConfig, ServiceRegistry, ServiceStatus
+
+
+class Proxy:
+    def __init__(self, service_registry: ServiceRegistry):
+        self._registry = service_registry
+        self._session: Optional[aiohttp.ClientSession] = None
+        self._rate_limiters: Dict[str, asyncio.Semaphore] = {}
+
+    async def startup(self):
+        self._session = aiohttp.ClientSession()
+        for service in self._registry._services.values():
+            self._rate_limiters[service.name] = asyncio.Semaphore(100)  # Default 100 concurrent requests
+
+    async def shutdown(self):
+        if self._session:
+            await self._session.close()
+
+    async def forward_request(self, service: ServiceConfig, request: Request) -> Any:
+        if not self._session:
+            await self.startup()
+
+        target_path = request.path.replace(service.prefix, "", 1)
+        target_url = f"{service.url}{target_path}"
+
+        headers = request.headers.get_headers()
+        # Remove hop-by-hop headers
+        for header in ["connection", "keep-alive", "transfer-encoding"]:
+            headers.pop(header, None)
+
+        async with self._rate_limiters[service.name]:
+            try:
+                async with self._session.request(
+                    method=request.method,
+                    url=target_url,
+                    headers=headers,
+                    params=request.query_params.to_dict(),
+                    data=await request.json() if request.method in ["POST", "PUT", "PATCH"] else None,
+                    timeout=aiohttp.ClientTimeout(total=service.timeout),
+                ) as response:
+                    body = await response.read()
+                    return JSONResponse(
+                        content=orjson.loads(body) if response.content_type == "application/json" else body.decode(),
+                        status_code=response.status,
+                        headers=dict(response.headers),
+                    )
+            except Exception as e:
+                traceback.print_exc()
+                self._registry.update_status(service.name, ServiceStatus.DEGRADED)
+                return JSONResponse(content={"error": "Service unavailable", "details": str(e)}, status_code=503)
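forward_request strips only the first occurrence of the service prefix before building the upstream URL and drops hop-by-hop headers. A small illustration of the prefix rewrite with made-up values:

prefix = "/users"                          # ServiceConfig.prefix
upstream = "http://users.internal:8000"    # ServiceConfig.url
path = "/users/v1/profile"                 # incoming request.path

target_path = path.replace(prefix, "", 1)  # "/v1/profile"
target_url = f"{upstream}{target_path}"    # "http://users.internal:8000/v1/profile"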
hypern/gateway/service.py
ADDED
@@ -0,0 +1,52 @@
+from dataclasses import dataclass
+from enum import Enum
+from typing import Any, Dict, Optional
+
+
+class ServiceStatus(Enum):
+    ONLINE = "online"
+    OFFLINE = "offline"
+    DEGRADED = "degraded"
+
+
+@dataclass
+class ServiceConfig:
+    name: str
+    url: str
+    prefix: str
+    timeout: float = 30.0
+    max_retries: int = 3
+    health_check_path: str = "/health"
+
+
+class ServiceRegistry:
+    def __init__(self):
+        self._services: Dict[str, ServiceConfig] = {}
+        self._status: Dict[str, ServiceStatus] = {}
+        self._metadata: Dict[str, Dict[str, Any]] = {}
+
+    def register(self, service: ServiceConfig, metadata: Optional[Dict[str, Any]] = None):
+        self._services[service.name] = service
+        self._status[service.name] = ServiceStatus.ONLINE
+        self._metadata[service.name] = metadata or {}
+
+    def unregister(self, service_name: str):
+        self._services.pop(service_name, None)
+        self._status.pop(service_name, None)
+        self._metadata.pop(service_name, None)
+
+    def get_service(self, service_name: str) -> Optional[ServiceConfig]:
+        return self._services.get(service_name)
+
+    def get_service_by_prefix(self, path: str) -> Optional[ServiceConfig]:
+        for service in self._services.values():
+            if path.startswith(service.prefix):
+                return service
+        return None
+
+    def update_status(self, service_name: str, status: ServiceStatus):
+        if service_name in self._services:
+            self._status[service_name] = status
+
+    def get_status(self, service_name: str) -> ServiceStatus:
+        return self._status.get(service_name, ServiceStatus.OFFLINE)
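The registry resolves services by the first matching path prefix and reports OFFLINE for anything it does not know about. A quick sketch with illustrative values:

from hypern.gateway.service import ServiceConfig, ServiceRegistry, ServiceStatus

registry = ServiceRegistry()
registry.register(ServiceConfig(name="orders", url="http://orders.internal:9000", prefix="/orders"))

svc = registry.get_service_by_prefix("/orders/42")  # matches the "/orders" prefix
assert svc is not None and svc.name == "orders"

registry.update_status("orders", ServiceStatus.DEGRADED)
assert registry.get_status("orders") is ServiceStatus.DEGRADED
assert registry.get_status("missing") is ServiceStatus.OFFLINE  # unknown services default to OFFLINE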
hypern/hypern.cp312-win32.pyd
CHANGED
Binary file
hypern/hypern.pyi
CHANGED
@@ -3,23 +3,6 @@ from __future__ import annotations
 from dataclasses import dataclass
 from typing import Any, Callable, Dict, List, Tuple
 
-@dataclass
-class BaseBackend:
-    get: Callable[[str], Any]
-    set: Callable[[Any, str, int], None]
-    delete_startswith: Callable[[str], None]
-
-@dataclass
-class RedisBackend(BaseBackend):
-    url: str
-
-    get: Callable[[str], Any]
-    set: Callable[[Any, str, int], None]
-    delete_startswith: Callable[[str], None]
-    set_nx: Callable[[Any, str, int], None]
-    get_ttl: Callable[[str], int]
-    current_timestamp: Callable[[], int]
-
 @dataclass
 class BaseSchemaGenerator:
     remove_converter: Callable[[str], str]
@@ -202,6 +185,9 @@ class Server:
     def set_before_hooks(self, hooks: List[FunctionInfo]) -> None: ...
     def set_after_hooks(self, hooks: List[FunctionInfo]) -> None: ...
     def set_response_headers(self, headers: Dict[str, str]) -> None: ...
+    def set_startup_handler(self, on_startup: FunctionInfo) -> None: ...
+    def set_shutdown_handler(self, on_shutdown: FunctionInfo) -> None: ...
+    def set_auto_compression(self, enabled: bool) -> None: ...
 
 class Route:
     path: str
@@ -264,6 +250,7 @@ class Header:
     def set(self, key: str, value: str) -> None: ...
     def append(self, key: str, value: str) -> None: ...
     def update(self, headers: Dict[str, str]) -> None: ...
+    def get_headers(self) -> Dict[str, str]: ...
 
 @dataclass
 class Response:
@@ -271,12 +258,15 @@ class Response:
     response_type: str
     headers: Header
     description: str
-    file_path: str
+    file_path: str | None
+    context_id: str
 
 @dataclass
 class QueryParams:
     queries: Dict[str, List[str]]
 
+    def to_dict(self) -> Dict[str, str]: ...
+
 @dataclass
 class UploadedFile:
     name: str
@@ -293,6 +283,7 @@ class BodyData:
 
 @dataclass
 class Request:
+    path: str
     query_params: QueryParams
     headers: Header
     path_params: Dict[str, str]
@@ -301,3 +292,14 @@ class Request:
     remote_addr: str
     timestamp: float
     context_id: str
+
+    def json(self) -> Dict[str, Any]: ...
+    def set_body(self, body: BodyData) -> None: ...
+
+@dataclass
+class MiddlewareConfig:
+    priority: int = 0
+    is_conditional: bool = True
+
+    @staticmethod
+    def default(self) -> MiddlewareConfig: ...
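The stub changes track the Rust core: Request gains path, json(), and set_body(); Header gains get_headers(); QueryParams gains to_dict(); Server gains startup/shutdown handlers and auto-compression; and a MiddlewareConfig dataclass is introduced. A minimal sketch of constructing the config as declared here (the non-default values are hypothetical):

from hypern.hypern import MiddlewareConfig

default_cfg = MiddlewareConfig.default()                         # priority=0, is_conditional=True
custom_cfg = MiddlewareConfig(priority=10, is_conditional=False)  # hypothetical non-default values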
hypern/middleware/__init__.py
CHANGED
@@ -1,5 +1,17 @@
-from .base import Middleware
+from .base import Middleware, MiddlewareConfig
 from .cors import CORSMiddleware
 from .limit import RateLimitMiddleware, StorageBackend, RedisBackend, InMemoryBackend
+from .compress import CompressionMiddleware
+from .cache import EdgeCacheMiddleware
 
-__all__ = [
+__all__ = [
+    "Middleware",
+    "CORSMiddleware",
+    "RateLimitMiddleware",
+    "StorageBackend",
+    "RedisBackend",
+    "InMemoryBackend",
+    "CompressionMiddleware",
+    "EdgeCacheMiddleware",
+    "MiddlewareConfig",
+]
hypern/middleware/base.py
CHANGED
@@ -1,18 +1,13 @@
-from 
-from hypern.hypern import 
+from typing import Optional
+from hypern.hypern import MiddlewareConfig
 
 
-
-
-
-    def __init__(self) -> None:
-        super().__init__()
-        self.app = None
+class Middleware:
+    def __init__(self, config: Optional[MiddlewareConfig] = None):
+        self.config = config or MiddlewareConfig.default()
 
-
-
-        pass
+    async def before_request(self, request):
+        return request
 
-
-
-        pass
+    async def after_request(self, response):
+        return response
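The rewritten base class replaces the old no-op hooks with async pass-through defaults and threads a MiddlewareConfig through the constructor. A minimal sketch of a custom middleware built on it (the header name and counter are illustrative):

from typing import Optional

from hypern.hypern import MiddlewareConfig
from hypern.middleware import Middleware


class RequestCounterMiddleware(Middleware):
    def __init__(self, config: Optional[MiddlewareConfig] = None):
        super().__init__(config)  # falls back to MiddlewareConfig.default()
        self.seen = 0

    async def before_request(self, request):
        self.seen += 1
        return request

    async def after_request(self, response):
        # Expose a simple counter header (illustrative only).
        response.headers.set("x-request-count", str(self.seen))
        return response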
hypern/middleware/cache.py
ADDED
@@ -0,0 +1,177 @@
+import hashlib
+from datetime import datetime, timezone
+from typing import Dict, List, Optional
+
+from hypern.hypern import Header, MiddlewareConfig, Request, Response
+
+from .base import Middleware
+
+
+class CacheConfig:
+    """
+    Configuration class for caching middleware.
+
+    Attributes:
+        max_age (int): The maximum age (in seconds) for the cache. Default is 3600 seconds (1 hour).
+        s_maxage (Optional[int]): The shared maximum age (in seconds) for the cache. Default is None.
+        stale_while_revalidate (Optional[int]): The time (in seconds) the cache can be used while revalidation is performed. Default is None.
+        stale_if_error (Optional[int]): The time (in seconds) the cache can be used if an error occurs during revalidation. Default is None.
+        vary_by (List[str]): List of headers to vary the cache by. Default is ['Accept', 'Accept-Encoding'].
+        cache_control (List[str]): List of cache control directives. Default is an empty list.
+        include_query_string (bool): Whether to include the query string in the cache key. Default is True.
+        exclude_paths (List[str]): List of paths to exclude from caching. Default is ['/admin', '/api/private'].
+        exclude_methods (List[str]): List of HTTP methods to exclude from caching. Default is ['POST', 'PUT', 'DELETE', 'PATCH'].
+        private_paths (List[str]): List of paths to be marked as private. Default is an empty list.
+        cache_by_headers (List[str]): List of headers to include in the cache key. Default is an empty list.
+    """
+
+    def __init__(
+        self,
+        max_age: int = 3600,  # 1 hour default
+        s_maxage: Optional[int] = None,
+        stale_while_revalidate: Optional[int] = None,
+        stale_if_error: Optional[int] = None,
+        vary_by: List[str] = None,
+        cache_control: List[str] = None,
+        include_query_string: bool = True,
+        exclude_paths: List[str] = None,
+        exclude_methods: List[str] = None,
+        private_paths: List[str] = None,
+        cache_by_headers: List[str] = None,
+    ):
+        self.max_age = max_age
+        self.s_maxage = s_maxage
+        self.stale_while_revalidate = stale_while_revalidate
+        self.stale_if_error = stale_if_error
+        self.vary_by = vary_by or ["accept", "accept-encoding"]
+        self.cache_control = cache_control or []
+        self.include_query_string = include_query_string
+        self.exclude_paths = exclude_paths or ["/admin", "/api/private"]
+        self.exclude_methods = exclude_methods or ["POST", "PUT", "DELETE", "PATCH"]
+        self.private_paths = private_paths or []
+        self.cache_by_headers = cache_by_headers or []
+
+
+class EdgeCacheMiddleware(Middleware):
+    """
+    Middleware implementing edge caching strategies with support for:
+    - Cache-Control directives
+    - ETag generation
+    - Conditional requests (If-None-Match, If-Modified-Since)
+    - Vary header management
+    - CDN-specific headers
+    """
+
+    def __init__(self, cache_config: CacheConfig | None = None, config: Optional[MiddlewareConfig] = None):
+        super().__init__(config)
+        self.cache_config = cache_config or CacheConfig()
+        self._etag_cache: Dict[str, str] = {}
+        self.request_context = {}
+
+    def _should_cache(self, request: Request, path: str) -> bool:
+        """Determine if the request should be cached"""
+        if request.method in self.cache_config.exclude_methods:
+            return False
+
+        if any(excluded in path for excluded in self.cache_config.exclude_paths):
+            return False
+
+        return True
+
+    def _generate_cache_key(self, request: Request) -> str:
+        """Generate a unique cache key based on request attributes"""
+        components = [request.method, request.path]
+
+        if self.cache_config.include_query_string:
+            components.append(str(request.query_params))
+
+        for header in self.cache_config.cache_by_headers:
+            value = request.headers.get(str(header).lower())
+            if value:
+                components.append(f"{header}:{value}")
+
+        return hashlib.sha256(":".join(components).encode()).hexdigest()
+
+    def _generate_etag(self, response: Response) -> str:
+        """Generate ETag for response content"""
+        content = response.description
+        if not isinstance(content, bytes):
+            content = str(content).encode()
+        return hashlib.sha256(content).hexdigest()
+
+    def _build_cache_control(self, path: str) -> str:
+        """Build Cache-Control header value"""
+        directives = []
+
+        # Determine public/private caching
+        if any(private in path for private in self.cache_config.private_paths):
+            directives.append("private")
+        else:
+            directives.append("public")
+
+        # Add max-age directives
+        directives.append(f"max-age={self.cache_config.max_age}")
+
+        if self.cache_config.s_maxage is not None:
+            directives.append(f"s-maxage={self.cache_config.s_maxage}")
+
+        if self.cache_config.stale_while_revalidate is not None:
+            directives.append(f"stale-while-revalidate={self.cache_config.stale_while_revalidate}")
+
+        if self.cache_config.stale_if_error is not None:
+            directives.append(f"stale-if-error={self.cache_config.stale_if_error}")
+
+        # Add custom cache control directives
+        directives.extend(self.cache_config.cache_control)
+
+        return ", ".join(directives)
+
+    def cleanup_context(self, context_id: str):
+        try:
+            del self.request_context[context_id]
+        except Exception:
+            pass
+
+    def before_request(self, request: Request) -> Request | Response:
+        """Handle conditional requests"""
+        if not self._should_cache(request, request.path):
+            return request
+
+        cache_key = self._generate_cache_key(request)
+        etag = self._etag_cache.get(cache_key)
+
+        if etag:
+            if_none_match = request.headers.get("if-none-match")
+            if if_none_match and if_none_match == etag:
+                return Response(status_code=304, description=b"", headers=Header({"ETag": etag}))
+        self.request_context[request.context_id] = request
+        return request
+
+    def after_request(self, response: Response) -> Response:
+        """Add caching headers to response"""
+        request = self.request_context.get(response.context_id)
+        self.cleanup_context(response.context_id)
+        if not self._should_cache(request, request.path):
+            response.headers.set("Cache-Control", "no-store")
+            return response
+
+        # Generate and store ETag
+        cache_key = self._generate_cache_key(request)
+        etag = self._generate_etag(response)
+        self._etag_cache[cache_key] = etag
+
+        # Set cache headers
+        response.headers.update(
+            {
+                "Cache-Control": self._build_cache_control(request.path),
+                "ETag": etag,
+                "Vary": ", ".join(self.cache_config.vary_by),
+                "Last-Modified": datetime.now(tz=timezone.utc).strftime("%a, %d %b %Y %H:%M:%S GMT"),
+            }
+        )
+
+        # Add CDN-specific headers
+        response.headers.set("CDN-Cache-Control", response.headers["Cache-Control"])
+        response.headers.set("Surrogate-Control", f"max-age={self.cache_config.s_maxage or self.cache_config.max_age}")
+
+        return response
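A configuration sketch for the new edge cache; all values are illustrative, and attaching the middleware to an application is outside the scope of this diff:

from hypern.middleware import EdgeCacheMiddleware
from hypern.middleware.cache import CacheConfig

edge_cache = EdgeCacheMiddleware(
    cache_config=CacheConfig(
        max_age=600,                           # browsers may cache for 10 minutes
        s_maxage=3600,                         # shared caches/CDNs may cache for 1 hour
        stale_while_revalidate=30,             # serve stale content while revalidating
        exclude_paths=["/admin"],              # never cache admin pages
        private_paths=["/account"],            # mark per-user pages as private
        cache_by_headers=["Accept-Language"],  # key the cache by language as well
    )
)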
hypern/middleware/compress.py
ADDED
@@ -0,0 +1,78 @@
+import gzip
+import zlib
+from typing import List, Optional
+
+from hypern.hypern import Request, Response
+
+from .base import Middleware, MiddlewareConfig
+
+
+class CompressionMiddleware(Middleware):
+    """
+    Middleware for compressing response content using gzip or deflate encoding.
+    """
+
+    def __init__(
+        self, config: Optional[MiddlewareConfig] = None, min_size: int = 500, compression_level: int = 6, include_types: Optional[List[str]] = None
+    ) -> None:
+        """
+        Initialize compression middleware.
+
+        Args:
+            min_size: Minimum response size in bytes to trigger compression
+            compression_level: Compression level (1-9, higher = better compression but slower)
+            include_types: List of content types to compress (defaults to common text types)
+        """
+        super().__init__(config)
+        self.min_size = min_size
+        self.compression_level = compression_level
+        self.include_types = include_types or [
+            "text/plain",
+            "text/html",
+            "text/css",
+            "text/javascript",
+            "application/javascript",
+            "application/json",
+            "application/xml",
+            "application/x-yaml",
+        ]
+
+    def before_request(self, request: Request) -> Request:
+        return request
+
+    def after_request(self, response: Response) -> Response:
+        # Check if response should be compressed
+        content_type = (response.headers.get("content-type") or "").split(";")[0].lower()
+        content_encoding = (response.headers.get("content-encoding") or "").lower()
+
+        # Skip if:
+        # - Content is already encoded
+        # - Content type is not in include list
+        # - Content length is below minimum size
+        if content_encoding or content_type not in self.include_types or len(response.description.encode()) < self.min_size:
+            return response
+
+        # Get accepted encodings from request
+        accept_encoding = (response.headers.get("accept-encoding") or "").lower()
+
+        if "gzip" in accept_encoding:
+            # Use gzip compression
+            response.description = gzip.compress(
+                response.description if isinstance(response.description, bytes) else str(response.description).encode(), compresslevel=self.compression_level
+            )
+            response.headers.set("content-encoding", "gzip")
+
+        elif "deflate" in accept_encoding:
+            # Use deflate compression
+            response.description = zlib.compress(
+                response.description if isinstance(response.description, bytes) else str(response.description).encode(), level=self.compression_level
+            )
+            response.headers.set("content-encoding", "deflate")
+
+        # Update content length after compression
+        response.headers.set("content-length", str(len(response.description)))
+
+        # Add Vary header to indicate content varies by Accept-Encoding
+        response.headers.set("vary", "Accept-Encoding")
+
+        return response
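Compression is gated on content type, size, and the negotiated encoding. A short usage sketch with illustrative thresholds:

from hypern.middleware import CompressionMiddleware

compress = CompressionMiddleware(
    min_size=1024,        # skip responses smaller than 1 KiB
    compression_level=5,  # trade a little compression ratio for speed
    include_types=["application/json", "text/html"],
)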
hypern/middleware/cors.py
CHANGED
@@ -1,5 +1,6 @@
-from typing import List
+from typing import List, Optional
 from .base import Middleware
+from hypern.hypern import MiddlewareConfig
 
 
 class CORSMiddleware(Middleware):
@@ -8,8 +9,10 @@ class CORSMiddleware(Middleware):
     methods, and headers.
     """
 
-    def __init__(
-
+    def __init__(
+        self, config: Optional[MiddlewareConfig] = None, allow_origins: List[str] = None, allow_methods: List[str] = None, allow_headers: List[str] = None
+    ) -> None:
+        super().__init__(config)
         self.allow_origins = allow_origins or []
         self.allow_methods = allow_methods or []
         self.allow_headers = allow_headers or []
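The CORS middleware now accepts an optional MiddlewareConfig ahead of its allow-lists; a short sketch with example origins:

from hypern.middleware import CORSMiddleware

cors = CORSMiddleware(
    allow_origins=["https://app.example.com"],
    allow_methods=["GET", "POST", "OPTIONS"],
    allow_headers=["Authorization", "Content-Type"],
)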
hypern/middleware/limit.py
CHANGED
@@ -1,10 +1,11 @@
+from typing import Optional
 import time
 from abc import ABC, abstractmethod
 from threading import Lock
 
 from hypern.hypern import Request, Response
 
-from .base import Middleware
+from .base import Middleware, MiddlewareConfig
 
 
 class StorageBackend(ABC):
@@ -102,14 +103,14 @@ class RateLimitMiddleware(Middleware):
     Requests per minute for a given IP address.
     """
 
-    def __init__(self, storage_backend, requests_per_minute=60, window_size=60):
-        super().__init__()
+    def __init__(self, storage_backend, config: Optional[MiddlewareConfig] = None, requests_per_minute=60, window_size=60):
+        super().__init__(config)
         self.storage = storage_backend
         self.requests_per_minute = requests_per_minute
         self.window_size = window_size
 
     def get_request_identifier(self, request: Request):
-        return request.
+        return request.remote_addr
 
     def before_request(self, request: Request):
        """