hypern-0.3.11-cp310-cp310-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hypern/__init__.py +24 -0
- hypern/application.py +495 -0
- hypern/args_parser.py +73 -0
- hypern/auth/__init__.py +0 -0
- hypern/auth/authorization.py +2 -0
- hypern/background.py +4 -0
- hypern/caching/__init__.py +6 -0
- hypern/caching/backend.py +31 -0
- hypern/caching/redis_backend.py +201 -0
- hypern/caching/strategies.py +208 -0
- hypern/cli/__init__.py +0 -0
- hypern/cli/commands.py +0 -0
- hypern/config.py +246 -0
- hypern/database/__init__.py +0 -0
- hypern/database/sqlalchemy/__init__.py +4 -0
- hypern/database/sqlalchemy/config.py +66 -0
- hypern/database/sqlalchemy/repository.py +290 -0
- hypern/database/sqlx/__init__.py +36 -0
- hypern/database/sqlx/field.py +246 -0
- hypern/database/sqlx/migrate.py +263 -0
- hypern/database/sqlx/model.py +117 -0
- hypern/database/sqlx/query.py +904 -0
- hypern/datastructures.py +40 -0
- hypern/enum.py +13 -0
- hypern/exceptions/__init__.py +34 -0
- hypern/exceptions/base.py +62 -0
- hypern/exceptions/common.py +12 -0
- hypern/exceptions/errors.py +15 -0
- hypern/exceptions/formatters.py +56 -0
- hypern/exceptions/http.py +76 -0
- hypern/gateway/__init__.py +6 -0
- hypern/gateway/aggregator.py +32 -0
- hypern/gateway/gateway.py +41 -0
- hypern/gateway/proxy.py +60 -0
- hypern/gateway/service.py +52 -0
- hypern/hypern.cpython-310-darwin.so +0 -0
- hypern/hypern.pyi +333 -0
- hypern/i18n/__init__.py +0 -0
- hypern/logging/__init__.py +3 -0
- hypern/logging/logger.py +82 -0
- hypern/middleware/__init__.py +17 -0
- hypern/middleware/base.py +13 -0
- hypern/middleware/cache.py +177 -0
- hypern/middleware/compress.py +78 -0
- hypern/middleware/cors.py +41 -0
- hypern/middleware/i18n.py +1 -0
- hypern/middleware/limit.py +177 -0
- hypern/middleware/security.py +184 -0
- hypern/openapi/__init__.py +5 -0
- hypern/openapi/schemas.py +51 -0
- hypern/openapi/swagger.py +3 -0
- hypern/processpool.py +139 -0
- hypern/py.typed +0 -0
- hypern/reload.py +46 -0
- hypern/response/__init__.py +3 -0
- hypern/response/response.py +142 -0
- hypern/routing/__init__.py +5 -0
- hypern/routing/dispatcher.py +70 -0
- hypern/routing/endpoint.py +30 -0
- hypern/routing/parser.py +98 -0
- hypern/routing/queue.py +175 -0
- hypern/routing/route.py +280 -0
- hypern/scheduler.py +5 -0
- hypern/worker.py +274 -0
- hypern/ws/__init__.py +4 -0
- hypern/ws/channel.py +80 -0
- hypern/ws/heartbeat.py +74 -0
- hypern/ws/room.py +76 -0
- hypern/ws/route.py +26 -0
- hypern-0.3.11.dist-info/METADATA +134 -0
- hypern-0.3.11.dist-info/RECORD +73 -0
- hypern-0.3.11.dist-info/WHEEL +4 -0
- hypern-0.3.11.dist-info/licenses/LICENSE +24 -0
hypern/reload.py
ADDED
@@ -0,0 +1,46 @@
import sys
import time
import subprocess
from watchdog.events import FileSystemEventHandler
import signal
import os

from .logging import logger


class EventHandler(FileSystemEventHandler):
    def __init__(self, file_path: str, directory_path: str) -> None:
        self.file_path = file_path
        self.directory_path = directory_path
        self.process = None
        self.last_reload = time.time()

    def reload(self):
        # Kill all existing processes with the same command
        current_cmd = [sys.executable, *sys.argv]

        try:
            # Find and kill existing processes
            for proc in subprocess.Popen(["ps", "aux"], stdout=subprocess.PIPE).communicate()[0].decode().splitlines():
                if all(str(arg) in proc for arg in current_cmd):
                    pid = int(proc.split()[1])
                    try:
                        os.kill(pid, signal.SIGKILL)  # NOSONAR
                        logger.debug(f"Killed process with PID {pid}")
                    except ProcessLookupError:
                        pass

            # Start new process
            self.process = subprocess.Popen(current_cmd)
            self.last_reload = time.time()
            logger.debug("Server reloaded successfully")

        except Exception as e:
            logger.error(f"Reload failed: {e}")

    def on_modified(self, event) -> None:
        if time.time() - self.last_reload < 0.5:
            return

        time.sleep(0.2)  # Ensure file is written
        self.reload()
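The handler above only reacts to filesystem events; the wiring to a file watcher lives elsewhere in the package. A minimal sketch of how it could be driven with watchdog's `Observer` (illustrative only, not taken from this wheel):

```python
# Illustrative wiring only; the paths and Observer setup are assumptions,
# not code from hypern 0.3.11.
from watchdog.observers import Observer

from hypern.reload import EventHandler

handler = EventHandler(file_path="main.py", directory_path=".")
observer = Observer()
observer.schedule(handler, path=".", recursive=True)  # on_modified() fires on changes
observer.start()
```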
hypern/response/response.py
ADDED
@@ -0,0 +1,142 @@
from __future__ import annotations

import typing
from urllib.parse import quote
from hypern.hypern import Response as InternalResponse, Header
import orjson
import msgpack

from hypern.background import BackgroundTask, BackgroundTasks


class BaseResponse:
    media_type = None
    charset = "utf-8"

    def __init__(
        self,
        content: typing.Any = None,
        status_code: int = 200,
        headers: typing.Mapping[str, str] | None = None,
        media_type: str | None = None,
        backgrounds: typing.List[BackgroundTask] | None = None,
    ) -> None:
        self.status_code = status_code
        if media_type is not None:
            self.media_type = media_type
        self.body = self.render(content)
        self.init_headers(headers)
        self.backgrounds = backgrounds

    def render(self, content: typing.Any) -> bytes | memoryview:
        if content is None:
            return b""
        if isinstance(content, (bytes, memoryview)):
            return content
        if isinstance(content, str):
            return content.encode(self.charset)
        return orjson.dumps(content)  # type: ignore

    def init_headers(self, headers: typing.Mapping[str, str] | None = None) -> None:
        if headers is None:
            raw_headers: dict = {}
            populate_content_length = True
            populate_content_type = True
        else:
            raw_headers = {k.lower(): v for k, v in headers.items()}
            keys = raw_headers.keys()
            populate_content_length = "content-length" not in keys
            populate_content_type = "content-type" not in keys

        body = getattr(self, "body", None)
        if body is not None and populate_content_length and not (self.status_code < 200 or self.status_code in (204, 304)):
            content_length = str(len(body))
            raw_headers.setdefault("content-length", content_length)

        content_type = self.media_type
        if content_type is not None and populate_content_type:
            if content_type.startswith("text/") and "charset=" not in content_type.lower():
                content_type += "; charset=" + self.charset
            raw_headers.setdefault("content-type", content_type)

        self.raw_headers = raw_headers


def to_response(cls):
    class ResponseWrapper(cls):
        def __new__(cls, *args, **kwargs):
            instance = super().__new__(cls)
            instance.__init__(*args, **kwargs)
            # Execute background tasks
            task_manager = BackgroundTasks()
            if instance.backgrounds:
                for task in instance.backgrounds:
                    task_manager.add_task(task)
                task_manager.execute_all()
            del task_manager

            headers = Header(instance.raw_headers)
            return InternalResponse(
                status_code=instance.status_code,
                headers=headers,
                description=instance.body,
            )

    return ResponseWrapper


@to_response
class Response(BaseResponse):
    media_type = None
    charset = "utf-8"


@to_response
class JSONResponse(BaseResponse):
    media_type = "application/json"


@to_response
class HTMLResponse(BaseResponse):
    media_type = "text/html"


@to_response
class PlainTextResponse(BaseResponse):
    media_type = "text/plain"


@to_response
class RedirectResponse(BaseResponse):
    def __init__(
        self,
        url: str,
        status_code: int = 307,
        headers: typing.Mapping[str, str] | None = None,
        backgrounds: typing.List[BackgroundTask] | None = None,
    ) -> None:
        super().__init__(content=b"", status_code=status_code, headers=headers, backgrounds=backgrounds)
        self.raw_headers["location"] = quote(str(url), safe=":/%#?=@[]!$&'()*+,;")


@to_response
class FileResponse(BaseResponse):
    def __init__(
        self,
        content: bytes | memoryview,
        filename: str,
        status_code: int = 200,
        headers: typing.Mapping[str, str] | None = None,
        backgrounds: typing.List[BackgroundTask] | None = None,
    ) -> None:
        super().__init__(content=content, status_code=status_code, headers=headers, backgrounds=backgrounds)
        self.raw_headers["content-disposition"] = f'attachment; filename="{filename}"'
        self.raw_headers.setdefault("content-type", "application/octet-stream")
        self.raw_headers.setdefault("content-length", str(len(content)))


@to_response
class BinaryResponse(BaseResponse):
    def __init__(self, content: bytes):
        super().__init__(status_code=200, media_type="application/x-msgpack", headers={"Content-Type": "application/x-msgpack"})
        self.content = msgpack.packb(content)
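Because every class here is wrapped by `to_response`, instantiating one returns the native `InternalResponse` rather than the Python wrapper object. A minimal usage sketch (illustrative, not taken from the wheel):

```python
# Illustrative usage; the payload is an assumption, not package code.
from hypern.response import JSONResponse

resp = JSONResponse(content={"status": "ok"}, status_code=200)  # dict is serialized with orjson
```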
hypern/routing/dispatcher.py
ADDED
@@ -0,0 +1,70 @@
# -*- coding: utf-8 -*-
from __future__ import annotations

import asyncio
import functools
import inspect
import traceback
import typing

import orjson
from pydantic import BaseModel

from hypern.exceptions import HTTPException
from hypern.hypern import Request, Response
from hypern.response import JSONResponse

from .parser import InputHandler
from hypern.config import context_store


def is_async_callable(obj: typing.Any) -> bool:
    while isinstance(obj, functools.partial):
        obj = obj.func
    return asyncio.iscoroutinefunction(obj) or (callable(obj) and asyncio.iscoroutinefunction(obj.__call__))


async def run_in_threadpool(func: typing.Callable, *args, **kwargs):
    if kwargs:  # pragma: no cover
        # run_sync doesn't accept 'kwargs', so bind them in here
        func = functools.partial(func, **kwargs)
    return await asyncio.to_thread(func, *args)


async def dispatch(handler, request: Request, inject: typing.Dict[str, typing.Any]) -> Response:
    try:
        # set context for global handler
        context_store.set_context(request.context_id)

        is_async = is_async_callable(handler)
        signature = inspect.signature(handler)
        input_handler = InputHandler(request)
        _response_type = signature.return_annotation
        _kwargs = await input_handler.get_input_handler(signature, inject)

        if is_async:
            response = await handler(**_kwargs)  # type: ignore
        else:
            response = await run_in_threadpool(handler, **_kwargs)
        if not isinstance(response, Response):
            if isinstance(_response_type, type) and issubclass(_response_type, BaseModel):
                response = _response_type.model_validate(response).model_dump(mode="json")  # type: ignore
            response = JSONResponse(
                content=orjson.dumps({"message": response, "error_code": None}),
                status_code=200,
            )

    except Exception as e:
        _res: typing.Dict = {"message": "", "error_code": "UNKNOWN_ERROR"}
        if isinstance(e, HTTPException):
            _res = e.to_dict()
            _status = e.status_code
        else:
            traceback.print_exc()
            _res["message"] = str(e)
            _status = 400
        response = JSONResponse(
            content=orjson.dumps(_res),
            status_code=_status,
        )
    return response
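`dispatch()` accepts both sync and async handlers and, when the return value is not already a `Response`, validates it against the handler's return annotation if that annotation is a Pydantic model. A sketch of a handler it could route (the name and model are illustrative, not from the package):

```python
# Illustrative handler only; ItemOut is an assumed model.
from pydantic import BaseModel

class ItemOut(BaseModel):
    name: str
    price: float

async def get_item() -> ItemOut:
    # dispatch() runs model_validate()/model_dump() on this value and wraps it
    # as {"message": ..., "error_code": None} in a JSONResponse.
    return {"name": "book", "price": 9.5}
```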
hypern/routing/endpoint.py
ADDED
@@ -0,0 +1,30 @@
# -*- coding: utf-8 -*-
from __future__ import annotations

import typing
from typing import Any, Dict

import orjson

from hypern.hypern import Request, Response
from hypern.response import JSONResponse

from .dispatcher import dispatch


class HTTPEndpoint:
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def method_not_allowed(self, request: Request) -> Response:
        return JSONResponse(
            description=orjson.dumps({"message": "Method Not Allowed", "error_code": "METHOD_NOT_ALLOW"}),
            status_code=405,
        )

    async def dispatch(self, request: Request, inject: Dict[str, Any]) -> Response:
        handler_name = "get" if request.method == "HEAD" and not hasattr(self, "head") else request.method.lower()
        handler: typing.Callable[[Request], typing.Any] = getattr(  # type: ignore
            self, handler_name, self.method_not_allowed
        )
        return await dispatch(handler, request, inject)
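`dispatch()` resolves the handler by lower-cased HTTP method name (falling back to `get` for HEAD requests and to `method_not_allowed` otherwise), so a subclass only needs to define the methods it serves. A minimal, illustrative subclass:

```python
# Minimal subclass sketch; the class name and payload are assumptions.
from hypern.routing import HTTPEndpoint

class HealthEndpoint(HTTPEndpoint):
    async def get(self):
        # Non-Response return values are wrapped into a JSONResponse by the dispatcher.
        return {"status": "healthy"}
```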
hypern/routing/parser.py
ADDED
@@ -0,0 +1,98 @@
# -*- coding: utf-8 -*-
from __future__ import annotations

import inspect
import typing

import orjson
from pydantic import BaseModel, ValidationError
from pydash import get

from hypern.auth.authorization import Authorization
from hypern.exceptions import BadRequestException
from hypern.exceptions import ValidationException
from hypern.hypern import Request


class ParamParser:
    def __init__(self, request: Request):
        self.request = request

    def parse_data_by_name(self, param_name: str) -> dict:
        param_name = param_name.lower()
        data_parsers = {
            "query_params": self._parse_query_params,
            "path_params": self._parse_path_params,
            "form_data": self._parse_form_data,
        }

        parser = data_parsers.get(param_name)
        if not parser:
            raise BadRequestException(details={"message": f"Invalid parameter name: {param_name}"})
        return parser()

    def _parse_query_params(self) -> dict:
        query_params = self.request.query_params.to_dict()
        return {k: v[0] for k, v in query_params.items()}

    def _parse_path_params(self) -> dict:
        return dict(self.request.path_params.items())

    def _parse_form_data(self) -> dict:
        return self.request.json()


class InputHandler:
    def __init__(self, request):
        self.request = request
        self.param_parser = ParamParser(request)

    async def parse_pydantic_model(self, param_name: str, model_class: typing.Type[BaseModel]) -> BaseModel:
        try:
            data = self.param_parser.parse_data_by_name(param_name)
            return model_class(**data)
        except ValidationError as e:
            invalid_fields = orjson.loads(e.json())
            raise ValidationException(
                details=[
                    {
                        "field": get(item, "loc")[0],
                        "msg": get(item, "msg"),
                    }
                    for item in invalid_fields
                ]
            )

    async def handle_special_params(self, param_name: str) -> typing.Any:
        special_params = {
            "request": lambda: self.request,
        }
        return special_params.get(param_name, lambda: None)()

    async def get_input_handler(self, signature: inspect.Signature, inject: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]:
        """
        Parse the request data and return the kwargs for the handler
        """
        kwargs = {}

        for param in signature.parameters.values():
            name = param.name
            ptype = param.annotation

            # Handle Pydantic models
            if isinstance(ptype, type) and issubclass(ptype, BaseModel):
                kwargs[name] = await self.parse_pydantic_model(name, ptype)
                continue

            # Handle Authorization
            if isinstance(ptype, type) and issubclass(ptype, Authorization):
                kwargs[name] = await ptype().validate(self.request)
                continue

            # Handle special parameters
            special_value = await self.handle_special_params(name)
            if special_value is not None:
                kwargs[name] = special_value
            if name in inject:
                kwargs[name] = inject[name]
        return kwargs
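`InputHandler` selects the data source by parameter name: a Pydantic-annotated parameter called `query_params`, `path_params`, or `form_data` is filled from the corresponding part of the request, while a parameter named `request` receives the request object itself. An illustrative handler signature (not part of the package):

```python
# Illustrative only; SearchParams is an assumed model, not package code.
from pydantic import BaseModel

class SearchParams(BaseModel):
    q: str
    page: int = 1

async def search(query_params: SearchParams, request):
    # query_params is built from the query string; request is injected as-is.
    return {"q": query_params.q, "page": query_params.page}
```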
hypern/routing/queue.py
ADDED
@@ -0,0 +1,175 @@
import asyncio
import time
from contextlib import asynccontextmanager
from dataclasses import dataclass, field
from queue import PriorityQueue
from typing import Any, Dict

from hypern.hypern import Request, Response
from hypern.response import JSONResponse
from hypern.routing import HTTPEndpoint
from hypern.logging import logger


@dataclass(order=True)
class PrioritizedRequest:
    priority: int
    timestamp: float = field(default_factory=time.time)
    request: Request | None = field(default=None, compare=False)
    future: asyncio.Future = field(compare=False, default_factory=asyncio.Future)


class QueuedHTTPEndpoint(HTTPEndpoint):
    """
    HTTPEndpoint with request queuing capabilities for high-load scenarios.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Queue configuration
        self._max_concurrent = kwargs.get("max_concurrent", 100)
        self._queue_size = kwargs.get("queue_size", 1000)
        self._request_timeout = kwargs.get("request_timeout", 30)

        # Initialize queuing system
        self._request_queue: PriorityQueue = PriorityQueue(maxsize=self._queue_size)
        self._active_requests = 0
        self._lock = None  # Will be initialized when needed
        self._request_semaphore = None  # Will be initialized when needed
        self._shutdown = False
        self._queue_task = None
        self._initialized = False

        # Metrics
        self._metrics = {"processed_requests": 0, "queued_requests": 0, "rejected_requests": 0, "avg_wait_time": 0.0}

        self._fully_message = "Request queue is full"

    async def _initialize(self):
        """Initialize async components when first request arrives"""
        if not self._initialized:
            self._lock = asyncio.Lock()
            self._request_semaphore = asyncio.Semaphore(self._max_concurrent)
            self._queue_task = asyncio.create_task(self._process_queue())
            self._initialized = True

    @asynccontextmanager
    async def _queue_context(self, request: Request, priority: int = 10):
        """Context manager for handling request queuing."""
        if self._shutdown:
            raise RuntimeError("Endpoint is shutting down")

        await self._initialize()  # Ensure async components are initialized

        request_future = asyncio.Future()
        prioritized_request = PrioritizedRequest(priority=priority, timestamp=time.time(), request=request, future=request_future)

        try:
            if self._request_queue.qsize() >= self._queue_size:
                self._metrics["rejected_requests"] += 1
                raise asyncio.QueueFull(self._fully_message)

            await self._enqueue_request(prioritized_request)
            yield await asyncio.wait_for(request_future, timeout=self._request_timeout)

        except asyncio.TimeoutError:
            self._metrics["rejected_requests"] += 1
            raise asyncio.TimeoutError("Request timed out while waiting in queue")
        finally:
            if not request_future.done():
                request_future.cancel()

    async def _enqueue_request(self, request: PrioritizedRequest):
        """Add request to the queue."""
        try:
            self._request_queue.put_nowait(request)
            self._metrics["queued_requests"] += 1
        except asyncio.QueueFull:
            self._metrics["rejected_requests"] += 1
            raise asyncio.QueueFull(self._fully_message)

    async def _process_queue(self):
        """Background task to process queued requests."""
        while not self._shutdown:
            try:
                if not self._request_queue.empty():
                    async with self._lock:
                        if self._active_requests >= self._max_concurrent:
                            await asyncio.sleep(0.1)
                            continue

                        request = self._request_queue.get_nowait()
                        wait_time = time.time() - request.timestamp
                        self._metrics["avg_wait_time"] = (self._metrics["avg_wait_time"] * self._metrics["processed_requests"] + wait_time) / (
                            self._metrics["processed_requests"] + 1
                        )

                        if not request.future.cancelled():
                            self._active_requests += 1
                            asyncio.create_task(self._handle_request(request))

                await asyncio.sleep(0.01)
            except Exception as e:
                logger.error(f"Error processing queue: {e}")
                await asyncio.sleep(1)

    async def _handle_request(self, request: PrioritizedRequest):
        """Handle individual request."""
        try:
            async with self._request_semaphore:
                response = await super().dispatch(request.request, {})
                if not request.future.done():
                    request.future.set_result(response)
        except Exception as e:
            if not request.future.done():
                request.future.set_exception(e)
        finally:
            self._active_requests -= 1
            self._metrics["processed_requests"] += 1
            self._metrics["queued_requests"] -= 1

    async def dispatch(self, request: Request, inject: Dict[str, Any]) -> Response:
        """
        Enhanced dispatch method with request queuing.
        """
        try:
            priority = self._get_request_priority(request)

            async with self._queue_context(request, priority) as response:
                return response

        except asyncio.QueueFull:
            return JSONResponse(description={"error": "Server too busy", "message": self._fully_message, "retry_after": 5}, status_code=503)
        except asyncio.TimeoutError:
            return JSONResponse(
                description={
                    "error": "Request timeout",
                    "message": "Request timed out while waiting in queue",
                },
                status_code=504,
            )
        except Exception as e:
            return JSONResponse(description={"error": "Internal server error", "message": str(e)}, status_code=500)

    def _get_request_priority(self, request: Request) -> int:
        """
        Determine request priority. Override this method to implement
        custom priority logic.
        """
        if request.method == "GET":
            return 5
        return 10

    async def shutdown(self):
        """Gracefully shutdown the endpoint."""
        self._shutdown = True
        if self._queue_task and not self._queue_task.done():
            self._queue_task.cancel()
            try:
                await self._queue_task
            except asyncio.CancelledError:
                pass

    def get_metrics(self) -> Dict[str, Any]:
        """Get current queue metrics."""
        return {**self._metrics, "current_queue_size": self._request_queue.qsize(), "active_requests": self._active_requests}
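`PrioritizedRequest` orders the queue by its first field, so lower numbers are dequeued first, and the docstring on `_get_request_priority` invites overriding it. A hedged sketch of a subclass, assuming handlers follow the `HTTPEndpoint` method-naming convention (the class name and priority scheme are illustrative):

```python
# Sketch only; the endpoint name and priority values are assumptions.
from hypern.routing.queue import QueuedHTTPEndpoint

class ReportEndpoint(QueuedHTTPEndpoint):
    async def get(self):
        return {"report": "ready"}

    def _get_request_priority(self, request) -> int:
        # PriorityQueue pops the smallest value first, so 1 outranks 10.
        return 1 if request.method == "GET" else 10
```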