hypern 0.3.0__cp312-cp312-win32.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hypern/__init__.py +4 -0
- hypern/application.py +405 -0
- hypern/args_parser.py +59 -0
- hypern/auth/__init__.py +0 -0
- hypern/auth/authorization.py +2 -0
- hypern/background.py +4 -0
- hypern/caching/__init__.py +0 -0
- hypern/caching/base/__init__.py +8 -0
- hypern/caching/base/backend.py +3 -0
- hypern/caching/base/key_maker.py +8 -0
- hypern/caching/cache_manager.py +56 -0
- hypern/caching/cache_tag.py +10 -0
- hypern/caching/custom_key_maker.py +11 -0
- hypern/caching/redis_backend.py +3 -0
- hypern/cli/__init__.py +0 -0
- hypern/cli/commands.py +0 -0
- hypern/config.py +149 -0
- hypern/datastructures.py +40 -0
- hypern/db/__init__.py +0 -0
- hypern/db/nosql/__init__.py +25 -0
- hypern/db/nosql/addons/__init__.py +4 -0
- hypern/db/nosql/addons/color.py +16 -0
- hypern/db/nosql/addons/daterange.py +30 -0
- hypern/db/nosql/addons/encrypted.py +53 -0
- hypern/db/nosql/addons/password.py +134 -0
- hypern/db/nosql/addons/unicode.py +10 -0
- hypern/db/sql/__init__.py +179 -0
- hypern/db/sql/addons/__init__.py +14 -0
- hypern/db/sql/addons/color.py +16 -0
- hypern/db/sql/addons/daterange.py +23 -0
- hypern/db/sql/addons/datetime.py +22 -0
- hypern/db/sql/addons/encrypted.py +58 -0
- hypern/db/sql/addons/password.py +171 -0
- hypern/db/sql/addons/ts_vector.py +46 -0
- hypern/db/sql/addons/unicode.py +15 -0
- hypern/db/sql/repository.py +290 -0
- hypern/enum.py +13 -0
- hypern/exceptions.py +97 -0
- hypern/hypern.cp312-win32.pyd +0 -0
- hypern/hypern.pyi +295 -0
- hypern/i18n/__init__.py +0 -0
- hypern/logging/__init__.py +3 -0
- hypern/logging/logger.py +82 -0
- hypern/middleware/__init__.py +5 -0
- hypern/middleware/base.py +18 -0
- hypern/middleware/cors.py +38 -0
- hypern/middleware/i18n.py +1 -0
- hypern/middleware/limit.py +176 -0
- hypern/openapi/__init__.py +5 -0
- hypern/openapi/schemas.py +53 -0
- hypern/openapi/swagger.py +3 -0
- hypern/processpool.py +137 -0
- hypern/py.typed +0 -0
- hypern/reload.py +60 -0
- hypern/response/__init__.py +3 -0
- hypern/response/response.py +134 -0
- hypern/routing/__init__.py +4 -0
- hypern/routing/dispatcher.py +67 -0
- hypern/routing/endpoint.py +30 -0
- hypern/routing/parser.py +100 -0
- hypern/routing/route.py +284 -0
- hypern/scheduler.py +5 -0
- hypern/security.py +44 -0
- hypern/worker.py +30 -0
- hypern/ws.py +16 -0
- hypern-0.3.0.dist-info/METADATA +128 -0
- hypern-0.3.0.dist-info/RECORD +69 -0
- hypern-0.3.0.dist-info/WHEEL +4 -0
- hypern-0.3.0.dist-info/licenses/LICENSE +24 -0
|
@@ -0,0 +1,176 @@
|
|
|
1
|
+
import time
|
|
2
|
+
from abc import ABC, abstractmethod
|
|
3
|
+
from threading import Lock
|
|
4
|
+
|
|
5
|
+
from hypern.hypern import Request, Response
|
|
6
|
+
|
|
7
|
+
from .base import Middleware
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class StorageBackend(ABC):
    """Abstract interface for rate-limit counter storage."""

    @abstractmethod
    def increment(self, key, amount=1, expire=None):
        """Add *amount* to *key*'s counter, optionally setting an expiry in seconds."""

    @abstractmethod
    def get(self, key):
        """Return the current counter value for *key* (0 when absent)."""
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class RedisBackend(StorageBackend):
    """Rate-limit counter storage backed by a Redis client."""

    def __init__(self, redis_client):
        self.redis = redis_client

    def increment(self, key, amount=1, expire=None):
        """Atomically add *amount* to *key* and optionally set its TTL.

        :param key: Redis key holding the counter
        :param amount: increment step, defaults to 1
        :param expire: optional TTL in seconds applied to the key
        :return: the counter value after the increment
        """
        # Pipeline the INCR and EXPIRE so both travel in one round trip.
        with self.redis.pipeline() as pipe:
            pipe.incr(key, amount)
            if expire:
                pipe.expire(key, int(expire))
            results = pipe.execute()
        return results[0]

    def get(self, key):
        """Return the current counter for *key*, or 0 when the key is unset."""
        return int(self.redis.get(key) or 0)
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class InMemoryBackend(StorageBackend):
    """Process-local, dict-based counter storage.

    Suitable only for single-process deployments; counters are not shared
    across workers. Each entry carries an optional absolute expiry timestamp.
    """

    def __init__(self):
        # key -> {"value": int, "expire": absolute epoch seconds or None}
        self.storage = {}

    def _purge_if_expired(self, key):
        # Drop an entry whose expiry has passed so stale rate-limit windows
        # neither keep counting nor leak memory.
        entry = self.storage.get(key)
        if entry and entry["expire"] and time.time() > entry["expire"]:
            del self.storage[key]

    def increment(self, key, amount=1, expire=None):
        """Add *amount* to *key*'s counter and optionally (re)set its expiry.

        An entry whose expiry has already passed is reset before the
        increment, mirroring Redis TTL semantics (the original kept
        incrementing expired keys and never removed them).

        :param key: identifier of the counter to bump
        :param amount: increment step, defaults to 1
        :param expire: optional lifetime in seconds from now
        :return: the counter value after the increment
        """
        self._purge_if_expired(key)
        if key not in self.storage:
            self.storage[key] = {"value": 0, "expire": None}
        self.storage[key]["value"] += amount
        if expire:
            self.storage[key]["expire"] = time.time() + expire
        return self.storage[key]["value"]

    def get(self, key):
        """Return the counter for *key*, or 0 when absent or expired."""
        self._purge_if_expired(key)
        entry = self.storage.get(key)
        return entry["value"] if entry else 0
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
class RateLimitMiddleware(Middleware):
    """Fixed-window rate limiter: caps requests per window for each client IP."""

    def __init__(self, storage_backend, requests_per_minute=60, window_size=60):
        super().__init__()
        self.storage = storage_backend
        self.requests_per_minute = requests_per_minute
        self.window_size = window_size

    def get_request_identifier(self, request: Request):
        # One counter per client IP address.
        return request.ip_addr

    def before_request(self, request: Request):
        """Count this request against the caller's current window.

        :param request: incoming HTTP request
        :return: the request unchanged while under the limit; otherwise a
            429 response carrying a Retry-After header.
        """
        identifier = self.get_request_identifier(request)
        # Fixed-window key: identifier plus the index of the current window.
        window_index = int(time.time()) // self.window_size
        window_key = f"{identifier}:{window_index}"

        count = self.storage.increment(window_key, expire=self.window_size)
        if count > self.requests_per_minute:
            return Response(
                status_code=429,
                description=b"Too Many Requests",
                headers={"Retry-After": str(self.window_size)},
            )
        return request

    def after_request(self, response):
        # Nothing to undo on the way out.
        return response
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
class ConcurrentRequestMiddleware(Middleware):
    """Caps the number of requests being processed at the same time.

    Replies 429 with a Retry-After header once *max_concurrent_requests*
    requests are already in flight.
    """

    def __init__(self, max_concurrent_requests=100):
        super().__init__()
        self.max_concurrent_requests = max_concurrent_requests
        self.current_requests = 0
        # Guards current_requests; hooks may run from multiple threads.
        self.lock = Lock()

    def get_request_identifier(self, request):
        # Consistency fix: use the same attribute as RateLimitMiddleware —
        # the Request type exposes ip_addr (the original read
        # request.remote_addr, which is not part of that interface).
        return request.ip_addr

    def before_request(self, request):
        """Admit the request unless the concurrency ceiling is reached.

        :param request: incoming HTTP request
        :return: the request when admitted; otherwise a 429 response with
            a Retry-After header of 5 seconds.
        """
        with self.lock:
            if self.current_requests >= self.max_concurrent_requests:
                # NOTE(review): rejected requests never increment the counter,
                # but after_request decrements unconditionally — confirm the
                # framework skips after_request for short-circuited responses,
                # otherwise the count drifts negative over time.
                return Response(status_code=429, description="Too Many Requests", headers={"Retry-After": "5"})
            self.current_requests += 1

        return request

    def after_request(self, response):
        with self.lock:
            self.current_requests -= 1
        return response
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
from hypern.hypern import BaseSchemaGenerator, Route as InternalRoute
|
|
5
|
+
import typing
|
|
6
|
+
import orjson
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class EndpointInfo(typing.NamedTuple):
    """A single (path, http_method, handler) record extracted from the router."""

    path: str  # URL path, e.g. "/users/"
    http_method: str  # lower-cased HTTP verb, e.g. "get"
    func: typing.Callable[..., typing.Any]  # handler whose docstring is parsed for the schema
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class SchemaGenerator(BaseSchemaGenerator):
    """Builds an OpenAPI schema from the routes' handler docstrings."""

    def __init__(self, base_schema: dict[str, typing.Any]) -> None:
        self.base_schema = base_schema

    def get_endpoints(self, routes: list[InternalRoute]) -> list[EndpointInfo]:
        """Flatten the router's routes into EndpointInfo records.

        Each record carries:

        - path: e.g. "/users/"
        - http_method: one of 'get', 'post', 'put', 'patch', 'delete', 'options'
        - func: the handler, ready for docstring extraction
        """
        return [
            EndpointInfo(
                path=route.path,
                http_method=route.method.lower(),
                func=route.function.handler,
            )
            for route in routes
        ]

    def get_schema(self, app) -> dict[str, typing.Any]:
        """Assemble the schema for *app*, merging each endpoint's parsed docstring."""
        schema = dict(self.base_schema)
        paths = schema.setdefault("paths", {})

        for endpoint in self.get_endpoints(app.router.routes):
            parsed = self.parse_docstring(endpoint.func)
            if not parsed:
                continue
            # Handler docstrings are expected to contain the operation object as JSON.
            paths.setdefault(endpoint.path, {})[endpoint.http_method] = orjson.loads(parsed)

        return schema
|
hypern/processpool.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import os
|
|
3
|
+
import signal
|
|
4
|
+
import sys
|
|
5
|
+
from typing import Any, Dict, List
|
|
6
|
+
|
|
7
|
+
from multiprocess import Process
|
|
8
|
+
from watchdog.observers import Observer
|
|
9
|
+
|
|
10
|
+
from .hypern import FunctionInfo, Router, Server, SocketHeld, WebsocketRouter
|
|
11
|
+
from .logging import logger
|
|
12
|
+
from .reload import EventHandler
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def run_processes(
    host: str,
    port: int,
    workers: int,
    processes: int,
    max_blocking_threads: int,
    router: Router,
    websocket_router: WebsocketRouter,
    injectables: Dict[str, Any],
    before_request: List[FunctionInfo],
    after_request: List[FunctionInfo],
    response_headers: Dict[str, str],
    reload: bool = True,
) -> List[Process]:
    """Bind the listening socket, spawn the worker pool and block until exit.

    Installs SIGINT/SIGTERM handlers that kill the pool, optionally watches
    the working directory for source changes (auto-reload), then joins every
    worker before returning the pool.
    """
    socket = SocketHeld(host, port)

    process_pool = init_processpool(
        router, websocket_router, socket, workers, processes, max_blocking_threads, injectables, before_request, after_request, response_headers
    )

    def terminating_signal_handler(_sig, _frame):
        # Hard-kill the workers; they block on the event loop and won't
        # exit on their own.
        logger.info("Terminating server!!")
        for process in process_pool:
            process.kill()

    signal.signal(signal.SIGINT, terminating_signal_handler)
    signal.signal(signal.SIGTERM, terminating_signal_handler)

    observer = None
    if reload:
        # Watch the current working directory and restart on file changes.
        observer = Observer()
        reload_handler = EventHandler(file_path=sys.argv[0], directory_path=os.getcwd())
        observer.schedule(reload_handler, os.getcwd(), recursive=True)
        observer.start()

    logger.info(f"Server started at http://{host}:{port}")
    logger.info("Press Ctrl + C to stop")

    try:
        for process in process_pool:
            logger.debug(f"Process {process.pid} started")
            process.join()
    except KeyboardInterrupt:
        pass
    finally:
        if observer is not None:
            observer.stop()
            observer.join()

    return process_pool
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def init_processpool(
    router: Router,
    websocket_router: WebsocketRouter,
    socket: SocketHeld,
    workers: int,
    processes: int,
    max_blocking_threads: int,
    injectables: Dict[str, Any],
    before_request: List[FunctionInfo],
    after_request: List[FunctionInfo],
    response_headers: Dict[str, str],
) -> List[Process]:
    """Start *processes* worker processes, each serving on a clone of *socket*.

    :return: the list of started Process handles
    """
    pool: List[Process] = []

    for _ in range(processes):
        # Every worker gets its own handle to the shared listening socket.
        worker = Process(
            target=spawn_process,
            args=(router, websocket_router, socket.try_clone(), workers, max_blocking_threads, injectables, before_request, after_request, response_headers),
        )
        worker.start()
        pool.append(worker)

    return pool
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def initialize_event_loop():
    """Create and install a fresh asyncio event loop for this worker process.

    Uses the stock asyncio loop on Windows (and "linux-cross" builds);
    everywhere else installs uvloop for better throughput.
    """
    if sys.platform.startswith("win32") or sys.platform.startswith("linux-cross"):
        loop = asyncio.new_event_loop()
    else:
        import uvloop

        uvloop.install()
        loop = uvloop.new_event_loop()

    asyncio.set_event_loop(loop)
    return loop
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
def spawn_process(
    router: Router,
    websocket_router: WebsocketRouter,
    socket: SocketHeld,
    workers: int,
    max_blocking_threads: int,
    injectables: Dict[str, Any],
    before_request: List[FunctionInfo],
    after_request: List[FunctionInfo],
    response_headers: Dict[str, str],
):
    """Worker-process entry point: configure a Server and run it forever.

    Sets up the per-process event loop, wires routers, hooks, DI values and
    default headers into a fresh Server, starts it on the (cloned) socket
    and blocks on the loop until interrupted.
    """
    loop = initialize_event_loop()

    server = Server()
    server.set_router(router=router)
    server.set_websocket_router(websocket_router=websocket_router)
    server.set_injected(injected=injectables)
    server.set_before_hooks(hooks=before_request)
    server.set_after_hooks(hooks=after_request)
    server.set_response_headers(headers=response_headers)

    try:
        server.start(socket, workers, max_blocking_threads)
        # Fix: initialize_event_loop() already created and installed this
        # loop; the original refetched it via asyncio.get_event_loop(),
        # which was a redundant reassignment of the same loop.
        loop.run_forever()
    except KeyboardInterrupt:
        loop.close()
|
hypern/py.typed
ADDED
|
File without changes
|
hypern/reload.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
import sys
|
|
2
|
+
import time
|
|
3
|
+
import subprocess
|
|
4
|
+
from watchdog.events import FileSystemEventHandler
|
|
5
|
+
|
|
6
|
+
from .logging import logger
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class EventHandler(FileSystemEventHandler):
    """Watchdog handler that restarts the application when source files change."""

    def __init__(self, file_path: str, directory_path: str) -> None:
        self.file_path = file_path
        self.directory_path = directory_path
        self.process = None  # Handle of the currently running server subprocess
        self.last_reload = time.time()  # Debounce timestamp for bursty FS events

    def stop_server(self):
        """Terminate the managed subprocess, escalating to kill on timeout."""
        if not self.process:
            logger.debug("No process to stop.")
            return
        try:
            if self.process.poll() is None:  # None means the process is still running
                self.process.terminate()  # Ask nicely first
                self.process.wait(timeout=5)  # Give it a moment to exit
            else:
                logger.error("Process is not running.")
        except subprocess.TimeoutExpired:
            logger.error("Process did not terminate in time. Forcing termination.")
            self.process.kill()  # Forcefully kill the process if it doesn't stop
        except ProcessLookupError:
            logger.error("Process does not exist.")
        except Exception as e:
            logger.error(f"An error occurred while stopping the server: {e}")

    def reload(self):
        """Stop the current server process and start a fresh one."""
        # Fix: stop_server() already terminates (or force-kills) the old
        # process; the original additionally called prev_process.kill()
        # afterwards, a redundant second kill of an already-stopped process.
        self.stop_server()
        logger.debug("Reloading the server")

        self.process = subprocess.Popen(
            [sys.executable, *sys.argv],
        )

        self.last_reload = time.time()

    def on_modified(self, event) -> None:
        """Restart the server when a filesystem change is observed.

        :param event: watchdog event describing the change
        """
        # Avoid reloading multiple times when watchdog detects multiple events
        if time.time() - self.last_reload < 0.5:
            return

        time.sleep(0.2)  # Wait for the file to be fully written
        self.reload()
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import typing
|
|
4
|
+
from urllib.parse import quote
|
|
5
|
+
from hypern.hypern import Response as InternalResponse, Header
|
|
6
|
+
import orjson
|
|
7
|
+
|
|
8
|
+
from hypern.background import BackgroundTask, BackgroundTasks
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class BaseResponse:
    """Common HTTP response: renders content to bytes and prepares headers."""

    media_type = None
    charset = "utf-8"

    def __init__(
        self,
        content: typing.Any = None,
        status_code: int = 200,
        headers: typing.Mapping[str, str] | None = None,
        media_type: str | None = None,
        backgrounds: typing.List[BackgroundTask] | None = None,
    ) -> None:
        self.status_code = status_code
        if media_type is not None:
            self.media_type = media_type
        self.body = self.render(content)
        self.init_headers(headers)
        self.backgrounds = backgrounds

    def render(self, content: typing.Any) -> bytes | memoryview:
        """Convert *content* to a byte payload (anything non-bytes/str is JSON-encoded)."""
        if content is None:
            return b""
        if isinstance(content, (bytes, memoryview)):
            return content
        if isinstance(content, str):
            return content.encode(self.charset)
        return orjson.dumps(content)  # type: ignore

    def init_headers(self, headers: typing.Mapping[str, str] | None = None) -> None:
        """Build self.raw_headers, filling in content-length/type unless supplied."""
        if headers is None:
            raw_headers: dict = {}
            need_length = need_type = True
        else:
            raw_headers = {name.lower(): value for name, value in headers.items()}
            need_length = "content-length" not in raw_headers
            need_type = "content-type" not in raw_headers

        body = getattr(self, "body", None)
        # 1xx, 204 and 304 responses must not carry a body, so skip the length.
        body_allowed = self.status_code >= 200 and self.status_code not in (204, 304)
        if body is not None and need_length and body_allowed:
            raw_headers.setdefault("content-length", str(len(body)))

        media = self.media_type
        if media is not None and need_type:
            if media.startswith("text/") and "charset=" not in media.lower():
                media += "; charset=" + self.charset
            raw_headers.setdefault("content-type", media)

        self.raw_headers = raw_headers
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def to_response(cls):
    """Class decorator: make instantiation yield a Rust-side InternalResponse.

    Calling the wrapped class builds the Python response object, runs any
    attached background tasks, then hands status, headers and body over to
    the InternalResponse type.
    """

    class ResponseWrapper(cls):
        def __new__(cls, *args, **kwargs):
            instance = super().__new__(cls)
            instance.__init__(*args, **kwargs)

            # Run background tasks attached to this response, if any.
            task_manager = BackgroundTasks()
            if instance.backgrounds:
                for task in instance.backgrounds:
                    task_manager.add_task(task)
                task_manager.execute_all()
            del task_manager

            return InternalResponse(
                status_code=instance.status_code,
                headers=Header(instance.raw_headers),
                description=instance.body,
            )

    return ResponseWrapper
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
@to_response
class Response(BaseResponse):
    # Generic response with no default media type; the caller supplies one.
    media_type = None
    charset = "utf-8"
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
@to_response
class JSONResponse(BaseResponse):
    # JSON payloads; non-bytes content is serialized by BaseResponse.render.
    media_type = "application/json"
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
@to_response
class HTMLResponse(BaseResponse):
    # HTML payloads; charset is appended to the content-type by init_headers.
    media_type = "text/html"
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
@to_response
class PlainTextResponse(BaseResponse):
    # Plain-text payloads; charset is appended to the content-type by init_headers.
    media_type = "text/plain"
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
@to_response
class RedirectResponse(BaseResponse):
    """Empty-bodied redirect via the Location header (307 Temporary Redirect by default)."""

    def __init__(
        self,
        url: str,
        status_code: int = 307,
        headers: typing.Mapping[str, str] | None = None,
        backgrounds: typing.List[BackgroundTask] | None = None,
    ) -> None:
        super().__init__(content=b"", status_code=status_code, headers=headers, backgrounds=backgrounds)
        # Percent-encode the target while keeping characters legal in URLs intact.
        self.raw_headers["location"] = quote(str(url), safe=":/%#?=@[]!$&'()*+,;")
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
@to_response
class FileResponse(BaseResponse):
    """Response that serves *content* as a downloadable attachment."""

    def __init__(
        self,
        content: bytes | memoryview,
        filename: str,
        status_code: int = 200,
        headers: typing.Mapping[str, str] | None = None,
        backgrounds: typing.List[BackgroundTask] | None = None,
    ) -> None:
        super().__init__(content=content, status_code=status_code, headers=headers, backgrounds=backgrounds)
        # Bug fix: the original f-string contained a literal placeholder and
        # never interpolated *filename*, leaving the parameter unused and
        # every download offered under the same bogus name.
        self.raw_headers["content-disposition"] = f'attachment; filename="{filename}"'
        self.raw_headers.setdefault("content-type", "application/octet-stream")
        self.raw_headers.setdefault("content-length", str(len(content)))
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
import asyncio
|
|
5
|
+
import functools
|
|
6
|
+
import inspect
|
|
7
|
+
import traceback
|
|
8
|
+
import typing
|
|
9
|
+
|
|
10
|
+
import orjson
|
|
11
|
+
from pydantic import BaseModel
|
|
12
|
+
|
|
13
|
+
from hypern.exceptions import BaseException
|
|
14
|
+
from hypern.hypern import Request, Response
|
|
15
|
+
from hypern.response import JSONResponse
|
|
16
|
+
|
|
17
|
+
from .parser import InputHandler
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def is_async_callable(obj: typing.Any) -> bool:
    """Return True if *obj* (or the callable it wraps) is a coroutine function.

    Unwraps nested functools.partial objects via their ``func`` attribute —
    the original read ``obj.funcz``, a typo that raised AttributeError for
    any partial-wrapped handler.
    """
    while isinstance(obj, functools.partial):
        obj = obj.func
    return asyncio.iscoroutinefunction(obj) or (callable(obj) and asyncio.iscoroutinefunction(obj.__call__))
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
async def run_in_threadpool(func: typing.Callable, *args, **kwargs):
    """Execute a synchronous callable on a worker thread and await its result."""
    if kwargs:  # pragma: no cover
        # Bind keyword arguments up front before handing off to the thread.
        func = functools.partial(func, **kwargs)
    return await asyncio.to_thread(func, *args)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
async def dispatch(handler, request: Request, inject: typing.Dict[str, typing.Any]) -> Response:
    """Invoke *handler* for *request* and normalize the outcome to a Response.

    Resolves the handler's parameters from the request plus injected
    dependencies, awaits coroutine handlers (sync handlers run on a worker
    thread), coerces non-Response results into a JSON envelope, and turns
    exceptions into JSON error responses.
    """
    try:
        is_async = is_async_callable(handler)
        signature = inspect.signature(handler)
        return_annotation = signature.return_annotation
        kwargs = await InputHandler(request).get_input_handler(signature, inject)

        if is_async:
            result = await handler(**kwargs)  # type: ignore
        else:
            result = await run_in_threadpool(handler, **kwargs)

        if isinstance(result, Response):
            return result

        # Validate through the declared pydantic return model, if any.
        if isinstance(return_annotation, type) and issubclass(return_annotation, BaseModel):
            result = return_annotation.model_validate(result).model_dump(mode="json")  # type: ignore
        return JSONResponse(
            content=orjson.dumps({"message": result, "error_code": None}),
            status_code=200,
        )
    except Exception as exc:
        # BaseException here is hypern's domain error type (it shadows the builtin).
        payload: typing.Dict = {"message": "", "error_code": "UNKNOWN_ERROR"}
        if isinstance(exc, BaseException):
            payload["error_code"] = exc.error_code
            payload["message"] = exc.msg
            status = exc.status
        else:
            traceback.print_exc()
            payload["message"] = str(exc)
            status = 400
        return JSONResponse(
            content=orjson.dumps(payload),
            status_code=status,
        )
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
import typing
|
|
5
|
+
from typing import Any, Dict
|
|
6
|
+
|
|
7
|
+
import orjson
|
|
8
|
+
|
|
9
|
+
from hypern.hypern import Request, Response
|
|
10
|
+
from hypern.response import JSONResponse
|
|
11
|
+
|
|
12
|
+
from .dispatcher import dispatch
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class HTTPEndpoint:
    """Class-based handler that routes a request to the method named after its HTTP verb."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def method_not_allowed(self, request: Request) -> Response:
        # Bug fix: JSONResponse (a BaseResponse subclass) takes ``content``;
        # the original passed ``description``, an unknown keyword that raised
        # TypeError instead of producing the intended 405 reply (dispatcher.py
        # consistently uses content= for JSONResponse).
        return JSONResponse(
            content=orjson.dumps({"message": "Method Not Allowed", "error_code": "METHOD_NOT_ALLOW"}),
            status_code=405,
        )

    async def dispatch(self, request: Request, inject: Dict[str, Any]) -> Response:
        """Resolve the handler matching request.method and dispatch to it.

        HEAD falls back to the ``get`` handler when no ``head`` method is
        defined; unknown verbs fall back to method_not_allowed.
        """
        handler_name = "get" if request.method == "HEAD" and not hasattr(self, "head") else request.method.lower()
        handler: typing.Callable[[Request], typing.Any] = getattr(  # type: ignore
            self, handler_name, self.method_not_allowed
        )
        return await dispatch(handler, request, inject)
|