localstack-extensions-utils 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- localstack_extensions/__init__.py +0 -0
- localstack_extensions/utils/__init__.py +23 -0
- localstack_extensions/utils/docker.py +290 -0
- localstack_extensions/utils/h2_proxy.py +181 -0
- localstack_extensions/utils/tcp_protocol_router.py +179 -0
- localstack_extensions_utils-0.1.0.dist-info/METADATA +72 -0
- localstack_extensions_utils-0.1.0.dist-info/RECORD +9 -0
- localstack_extensions_utils-0.1.0.dist-info/WHEEL +5 -0
- localstack_extensions_utils-0.1.0.dist-info/top_level.txt +1 -0
localstack_extensions/__init__.py
File without changes (empty file)
localstack_extensions/utils/__init__.py
@@ -0,0 +1,23 @@
from localstack_extensions.utils.docker import (
    ProxiedDockerContainerExtension,
    ProxyResource,
)
from localstack_extensions.utils.h2_proxy import (
    TcpForwarder,
    apply_http2_patches_for_grpc_support,
    get_headers_from_data_stream,
    get_headers_from_frames,
    get_frames_from_http2_stream,
    ProxyRequestMatcher,
)

__all__ = [
    "ProxiedDockerContainerExtension",
    "ProxyResource",
    "TcpForwarder",
    "apply_http2_patches_for_grpc_support",
    "get_headers_from_data_stream",
    "get_headers_from_frames",
    "get_frames_from_http2_stream",
    "ProxyRequestMatcher",
]
localstack_extensions/utils/docker.py
@@ -0,0 +1,290 @@
import re
import logging
from functools import cache
from typing import Callable
import requests

from localstack.config import is_env_true
from localstack_extensions.utils.h2_proxy import (
    apply_http2_patches_for_grpc_support,
)
from localstack.utils.docker_utils import DOCKER_CLIENT
from localstack.extensions.api import Extension, http
from localstack.http import Request
from localstack.utils.container_utils.container_client import (
    PortMappings,
    SimpleVolumeBind,
)
from localstack.utils.net import get_addressable_container_host
from localstack.utils.sync import retry
from rolo import route
from rolo.proxy import Proxy
from rolo.routing import RuleAdapter, WithHost
from werkzeug.datastructures import Headers

LOG = logging.getLogger(__name__)


class ProxiedDockerContainerExtension(Extension):
    """
    Utility class to create a LocalStack Extension which runs a Docker container that exposes a service
    on one or more ports, with requests being proxied to that container through the LocalStack gateway.

    Requests may potentially use HTTP2 with binary content as the protocol (e.g., gRPC over HTTP2).
    To ensure proper routing of requests, subclasses can define the `http2_ports`.

    For services requiring raw TCP proxying (e.g., native database protocols), use the `tcp_ports`
    parameter to enable transparent TCP forwarding to the container.
    """

    name: str
    """Name of this extension, which must be overridden in a subclass."""
    image_name: str
    """Docker image name"""
    container_ports: list[int]
    """List of network ports of the Docker container spun up by the extension"""
    host: str | None
    """
    Optional host on which to expose the container endpoints.
    Can be either a static hostname, or a pattern like `<regex("(.+\\.)?"):subdomain>myext.<domain>`
    """
    path: str | None
    """Optional path on which to expose the container endpoints."""
    command: list[str] | None
    """Optional command (and flags) to execute in the container."""
    env_vars: dict[str, str] | None
    """Optional environment variables to pass to the container."""
    volumes: list[SimpleVolumeBind] | None
    """Optional volumes to mount into the container."""
    health_check_fn: Callable[[], None] | None
    """
    Optional custom health check function. If not provided, defaults to HTTP GET on main_port.
    The function should raise an exception if the health check fails.
    """
    health_check_retries: int
    """Number of times to retry the health check before giving up."""
    health_check_sleep: float
    """Time in seconds to sleep between health check retries."""

    request_to_port_router: Callable[[Request], int] | None
    """Callable that returns the target port for a given request, for routing purposes"""
    http2_ports: list[int] | None
    """List of ports for which HTTP2 proxy forwarding into the container should be enabled."""
    tcp_ports: list[int] | None
    """
    List of container ports for raw TCP proxying through the gateway.
    Enables transparent TCP forwarding for protocols that don't use HTTP (e.g., native DB protocols).

    When tcp_ports is set, the extension must implement tcp_connection_matcher() to identify
    its traffic by inspecting initial connection bytes.
    """

    tcp_connection_matcher: Callable[[bytes], bool] | None
    """
    Optional function to identify TCP connections belonging to this extension.

    Called with initial connection bytes (up to 512 bytes) to determine if this extension
    should handle the connection. Return True to claim the connection, False otherwise.
    """

    def __init__(
        self,
        image_name: str,
        container_ports: list[int],
        host: str | None = None,
        path: str | None = None,
        command: list[str] | None = None,
        env_vars: dict[str, str] | None = None,
        volumes: list[SimpleVolumeBind] | None = None,
        health_check_fn: Callable[[], None] | None = None,
        health_check_retries: int = 60,
        health_check_sleep: float = 1.0,
        request_to_port_router: Callable[[Request], int] | None = None,
        http2_ports: list[int] | None = None,
        tcp_ports: list[int] | None = None,
    ):
        self.image_name = image_name
        if not container_ports:
            raise ValueError("container_ports is required")
        self.container_ports = container_ports
        self.host = host
        self.path = path
        self.container_name = re.sub(r"\W", "-", f"ls-ext-{self.name}")
        self.command = command
        self.env_vars = env_vars
        self.volumes = volumes
        self.health_check_fn = health_check_fn
        self.health_check_retries = health_check_retries
        self.health_check_sleep = health_check_sleep
        self.request_to_port_router = request_to_port_router
        self.http2_ports = http2_ports
        self.tcp_ports = tcp_ports
        self.main_port = self.container_ports[0]
        self.container_host = get_addressable_container_host()

    def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
        if self.path:
            raise NotImplementedError(
                "Path-based routing not yet implemented for this extension"
            )
        # note: for simplicity, starting the external container at startup - could be optimized over time ...
        self.start_container()
        # add resource for HTTP/1.1 requests
        resource = RuleAdapter(ProxyResource(self.container_host, self.main_port))
        if self.host:
            resource = WithHost(self.host, [resource])
        router.add(resource)

        # apply patches to serve HTTP/2 requests
        for port in self.http2_ports or []:
            apply_http2_patches_for_grpc_support(
                self.container_host, port, self.http2_request_matcher
            )

        # set up raw TCP proxies with protocol detection
        if self.tcp_ports:
            self._setup_tcp_protocol_routing()

    def _setup_tcp_protocol_routing(self):
        """
        Set up TCP routing on the LocalStack gateway for this extension.

        This method patches the gateway's HTTP protocol handler to intercept TCP
        connections and allow this extension to claim them via tcp_connection_matcher().
        This enables multiple TCP protocols to share the main gateway port (4566).

        Uses monkeypatching to intercept dataReceived() before HTTP processing.
        """
        from localstack_extensions.utils.tcp_protocol_router import (
            patch_gateway_for_tcp_routing,
            register_tcp_extension,
        )

        # Get the connection matcher from the extension
        matcher = getattr(self, "tcp_connection_matcher", None)
        if not matcher:
            LOG.warning(
                f"Extension {self.name} has tcp_ports but no tcp_connection_matcher(). "
                "TCP routing will not work without a matcher."
            )
            return

        # Apply gateway patches (only happens once globally)
        patch_gateway_for_tcp_routing()

        # Register this extension for TCP routing
        # Use first port as the default target port
        target_port = self.tcp_ports[0] if self.tcp_ports else self.main_port

        register_tcp_extension(
            extension_name=self.name,
            matcher=matcher,
            backend_host=self.container_host,
            backend_port=target_port,
        )

        LOG.info(
            f"Registered TCP extension {self.name} -> {self.container_host}:{target_port} on gateway"
        )

    def http2_request_matcher(self, headers: Headers) -> bool:
        """
        Define whether an HTTP2 request should be proxied, based on request headers.

        Default implementation returns False (no HTTP2 proxying).
        Override this method in subclasses that need HTTP2 proxying.
        """
        return False

    def on_platform_shutdown(self):
        self._remove_container()

    @cache
    def start_container(self) -> None:
        LOG.debug("Starting extension container %s", self.container_name)

        port_mapping = PortMappings()
        for port in self.container_ports:
            port_mapping.add(port)

        kwargs = {}
        if self.command:
            kwargs["command"] = self.command
        if self.env_vars:
            kwargs["env_vars"] = self.env_vars
        if self.volumes:
            kwargs["volumes"] = self.volumes

        try:
            DOCKER_CLIENT.run_container(
                self.image_name,
                detach=True,
                remove=True,
                name=self.container_name,
                ports=port_mapping,
                **kwargs,
            )
        except Exception as e:
            LOG.debug("Failed to start container %s: %s", self.container_name, e)
            # allow running the container in a local server in dev mode
            if not is_env_true(f"{self.name.upper().replace('-', '_')}_DEV_MODE"):
                raise

        # Use custom health check if provided, otherwise default to HTTP GET
        health_check = self.health_check_fn or self._default_health_check

        try:
            retry(
                health_check,
                retries=self.health_check_retries,
                sleep=self.health_check_sleep,
            )
        except Exception as e:
            LOG.info("Failed to connect to container %s: %s", self.container_name, e)
            self._remove_container()
            raise

    def _default_health_check(self) -> None:
        """Default health check: HTTP GET request to the main port."""
        response = requests.get(f"http://{self.container_host}:{self.main_port}/")
        assert response.ok

    def _remove_container(self):
        LOG.debug("Stopping extension container %s", self.container_name)
        DOCKER_CLIENT.remove_container(
            self.container_name, force=True, check_existence=False
        )


class ProxyResource:
    """
    Simple proxy resource that forwards incoming requests from the
    LocalStack Gateway to the target Docker container.
    """

    host: str
    port: int

    def __init__(self, host: str, port: int):
        self.host = host
        self.port = port

    @route("/<path:path>")
    def index(self, request: Request, path: str, *args, **kwargs):
        return self._proxy_request(request, forward_path=f"/{path}")

    def _proxy_request(self, request: Request, forward_path: str, *args, **kwargs):
        base_url = f"http://{self.host}:{self.port}"
        proxy = Proxy(forward_base_url=base_url)

        # update content length (may have changed due to content compression)
        if request.method not in ("GET", "OPTIONS"):
            request.headers["Content-Length"] = str(len(request.data))

        # make sure we're forwarding the correct Host header
        request.headers["Host"] = f"localhost:{self.port}"

        # forward the request to the target
        result = proxy.forward(request, forward_path=forward_path)

        return result
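For illustration only (not part of the published package): a minimal sketch of how `ProxiedDockerContainerExtension` from `docker.py` could be subclassed. The extension name, image, ports, health endpoint, and the gRPC content-type check below are made-up examples.

```python
# Hypothetical sketch: a minimal extension that runs a made-up "acme/widget-service"
# image and proxies it through the LocalStack gateway.
import requests
from werkzeug.datastructures import Headers

from localstack_extensions.utils.docker import ProxiedDockerContainerExtension


class WidgetServiceExtension(ProxiedDockerContainerExtension):
    # `name` must be overridden in a subclass (see the class docstring above)
    name = "widget-service"

    def __init__(self):
        super().__init__(
            image_name="acme/widget-service:latest",  # placeholder image
            container_ports=[8080, 50051],            # e.g., HTTP API + gRPC port
            http2_ports=[50051],                      # enable HTTP/2 forwarding for gRPC
            health_check_fn=self._check_health,
        )

    def _check_health(self) -> None:
        # custom health check: raise (or fail the assert) while the container is not ready
        response = requests.get(f"http://{self.container_host}:8080/health")
        assert response.ok

    def http2_request_matcher(self, headers: Headers) -> bool:
        # assumption: identify gRPC traffic for this service by its content-type header
        return (headers.get("content-type") or "").startswith("application/grpc")
```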
localstack_extensions/utils/h2_proxy.py
@@ -0,0 +1,181 @@
import logging
import socket
from enum import Enum
from typing import Iterable, Callable

from h2.frame_buffer import FrameBuffer
from hpack import Decoder
from hyperframe.frame import HeadersFrame, Frame
from twisted.internet import reactor

from localstack.utils.patch import patch
from twisted.web._http2 import H2Connection
from werkzeug.datastructures import Headers

LOG = logging.getLogger(__name__)


ProxyRequestMatcher = Callable[[Headers], bool]


class TcpForwarder:
    """Simple helper class for bidirectional forwarding of TCP traffic."""

    buffer_size: int = 1024
    """Data buffer size for receiving data from upstream socket."""

    def __init__(self, port: int, host: str = "localhost"):
        self.port = port
        self.host = host
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.connect((self.host, self.port))
        self._closed = False

    def receive_loop(self, callback):
        while data := self.recv(self.buffer_size):
            callback(data)

    def recv(self, length):
        try:
            return self._socket.recv(length)
        except OSError as e:
            if self._closed:
                return None
            else:
                raise e

    def send(self, data):
        self._socket.sendall(data)

    def close(self):
        if self._closed:
            return
        LOG.debug(f"Closing connection to upstream HTTP2 server on port {self.port}")
        self._closed = True
        try:
            self._socket.shutdown(socket.SHUT_RDWR)
            self._socket.close()
        except Exception:
            # swallow exceptions here (e.g., "bad file descriptor")
            pass


patched_connection = False


def apply_http2_patches_for_grpc_support(
    target_host: str, target_port: int, http2_request_matcher: ProxyRequestMatcher
):
    """
    Apply some patches to proxy incoming gRPC requests and forward them to a target port.
    Note: this is a very brute-force approach and needs to be fixed/enhanced over time!
    """
    LOG.debug(f"Enabling proxying to backend {target_host}:{target_port}")
    global patched_connection
    assert not patched_connection, (
        "It is not safe to patch H2Connection twice with this function"
    )
    patched_connection = True

    class ForwardingState(Enum):
        UNDECIDED = "undecided"
        FORWARDING = "forwarding"
        PASSTHROUGH = "passthrough"

    class ForwardingBuffer:
        """
        A buffer atop the HTTP2 client connection, that will hold
        data until the ProxyRequestMatcher tells us whether to send it
        to the backend, or leave it to the default handler.
        """

        backend: TcpForwarder
        buffer: list
        state: ForwardingState

        def __init__(self, http_response_stream):
            self.http_response_stream = http_response_stream
            LOG.debug(
                f"Starting TCP forwarder to port {target_port} for new HTTP2 connection"
            )
            self.backend = TcpForwarder(target_port, host=target_host)
            self.buffer = []
            self.state = ForwardingState.UNDECIDED
            reactor.getThreadPool().callInThread(
                self.backend.receive_loop, self.received_from_backend
            )

        def received_from_backend(self, data):
            self.http_response_stream.write(data)

        def received_from_http2_client(self, data, default_handler: Callable):
            match self.state:
                case ForwardingState.PASSTHROUGH:
                    default_handler(data)
                case ForwardingState.FORWARDING:
                    assert not self.buffer
                    # Keep sending data to the backend for the lifetime of this connection
                    self.backend.send(data)
                case ForwardingState.UNDECIDED:
                    self.buffer.append(data)

                    if headers := get_headers_from_data_stream(self.buffer):
                        buffered_data = b"".join(self.buffer)
                        self.buffer = []

                        if http2_request_matcher(headers):
                            self.state = ForwardingState.FORWARDING
                            self.backend.send(buffered_data)
                        else:
                            self.state = ForwardingState.PASSTHROUGH
                            # if this is not a target request, then call the default handler
                            default_handler(buffered_data)

        def close(self):
            self.backend.close()

    @patch(H2Connection.connectionMade)
    def _connectionMade(fn, self, *args, **kwargs):
        self._ls_forwarding_buffer = ForwardingBuffer(self.transport)

    @patch(H2Connection.dataReceived)
    def _dataReceived(fn, self, data, *args, **kwargs):
        self._ls_forwarding_buffer.received_from_http2_client(
            data, lambda d: fn(self, d, *args, **kwargs)
        )

    @patch(H2Connection.connectionLost)
    def connectionLost(fn, self, *args, **kwargs):
        self._ls_forwarding_buffer.close()


def get_headers_from_data_stream(data_list: Iterable[bytes]) -> Headers:
    """Get headers from a data stream (list of bytes data), if any headers are contained."""
    stream = b"".join(data_list)
    return get_headers_from_frames(get_frames_from_http2_stream(stream))


def get_headers_from_frames(frames: Iterable[Frame]) -> Headers:
    """Parse the given list of HTTP2 frames and return a dict of headers, if any"""
    result = {}
    decoder = Decoder()
    for frame in frames:
        if isinstance(frame, HeadersFrame):
            try:
                headers = decoder.decode(frame.data)
                result.update(dict(headers))
            except Exception:
                pass
    return Headers(result)


def get_frames_from_http2_stream(data: bytes) -> Iterable[Frame]:
    """Parse the data from an HTTP2 stream into an iterable of frames"""
    buffer = FrameBuffer(server=True)
    buffer.max_frame_size = 16384
    try:
        buffer.add_data(data)
        for frame in buffer:
            yield frame
    except Exception:
        pass
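For illustration only (not part of the published package): a hedged sketch of wiring up `apply_http2_patches_for_grpc_support` from `h2_proxy.py` directly. The host, port, and the content-type heuristic are assumptions; note that the module asserts the patches are applied at most once per process.

```python
# Hypothetical sketch: apply the HTTP/2 proxy patches outside of
# ProxiedDockerContainerExtension. Host and port values are examples only.
from werkzeug.datastructures import Headers

from localstack_extensions.utils.h2_proxy import apply_http2_patches_for_grpc_support


def is_grpc_request(headers: Headers) -> bool:
    # ProxyRequestMatcher: return True to forward the buffered connection to the
    # backend, False to hand it back to the default HTTP/2 handler
    return (headers.get("content-type") or "").startswith("application/grpc")


# the module asserts that H2Connection is patched at most once per process
apply_http2_patches_for_grpc_support(
    target_host="localhost",  # assumed backend host
    target_port=50051,        # assumed backend gRPC port
    http2_request_matcher=is_grpc_request,
)
```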
localstack_extensions/utils/tcp_protocol_router.py
@@ -0,0 +1,179 @@
"""
Protocol-detecting TCP router for LocalStack Gateway.

This module provides a Twisted protocol that detects the protocol from initial
connection bytes and routes to the appropriate backend, enabling multiple TCP
protocols to share a single gateway port.
"""

import logging
from twisted.internet import reactor
from twisted.protocols.portforward import ProxyClient, ProxyClientFactory
from twisted.web.http import HTTPChannel

from localstack.utils.patch import patch
from localstack import config

LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG if config.DEBUG else logging.INFO)

# Global registry of extensions with TCP matchers
# List of tuples: (extension_name, matcher_func, backend_host, backend_port)
_tcp_extensions = []
_gateway_patched = False


class TcpProxyClient(ProxyClient):
    """Backend TCP connection for protocol-detected connections."""

    def connectionMade(self):
        """Called when backend connection is established."""
        server = self.factory.server

        # Set up peer relationship
        server.set_tcp_peer(self)

        # Unregister any existing producer on server transport (HTTPChannel may have one)
        try:
            server.transport.unregisterProducer()
        except Exception:
            pass  # No producer was registered, which is fine

        # Enable flow control
        self.transport.registerProducer(server.transport, True)
        server.transport.registerProducer(self.transport, True)

        # Send buffered data from detection phase
        if hasattr(self.factory, "initial_data"):
            initial_data = self.factory.initial_data
            self.transport.write(initial_data)
            del self.factory.initial_data

    def dataReceived(self, data):
        """Forward data from backend to client."""
        self.factory.server.transport.write(data)

    def connectionLost(self, reason):
        """Backend connection closed."""
        self.factory.server.transport.loseConnection()


def patch_gateway_for_tcp_routing():
    """
    Patch the LocalStack gateway to enable protocol detection and TCP routing.

    This monkeypatches the HTTPChannel class used by the gateway to intercept
    connections and detect TCP protocols before HTTP processing.
    """
    global _gateway_patched

    if _gateway_patched:
        return

    # Patch HTTPChannel to use our protocol-detecting version
    @patch(HTTPChannel.__init__)
    def _patched_init(fn, self, *args, **kwargs):
        # Call original init
        fn(self, *args, **kwargs)
        # Add our detection attributes
        self._detection_buffer = []
        self._detecting = True
        self._tcp_peer = None

    @patch(HTTPChannel.dataReceived)
    def _patched_dataReceived(fn, self, data):
        """Intercept data to allow extensions to claim TCP connections."""
        if not getattr(self, "_detecting", False):
            # Already decided - either proxying TCP or processing HTTP
            if getattr(self, "_tcp_peer", None):
                # TCP proxying mode
                self._tcp_peer.transport.write(data)
            else:
                # HTTP mode - pass to original
                fn(self, data)
            return

        # Still detecting - buffer data
        if not hasattr(self, "_detection_buffer"):
            self._detection_buffer = []
        self._detection_buffer.append(data)
        buffered_data = b"".join(self._detection_buffer)

        # Try each registered extension's matcher
        if len(buffered_data) >= 8:
            for ext_name, matcher, backend_host, backend_port in _tcp_extensions:
                try:
                    if matcher(buffered_data):
                        # Switch to TCP proxy mode
                        self._detecting = False
                        self.transport.pauseProducing()

                        # Create backend connection
                        client_factory = ProxyClientFactory()
                        client_factory.protocol = TcpProxyClient
                        client_factory.server = self
                        client_factory.initial_data = buffered_data

                        reactor.connectTCP(backend_host, backend_port, client_factory)
                        return
                except Exception as e:
                    LOG.debug(f"Error in matcher for {ext_name}: {e}")
                    continue

            # No extension claimed the connection
            self._detecting = False
            # Feed buffered data to HTTP handler
            for chunk in self._detection_buffer:
                fn(self, chunk)
            self._detection_buffer = []

    @patch(HTTPChannel.connectionLost)
    def _patched_connectionLost(fn, self, reason):
        """Handle connection close."""
        tcp_peer = getattr(self, "_tcp_peer", None)
        if tcp_peer:
            tcp_peer.transport.loseConnection()
            self._tcp_peer = None
        fn(self, reason)

    # Monkey-patch the set_tcp_peer method onto HTTPChannel
    def set_tcp_peer(self, peer):
        """Called when backend TCP connection is established."""
        self._tcp_peer = peer
        self.transport.resumeProducing()

    HTTPChannel.set_tcp_peer = set_tcp_peer

    _gateway_patched = True


def register_tcp_extension(
    extension_name: str,
    matcher: callable,
    backend_host: str,
    backend_port: int,
):
    """
    Register an extension for TCP connection routing.

    Args:
        extension_name: Name of the extension
        matcher: Function that takes bytes and returns bool to claim connection
        backend_host: Backend host to route to
        backend_port: Backend port to route to
    """
    _tcp_extensions.append((extension_name, matcher, backend_host, backend_port))
    LOG.info(
        f"Registered TCP extension {extension_name} -> {backend_host}:{backend_port}"
    )


def unregister_tcp_extension(extension_name: str):
    """Unregister an extension from TCP routing."""
    global _tcp_extensions
    _tcp_extensions = [
        (name, matcher, host, port)
        for name, matcher, host, port in _tcp_extensions
        if name != extension_name
    ]
    LOG.info(f"Unregistered TCP extension {extension_name}")
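For illustration only (not part of the published package): a hedged sketch of using `tcp_protocol_router.py` directly. The magic-byte prefix, extension name, and backend address are made-up; a real matcher would inspect whatever initial bytes identify its protocol.

```python
# Hypothetical sketch: claim raw TCP connections on the gateway port for a made-up
# wire protocol whose clients start every connection with the magic bytes b"WDGT".
from localstack_extensions.utils.tcp_protocol_router import (
    patch_gateway_for_tcp_routing,
    register_tcp_extension,
)


def matches_widget_protocol(initial_bytes: bytes) -> bool:
    # called with the buffered initial bytes of a new gateway connection
    return initial_bytes.startswith(b"WDGT")


# idempotent: the HTTPChannel patches are applied only once per process
patch_gateway_for_tcp_routing()

register_tcp_extension(
    extension_name="widget-service",  # example name
    matcher=matches_widget_protocol,
    backend_host="localhost",         # assumed container host
    backend_port=9999,                # assumed container port
)
```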
localstack_extensions_utils-0.1.0.dist-info/METADATA
@@ -0,0 +1,72 @@
Metadata-Version: 2.4
Name: localstack-extensions-utils
Version: 0.1.0
Summary: Utility library for LocalStack Extensions
Author: LocalStack Team
Project-URL: Homepage, https://github.com/localstack/localstack-extensions
Keywords: LocalStack,Extensions,Utils
Requires-Python: >=3.10
Description-Content-Type: text/markdown; charset=UTF-8
Requires-Dist: h2
Requires-Dist: hpack
Requires-Dist: httpx
Requires-Dist: hyperframe
Requires-Dist: priority
Requires-Dist: requests
Requires-Dist: rolo
Requires-Dist: twisted
Provides-Extra: dev
Requires-Dist: boto3; extra == "dev"
Requires-Dist: build; extra == "dev"
Requires-Dist: jsonpatch; extra == "dev"
Requires-Dist: localstack; extra == "dev"
Requires-Dist: pytest; extra == "dev"
Requires-Dist: ruff; extra == "dev"
Provides-Extra: test
Requires-Dist: pytest>=7.0; extra == "test"
Requires-Dist: pytest-timeout>=2.0; extra == "test"
Requires-Dist: localstack; extra == "test"
Requires-Dist: jsonpatch; extra == "test"
Requires-Dist: grpcio>=1.60.0; extra == "test"
Requires-Dist: grpcio-tools>=1.60.0; extra == "test"

LocalStack Extensions Utils
===========================

A utility library providing common functionality for building [LocalStack Extensions](https://github.com/localstack/localstack-extensions).

## Usage

To use this library in your LocalStack extension, add it to the `dependencies` in your extension's `pyproject.toml`:

```toml
[project]
dependencies = [
    "localstack-extensions-utils",
]
```

Or, to install directly from the GitHub repository:

```toml
[project]
dependencies = [
    "localstack-extensions-utils @ git+https://github.com/localstack/localstack-extensions.git#subdirectory=utils",
]
```

Then import the utilities in your extension code, for example:

```python
from localstack_extensions.utils import ProxiedDockerContainerExtension

...
```
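As a rough sketch (the class, image name, and port below are placeholders rather than a real service), a minimal extension subclass might look like:

```python
from localstack_extensions.utils import ProxiedDockerContainerExtension


class MyServiceExtension(ProxiedDockerContainerExtension):
    name = "my-service"  # placeholder extension name

    def __init__(self):
        super().__init__(
            image_name="example/my-service:latest",  # placeholder image
            container_ports=[8080],                  # placeholder port
        )
```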

## Dependencies

This library requires LocalStack to be installed as it uses various LocalStack utilities for Docker management and networking.

## License

The code in this repo is available under the Apache 2.0 license.
localstack_extensions_utils-0.1.0.dist-info/RECORD
@@ -0,0 +1,9 @@
localstack_extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
localstack_extensions/utils/__init__.py,sha256=YJUb7XLH4ClD2Axfjj3dbnEVjdrGSfj7Ju2Tp-6lf3o,611
localstack_extensions/utils/docker.py,sha256=uLzxOg1aqcMlf27gokfeQcVc9mper_sfXDEmphWFnMg,11093
localstack_extensions/utils/h2_proxy.py,sha256=CK6_GwyFcSqJ7If_gk-lrB-L6UkI8QmY9LgLL0IoN6s,6185
localstack_extensions/utils/tcp_protocol_router.py,sha256=maWaM_3QfX4ESL1MZFPmBAeDw8cbiFLXqXN9eGnO6zs,6241
localstack_extensions_utils-0.1.0.dist-info/METADATA,sha256=rgQDBwMOSr1D_OCLK_GnW5n_Bink7p2VU21mPN1pqQM,2061
localstack_extensions_utils-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
localstack_extensions_utils-0.1.0.dist-info/top_level.txt,sha256=IJKbCHLaXVJ9YF0y8GtxlUc4fsJOW-H50t1-rSE1S6U,22
localstack_extensions_utils-0.1.0.dist-info/RECORD,,
localstack_extensions_utils-0.1.0.dist-info/top_level.txt
@@ -0,0 +1 @@
localstack_extensions