whoopapi 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
whoopapi/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ # flake8: noqa
2
+
3
+ from .logging import LOG_CRITICAL, LOG_ERROR, LOG_INFO, LOG_PRETTY, LOG_WARNING
4
+ from .protocol_handlers import RequestHandler, StaticFileHandler, WebsocketHandler
5
+ from .utilities import Application, start_application
6
+ from .wrappers import HttpRequest, HttpResponse
7
+
8
+ # TODO : Document the src
whoopapi/constants.py ADDED
@@ -0,0 +1,319 @@
1
# Fixed GUID appended to the client's Sec-WebSocket-Key before SHA-1 hashing
# during the WebSocket opening handshake (RFC 6455, section 1.3).
WEBSOCKET_ACCEPT_SUFFIX = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
# 4-byte masking key applied when emitting masked WebSocket frames.
# NOTE(review): RFC 6455 requires client-to-server masks to be unpredictable;
# a constant mask defeats that protection — confirm this is only used where a
# fixed mask is acceptable.
WEBSOCKET_FRAME_MASK = b"abcd"
# Default charset for encoding/decoding textual payloads.
DEFAULT_STRING_ENCODING = "utf-8"
4
+
5
+
6
class HttpHeaders:
    """Canonical (lower-case) HTTP header field names.

    Values are lower-cased so lookups against normalized header dictionaries
    (as produced by the request parser) can compare directly without
    case-folding at each call site.
    """

    ACCEPT = "accept"
    ACCEPT_CH = "accept-ch"
    ACCEPT_CHARSET = "accept-charset"
    ACCEPT_ENCODING = "accept-encoding"
    ACCEPT_LANGUAGE = "accept-language"
    ACCEPT_PATCH = "accept-patch"
    ACCEPT_POST = "accept-post"
    ACCEPT_RANGES = "accept-ranges"
    ACCESS_CONTROL_ALLOW_CREDENTIALS = "access-control-allow-credentials"
    ACCESS_CONTROL_ALLOW_HEADERS = "access-control-allow-headers"
    ACCESS_CONTROL_ALLOW_METHODS = "access-control-allow-methods"
    ACCESS_CONTROL_ALLOW_ORIGIN = "access-control-allow-origin"
    ACCESS_CONTROL_EXPOSE_HEADERS = "access-control-expose-headers"
    ACCESS_CONTROL_MAX_AGE = "access-control-max-age"
    ACCESS_CONTROL_REQUEST_HEADERS = "access-control-request-headers"
    ACCESS_CONTROL_REQUEST_METHOD = "access-control-request-method"
    AGE = "age"
    ALLOW = "allow"
    ALT_SVC = "alt-svc"
    ALT_USED = "alt-used"
    ATTRIBUTION_REPORTING_ELIGIBLE = "attribution-reporting-eligible"
    ATTRIBUTION_REPORTING_REGISTER_SOURCE = "attribution-reporting-register-source"
    ATTRIBUTION_REPORTING_REGISTER_TRIGGER = "attribution-reporting-register-trigger"
    AUTHORIZATION = "authorization"
    CACHE_CONTROL = "cache-control"
    CLEAR_SITE_DATA = "clear-site-data"
    CONNECTION = "connection"
    CONTENT_DIGEST = "content-digest"
    CONTENT_DISPOSITION = "content-disposition"
    CONTENT_DPR = "content-dpr"
    CONTENT_ENCODING = "content-encoding"
    CONTENT_LANGUAGE = "content-language"
    CONTENT_LENGTH = "content-length"
    CONTENT_LOCATION = "content-location"
    CONTENT_RANGE = "content-range"
    CONTENT_SECURITY_POLICY = "content-security-policy"
    CONTENT_SECURITY_POLICY_REPORT_ONLY = "content-security-policy-report-only"
    CONTENT_TYPE = "content-type"
    COOKIE = "cookie"
    CRITICAL_CH = "critical-ch"
    CROSS_ORIGIN_EMBEDDER_POLICY = "cross-origin-embedder-policy"
    CROSS_ORIGIN_OPENER_POLICY = "cross-origin-opener-policy"
    CROSS_ORIGIN_RESOURCE_POLICY = "cross-origin-resource-policy"
    DATE = "date"
    DEVICE_MEMORY = "device-memory"
    DIGEST = "digest"
    DNT = "dnt"
    DOWNLINK = "downlink"
    DPR = "dpr"
    EARLY_DATA = "early-data"
    ECT = "ect"
    ETAG = "etag"
    EXPECT = "expect"
    EXPECT_CT = "expect-ct"
    EXPIRES = "expires"
    FORWARDED = "forwarded"
    FROM = "from"
    HOST = "host"
    IF_MATCH = "if-match"
    IF_MODIFIED_SINCE = "if-modified-since"
    IF_NONE_MATCH = "if-none-match"
    IF_RANGE = "if-range"
    IF_UNMODIFIED_SINCE = "if-unmodified-since"
    KEEP_ALIVE = "keep-alive"
    LAST_MODIFIED = "last-modified"
    LINK = "link"
    LOCATION = "location"
    MAX_FORWARDS = "max-forwards"
    NEL = "nel"
    NO_VARY_SEARCH = "no-vary-search"
    OBSERVE_BROWSING_TOPICS = "observe-browsing-topics"
    ORIGIN = "origin"
    ORIGIN_AGENT_CLUSTER = "origin-agent-cluster"
    PERMISSIONS_POLICY = "permissions-policy"
    PRAGMA = "pragma"
    PRIORITY = "priority"
    PROXY_AUTHENTICATE = "proxy-authenticate"
    PROXY_AUTHORIZATION = "proxy-authorization"
    RANGE = "range"
    REFERER = "referer"
    REFERRER_POLICY = "referrer-policy"
    REPORTING_ENDPOINTS = "reporting-endpoints"
    REPR_DIGEST = "repr-digest"
    RETRY_AFTER = "retry-after"
    RTT = "rtt"
    SAVE_DATA = "save-data"
    SEC_BROWSING_TOPICS = "sec-browsing-topics"
    SEC_CH_PREFERS_COLOR_SCHEME = "sec-ch-prefers-color-scheme"
    SEC_CH_PREFERS_REDUCED_MOTION = "sec-ch-prefers-reduced-motion"
    SEC_CH_PREFERS_REDUCED_TRANSPARENCY = "sec-ch-prefers-reduced-transparency"
    SEC_CH_UA = "sec-ch-ua"
    SEC_CH_UA_ARCH = "sec-ch-ua-arch"
    SEC_CH_UA_BITNESS = "sec-ch-ua-bitness"
    SEC_CH_UA_FULL_VERSION = "sec-ch-ua-full-version"
    SEC_CH_UA_FULL_VERSION_LIST = "sec-ch-ua-full-version-list"
    SEC_CH_UA_MOBILE = "sec-ch-ua-mobile"
    SEC_CH_UA_MODEL = "sec-ch-ua-model"
    SEC_CH_UA_PLATFORM = "sec-ch-ua-platform"
    SEC_CH_UA_PLATFORM_VERSION = "sec-ch-ua-platform-version"
    SEC_FETCH_DEST = "sec-fetch-dest"
    SEC_FETCH_MODE = "sec-fetch-mode"
    SEC_FETCH_SITE = "sec-fetch-site"
    SEC_FETCH_USER = "sec-fetch-user"
    SEC_GPC = "sec-gpc"
    SEC_PURPOSE = "sec-purpose"
    SEC_WEBSOCKET_ACCEPT = "sec-websocket-accept"
    SEC_WEBSOCKET_KEY = "sec-websocket-key"
    SEC_WEBSOCKET_VERSION = "sec-websocket-version"
    SERVER = "server"
    SERVER_TIMING = "server-timing"
    SERVICE_WORKER_NAVIGATION_PRELOAD = "service-worker-navigation-preload"
    SET_COOKIE = "set-cookie"
    SET_LOGIN = "set-login"
    SOURCEMAP = "sourcemap"
    SPECULATION_RULES = "speculation-rules"
    STRICT_TRANSPORT_SECURITY = "strict-transport-security"
    SUPPORTS_LOADING_MODE = "supports-loading-mode"
    TE = "te"
    TIMING_ALLOW_ORIGIN = "timing-allow-origin"
    TK = "tk"
    TRAILER = "trailer"
    TRANSFER_ENCODING = "transfer-encoding"
    UPGRADE = "upgrade"
    UPGRADE_INSECURE_REQUESTS = "upgrade-insecure-requests"
    USER_AGENT = "user-agent"
    VARY = "vary"
    VIA = "via"
    VIEWPORT_WIDTH = "viewport-width"
    WANT_CONTENT_DIGEST = "want-content-digest"
    WANT_DIGEST = "want-digest"
    WANT_REPR_DIGEST = "want-repr-digest"
    WARNING = "warning"
    WIDTH = "width"
    WWW_AUTHENTICATE = "www-authenticate"
    X_CONTENT_TYPE_OPTIONS = "x-content-type-options"
    X_DNS_PREFETCH_CONTROL = "x-dns-prefetch-control"
    X_FORWARDED_FOR = "x-forwarded-for"
    X_FORWARDED_HOST = "x-forwarded-host"
    X_FORWARDED_PROTO = "x-forwarded-proto"
    X_FRAME_OPTIONS = "x-frame-options"
    X_XSS_PROTECTION = "x-xss-protection"
148
+
149
+
150
class HttpMethods:
    """Lower-case HTTP method names used for route registration/dispatch.

    Values are lower-cased to match the framework's normalized request
    parsing; compare against a lower-cased request method.
    """

    GET = "get"
    POST = "post"
    PUT = "put"
    DELETE = "delete"
    PATCH = "patch"
    # Added for completeness: standard methods clients commonly send.
    # Appended after the originals so existing attribute order is preserved.
    HEAD = "head"
    OPTIONS = "options"
156
+
157
+
158
class HttpContentTypes:
    """MIME type constants for the body formats the framework understands.

    These are the only content types `parse_body` handles specially; anything
    else falls through to raw bytes.
    """

    APPLICATION_OCTET_STREAM = "application/octet-stream"
    APPLICATION_JSON = "application/json"
    MULTIPART_FORM_DATA = "multipart/form-data"
    TEXT_HTML = "text/html"
    TEXT_PLAIN = "text/plain"
164
+
165
+
166
class WebProtocols:
    """URI scheme names for the protocols the server can speak.

    ``HTTP``/``WS`` are the cleartext variants; ``HTTPS``/``WSS`` are the
    TLS-wrapped equivalents.
    """

    HTTP = "http"
    WS = "ws"
    HTTPS = "https"
    WSS = "wss"
171
+
172
+
173
class HttpStatusCodes_:
    """HTTP status lines exposed as instance attributes named ``C_<code>``.

    Each attribute holds the full status line ("<code> <reason phrase>") as
    used directly in response serialization. Attributes live on the instance
    (not the class) so ``instance.__dict__`` enumerates exactly the status
    strings and nothing else.
    """

    def __init__(self):
        # Numeric code -> reason phrase. Insertion order matches the
        # ascending-code order of the original attribute assignments.
        reasons = {
            100: "Continue",
            101: "Switching Protocols",
            102: "Processing",
            200: "OK",
            201: "Created",
            202: "Accepted",
            203: "Non-Authoritative Information",
            204: "No Content",
            205: "Reset Content",
            206: "Partial Content",
            207: "Multi-Status",
            208: "Already Reported",
            226: "IM Used",
            300: "Multiple Choices",
            301: "Moved Permanently",
            302: "Found",
            303: "See Other",
            304: "Not Modified",
            305: "Use Proxy",
            306: "Reserved",
            307: "Temporary Redirect",
            308: "Permanent Redirect",
            400: "Bad Request",
            401: "Unauthorized",
            402: "Payment Required",
            403: "Forbidden",
            404: "Not Found",
            405: "Method Not Allowed",
            406: "Not Acceptable",
            407: "Proxy Authentication Required",
            408: "Request Timeout",
            409: "Conflict",
            410: "Gone",
            411: "Length Required",
            412: "Precondition Failed",
            413: "Request Entity Too Large",
            414: "Request-URI Too Long",
            415: "Unsupported Media Type",
            416: "Requested Range Not Satisfiable",
            417: "Expectation Failed",
            422: "Unprocessable Entity",
            423: "Locked",
            424: "Failed Dependency",
            426: "Upgrade Required",
            428: "Precondition Required",
            429: "Too Many Requests",
            431: "Request Header Fields Too Large",
            500: "Internal Server Error",
            501: "Not Implemented",
            502: "Bad Gateway",
            503: "Service Unavailable",
            504: "Gateway Timeout",
            505: "HTTP Version Not Supported",
            506: "Variant Also Negotiates (Experimental)",
            507: "Insufficient Storage",
            508: "Loop Detected",
            510: "Not Extended",
            511: "Network Authentication Required",
        }
        for code, phrase in reasons.items():
            setattr(self, f"C_{code}", f"{code} {phrase}")
233
+
234
+
235
# Module-level singleton; callers use attribute access (HttpStatusCodes.C_200)
# and helpers below iterate its instance __dict__.
HttpStatusCodes = HttpStatusCodes_()
236
+
237
+
238
def get_http_status_code_message(code: int) -> str:
    """Return the reason phrase for an HTTP status *code*.

    Looks the code up in the ``HttpStatusCodes`` singleton, whose attributes
    hold full status lines like ``"404 Not Found"``.

    Args:
        code: Numeric HTTP status code (e.g. 404).

    Returns:
        The reason phrase (e.g. ``"Not Found"``), or ``"Unknown"`` when the
        code is not in the registry.
    """
    for item in HttpStatusCodes.__dict__.values():
        if isinstance(item, str) and item.startswith(f"{code} "):
            # Fix: slice everything after the first space. The original
            # `item[item.index(" ") + 1]` indexed a single character and
            # returned e.g. "N" instead of "Not Found".
            return item[item.index(" ") + 1:]

    return "Unknown"
244
+
245
+
246
def get_default_headers() -> dict:
    """Return the baseline headers attached to every response.

    A fresh dict is returned on each call so callers may mutate it freely.
    """
    defaults = {"Server": "WhoopAPI"}
    return defaults
248
+
249
+
250
def get_content_type_from_filename(filename: str) -> str:
    """Guess the MIME content type for *filename* from its extension.

    Args:
        filename: File name or path; only the text after the last ``"."`` is
            considered, matched case-insensitively.

    Returns:
        The matching MIME type string, or ``application/octet-stream`` when
        the file has no extension or the extension is unknown.
    """
    # Lower-case so "PHOTO.JPG" resolves the same as "photo.jpg".
    extension = filename.split(".")[-1].lower() if "." in filename else None
    content_type = None

    if extension:
        content_type = {
            "aac": "audio/aac",
            "abw": "application/x-abiword",
            "arc": "application/octet-stream",
            "avi": "video/x-msvideo",
            "azw": "application/vnd.amazon.ebook",
            "bin": "application/octet-stream",
            "bz": "application/x-bzip",
            "bz2": "application/x-bzip2",
            "csh": "application/x-csh",
            "css": "text/css",
            "csv": "text/csv",
            "doc": "application/msword",
            "epub": "application/epub+zip",
            "gif": "image/gif",
            "htm": "text/html",
            "html": "text/html",
            "ico": "image/x-icon",
            "ics": "text/calendar",
            "jar": "application/java-archive",
            "jpeg": "image/jpeg",  # fixed: was "image.jpeg" (invalid MIME type)
            "jpg": "image/jpeg",
            "js": "application/javascript",
            "json": "application/json",
            "mid": "audio/midi",
            "midi": "audio/midi",
            "mpeg": "video/mpeg",
            "mpkg": "application/vnd.apple.installer+xml",
            "odp": "application/vnd.oasis.opendocument.presentation",
            "ods": "application/vnd.oasis.opendocument.spreadsheet",
            "odt": "application/vnd.oasis.opendocument.text",
            "oga": "audio/ogg",
            "ogv": "video/ogg",
            "ogx": "application/ogg",
            "pdf": "application/pdf",
            "ppt": "application/vnd.ms-powerpoint",
            "rar": "application/x-rar-compressed",
            "rtf": "application/rtf",
            "sh": "application/x-sh",
            "svg": "image/svg+xml",
            "swf": "application/x-shockwave-flash",
            "tar": "application/x-tar",
            "tif": "image/tiff",
            "tiff": "image/tiff",
            "ttf": "font/ttf",
            "vsd": "application/vnd.visio",
            "wav": "audio/x-wav",
            "weba": "audio/webm",
            "webm": "video/webm",
            "webp": "image/webp",
            "woff": "font/woff",
            "woff2": "font/woff2",
            "xhtml": "application/xhtml+xml",
            "xls": "application/vnd.ms-excel",
            "xml": "application/xml",
            "xul": "application/vnd.mozilla.xul+xml",
            "zip": "application/zip",
            "3gp": "video/3gpp",
            # "audio/3gpp if it doesn't contain video": "",
            "3g2": "video/3gpp2",
            # "audio/3gpp2 if it doesn't contain video": "",
            "7z": "application/x-7z-compressed",
        }.get(extension, None)

    return content_type if content_type else HttpContentTypes.APPLICATION_OCTET_STREAM
whoopapi/logging.py ADDED
@@ -0,0 +1,21 @@
1
+ import pprint
2
+
3
+
4
def LOG_INFO(*args):
    """Write an informational message to stdout."""
    print(*args)
6
+
7
+
8
def LOG_WARNING(*args):
    """Write a warning message to stdout."""
    print(*args)
10
+
11
+
12
def LOG_ERROR(*args):
    """Write an error message to stdout."""
    print(*args)
14
+
15
+
16
def LOG_CRITICAL(*args):
    """Write a critical message to stdout."""
    print(*args)
18
+
19
+
20
def LOG_PRETTY(*args):
    """Pretty-print a data structure to stdout via ``pprint``.

    Positional arguments are forwarded to ``pprint.pprint`` unchanged, so the
    first argument is the object and any extras match pprint's signature.
    """
    pprint.pprint(*args)
File without changes
@@ -0,0 +1,251 @@
1
+ import gzip
2
+ import json
3
+ import re
4
+ import zlib
5
+ from email.parser import BytesParser
6
+ from email.policy import HTTP
7
+
8
+ import brotli
9
+
10
+ from ..constants import HttpContentTypes, HttpHeaders
11
+
12
+
13
def parse_multipart_with_regex(boundary: str, body: bytes, charset: str = "utf-8"):
    """
    Parse multipart/form-data using regular expressions only.
    Note: This is less reliable than the email library version.
    Args:
        boundary: The multipart boundary string
        body: The raw request body bytes
        charset: Character encoding for text fields
    Returns:
        Tuple of (form_data, files): form fields as ``{name: str|bytes}`` and
        uploads as ``{name: {"filename", "content_type", "data", "size"}}``.
    """
    # Convert boundary to bytes and prepare pattern.
    # Each part is matched as: --boundary CRLF <headers> CRLFCRLF <content>,
    # with a lookahead requiring the next "--boundary" (possibly the closing
    # "--boundary--") followed by CRLF.
    # NOTE(review): the lookahead demands a trailing CRLF after the closing
    # delimiter; a body ending exactly at "--boundary--" would drop its last
    # part — confirm against the writer side.
    boundary_bytes = boundary.encode("ascii")
    pattern = re.compile(
        b"--"
        + re.escape(boundary_bytes)
        + b"\r\n"
        + b"((?:.|\r\n)*?)\r\n\r\n"  # Headers
        + b"((?:.|\r\n)*?)"  # Content
        + b"(?=\r\n--"
        + re.escape(boundary_bytes)
        + b"(?:--)?\r\n)",
        re.DOTALL,
    )

    form_data = {}
    files = {}

    for match in pattern.finditer(body):
        headers_part, content = match.groups()

        # Parse headers into a bytes->bytes dict with lower-cased names.
        headers = {}
        for header_line in headers_part.split(b"\r\n"):
            if b":" in header_line:
                name, value = header_line.split(b":", 1)
                headers[name.strip().lower()] = value.strip()

        # Get field name from Content-Disposition; parts without a name
        # are skipped entirely.
        content_disposition = headers.get(b"content-disposition", b"")
        name_match = re.search(rb'name="([^"]+)"', content_disposition)
        if not name_match:
            continue

        name = name_match.group(1).decode("ascii")

        # Check for filename — its presence distinguishes a file upload
        # from a plain form field.
        filename_match = re.search(rb'filename="([^"]+)"', content_disposition)
        if filename_match:
            # File upload: keep raw bytes plus metadata. A repeated name
            # overwrites the earlier entry (unlike parse_multipart_enhanced,
            # which accumulates a list).
            files[name] = {
                "filename": filename_match.group(1).decode("ascii"),
                "content_type": headers.get(
                    b"content-type", b"application/octet-stream"
                ).decode("ascii"),
                "data": content,
                "size": len(content),
            }
        else:
            # Regular form field: decode with the requested charset, falling
            # back to raw bytes when the content isn't valid text.
            try:
                form_data[name] = content.decode(charset)
            except UnicodeDecodeError:
                form_data[name] = content

    return form_data, files
79
+
80
+
81
def parse_multipart_enhanced(
    content_type: str, boundary: str, body: bytes
) -> tuple[dict, dict]:
    """
    Parse multipart/form-data body into form fields and file uploads.
    Uses email library
    Args:
        content_type: The Content-Type header value
        boundary: The boundary value from content type
        body: The raw request body bytes
    Returns:
        Tuple of (form_data, files): text fields as ``{name: str|bytes}``;
        files as ``{name: info_dict}`` where repeated names become a list of
        info dicts.
    """
    # Reconstruct a minimal MIME message (headers + blank line + body) so
    # BytesParser can do the multipart splitting for us.
    combined_content_type = f"{content_type}; boundary={boundary}"
    headers = {"Content-Type": combined_content_type, "Content-Length": str(len(body))}
    msg = BytesParser(policy=HTTP).parsebytes(
        b"\r\n".join([f"{k}: {v}".encode("ascii") for k, v in headers.items()])
        + b"\r\n\r\n"
        + body
    )

    form_data = {}
    files = {}

    for part in msg.iter_parts():
        # Field name comes from the Content-Disposition header; unnamed
        # parts are skipped.
        disposition = part.get("Content-Disposition", "")
        name_match = re.search(r'name="([^"]+)"', disposition)
        if not name_match:
            continue

        name = name_match.group(1)
        filename_match = re.search(r'filename="([^"]+)"', disposition)
        # decode=True undoes any Content-Transfer-Encoding (base64 etc.).
        # NOTE(review): get_payload(decode=True) can return None for
        # multipart/undecodable parts, which would make len(payload) and
        # .decode() below raise — confirm inputs are always simple parts.
        payload = part.get_payload(decode=True)

        if filename_match:
            # Handle file upload
            file_info = {
                "filename": filename_match.group(1),
                "content_type": part.get_content_type(),
                "data": payload,
                "size": len(payload),
            }

            # Handle multiple files with same name: second occurrence turns
            # the entry into a list, later ones append.
            if name in files:
                if isinstance(files[name], list):
                    files[name].append(file_info)
                else:
                    files[name] = [files[name], file_info]
            else:
                files[name] = file_info
        else:
            # Handle regular form field: prefer text, fall back to raw bytes.
            try:
                form_data[name] = payload.decode("utf-8")
            except UnicodeDecodeError:
                form_data[name] = payload

    return form_data, files
140
+
141
+
142
def parse_json(content_type: str, data: bytes):
    """Deserialize a JSON request body.

    ``content_type`` is accepted only for signature parity with the other
    body parsers; it is not inspected. Raises ``json.JSONDecodeError`` on
    malformed input.
    """
    parsed = json.loads(data)
    return parsed
144
+
145
+
146
def handle_compression(headers: dict, body: bytes) -> tuple[dict, bytes]:
    """
    Detects compression and decompresses HTTP request body.
    Returns tuple of (modified_headers, decompressed_body).

    Args:
        headers: Dictionary of HTTP request headers (case-insensitive keys)
        body: Raw request body as bytes

    Returns:
        Tuple of (headers with removed content-encoding, decompressed body)

    Raises:
        ValueError: when a declared or sniffed encoding fails to decompress.
        NotImplementedError: escapes via ValueError for "compress"/"x-compress".
    """
    decompressed_body = body

    # Path 1: the client declared its encoding(s) explicitly.
    if "content-encoding" in headers:
        encodings = [e.strip().lower() for e in headers["content-encoding"].split(",")]

        # Apply decompressions in reverse order (as per RFC 7231): the last
        # listed encoding was applied last, so it must be undone first.
        for encoding in reversed(encodings):
            try:
                if encoding == "gzip" or encoding == "x-gzip":
                    decompressed_body = gzip.decompress(decompressed_body)

                elif encoding == "deflate":
                    try:
                        decompressed_body = zlib.decompress(decompressed_body)
                    except zlib.error:
                        # Some servers send raw deflate without zlib header;
                        # negative wbits tells zlib to expect no header/CRC.
                        decompressed_body = zlib.decompress(
                            decompressed_body, -zlib.MAX_WBITS
                        )

                elif encoding == "br":
                    decompressed_body = brotli.decompress(decompressed_body)

                elif encoding == "compress" or encoding == "x-compress":
                    # LZW "compress" is obsolete and unsupported here; note
                    # that unknown encodings fall through silently instead.
                    raise NotImplementedError("compress encoding not supported")

            except Exception as e:
                # NOTE(review): this wraps every failure (including the
                # NotImplementedError above) into ValueError without
                # `from e`, losing the original traceback — consider
                # `raise ... from e`.
                raise ValueError(f"Decompression failed for {encoding}: {str(e)}")

        # Remove content-encoding header so downstream parsers see the body
        # as plain content; original headers dict is left untouched.
        new_headers = headers.copy()
        del new_headers["content-encoding"]
        return new_headers, decompressed_body

    # Path 2: no declared encoding — sniff well-known magic bytes.
    if len(body) >= 2:
        try:
            # Check for gzip magic number (1f 8b)
            if body[:2] == b"\x1f\x8b":
                decompressed_body = gzip.decompress(body)
                return headers, decompressed_body

            # Check for zlib header (78 01, 78 9C, 78 DA)
            elif body[0] == 0x78 and body[1] in {0x01, 0x9C, 0xDA}:
                decompressed_body = zlib.decompress(body)
                return headers, decompressed_body

            # Check for brotli (starts with CE 2F or 1E)
            # NOTE(review): brotli has no official magic bytes; this sniff is
            # heuristic and the condition (body[0] in {0xCE, 0x1E} AND
            # body[1] == 0x2F) can both miss real brotli and misfire on
            # binary data — confirm it is intended.
            elif len(body) > 3 and body[0] in {0xCE, 0x1E} and body[1] == 0x2F:
                decompressed_body = brotli.decompress(body)
                return headers, decompressed_body

        except Exception as e:
            raise ValueError(
                f"Auto-detected compression but decompression failed: {str(e)}"
            )

    # Nothing recognized: hand back the body untouched.
    return headers, body
215
+
216
+
217
def parse_body(headers: dict, header_params: dict, data: bytes):
    """Decompress and parse an HTTP request body by content type.

    Args:
        headers: Normalized (lower-cased) request headers.
        header_params: Per-header parameter dicts; the multipart boundary is
            read from ``header_params["content-type"]["boundary"]``.
        data: Raw request body bytes.

    Returns:
        Dict with keys ``json``, ``form_data``, ``files``, ``text`` and
        ``raw``; only the slot matching the content type is populated (``raw``
        always holds the decompressed bytes).

    Raises:
        Exception: when a multipart body arrives without a boundary.
    """
    headers, data = handle_compression(headers=headers, body=data)

    # Absent content-type is treated as plain text.
    content_type = headers.get(HttpHeaders.CONTENT_TYPE, HttpContentTypes.TEXT_PLAIN)

    parsed = {
        "json": None,
        "form_data": None,
        "files": None,
        "text": None,
        "raw": data,
    }

    # The recognized content types are mutually exclusive, so a single
    # if/elif chain dispatches the body to exactly one parser.
    if content_type in (HttpContentTypes.TEXT_PLAIN, HttpContentTypes.TEXT_HTML):
        parsed["text"] = data.decode(encoding="utf8")
    elif content_type == HttpContentTypes.APPLICATION_JSON:
        parsed["json"] = parse_json(content_type=content_type, data=data)
    elif content_type == HttpContentTypes.MULTIPART_FORM_DATA:
        boundary = header_params.get(HttpHeaders.CONTENT_TYPE, {}).get("boundary", "")
        if not boundary:
            raise Exception("Invalid multipart boundary.")

        parsed["form_data"], parsed["files"] = parse_multipart_enhanced(
            content_type=content_type, boundary=boundary, body=data
        )

    return parsed