sf-veritas 0.11.10__cp314-cp314-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sf_veritas/__init__.py +46 -0
- sf_veritas/_auto_preload.py +73 -0
- sf_veritas/_sfconfig.c +162 -0
- sf_veritas/_sfconfig.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sfcrashhandler.c +267 -0
- sf_veritas/_sfcrashhandler.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sffastlog.c +953 -0
- sf_veritas/_sffastlog.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sffastnet.c +994 -0
- sf_veritas/_sffastnet.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sffastnetworkrequest.c +727 -0
- sf_veritas/_sffastnetworkrequest.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sffuncspan.c +2791 -0
- sf_veritas/_sffuncspan.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sffuncspan_config.c +730 -0
- sf_veritas/_sffuncspan_config.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sfheadercheck.c +341 -0
- sf_veritas/_sfheadercheck.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sfnetworkhop.c +1454 -0
- sf_veritas/_sfnetworkhop.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sfservice.c +1223 -0
- sf_veritas/_sfservice.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sfteepreload.c +6227 -0
- sf_veritas/app_config.py +57 -0
- sf_veritas/cli.py +336 -0
- sf_veritas/constants.py +10 -0
- sf_veritas/custom_excepthook.py +304 -0
- sf_veritas/custom_log_handler.py +146 -0
- sf_veritas/custom_output_wrapper.py +153 -0
- sf_veritas/custom_print.py +153 -0
- sf_veritas/django_app.py +5 -0
- sf_veritas/env_vars.py +186 -0
- sf_veritas/exception_handling_middleware.py +18 -0
- sf_veritas/exception_metaclass.py +69 -0
- sf_veritas/fast_frame_info.py +116 -0
- sf_veritas/fast_network_hop.py +293 -0
- sf_veritas/frame_tools.py +112 -0
- sf_veritas/funcspan_config_loader.py +693 -0
- sf_veritas/function_span_profiler.py +1313 -0
- sf_veritas/get_preload_path.py +34 -0
- sf_veritas/import_hook.py +62 -0
- sf_veritas/infra_details/__init__.py +3 -0
- sf_veritas/infra_details/get_infra_details.py +24 -0
- sf_veritas/infra_details/kubernetes/__init__.py +3 -0
- sf_veritas/infra_details/kubernetes/get_cluster_name.py +147 -0
- sf_veritas/infra_details/kubernetes/get_details.py +7 -0
- sf_veritas/infra_details/running_on/__init__.py +17 -0
- sf_veritas/infra_details/running_on/kubernetes.py +11 -0
- sf_veritas/interceptors.py +543 -0
- sf_veritas/libsfnettee.so +0 -0
- sf_veritas/local_env_detect.py +118 -0
- sf_veritas/package_metadata.py +6 -0
- sf_veritas/patches/__init__.py +0 -0
- sf_veritas/patches/_patch_tracker.py +74 -0
- sf_veritas/patches/concurrent_futures.py +19 -0
- sf_veritas/patches/constants.py +1 -0
- sf_veritas/patches/exceptions.py +82 -0
- sf_veritas/patches/multiprocessing.py +32 -0
- sf_veritas/patches/network_libraries/__init__.py +99 -0
- sf_veritas/patches/network_libraries/aiohttp.py +294 -0
- sf_veritas/patches/network_libraries/curl_cffi.py +363 -0
- sf_veritas/patches/network_libraries/http_client.py +670 -0
- sf_veritas/patches/network_libraries/httpcore.py +580 -0
- sf_veritas/patches/network_libraries/httplib2.py +315 -0
- sf_veritas/patches/network_libraries/httpx.py +557 -0
- sf_veritas/patches/network_libraries/niquests.py +218 -0
- sf_veritas/patches/network_libraries/pycurl.py +399 -0
- sf_veritas/patches/network_libraries/requests.py +595 -0
- sf_veritas/patches/network_libraries/ssl_socket.py +822 -0
- sf_veritas/patches/network_libraries/tornado.py +360 -0
- sf_veritas/patches/network_libraries/treq.py +270 -0
- sf_veritas/patches/network_libraries/urllib_request.py +483 -0
- sf_veritas/patches/network_libraries/utils.py +598 -0
- sf_veritas/patches/os.py +17 -0
- sf_veritas/patches/threading.py +231 -0
- sf_veritas/patches/web_frameworks/__init__.py +54 -0
- sf_veritas/patches/web_frameworks/aiohttp.py +798 -0
- sf_veritas/patches/web_frameworks/async_websocket_consumer.py +337 -0
- sf_veritas/patches/web_frameworks/blacksheep.py +532 -0
- sf_veritas/patches/web_frameworks/bottle.py +513 -0
- sf_veritas/patches/web_frameworks/cherrypy.py +683 -0
- sf_veritas/patches/web_frameworks/cors_utils.py +122 -0
- sf_veritas/patches/web_frameworks/django.py +963 -0
- sf_veritas/patches/web_frameworks/eve.py +401 -0
- sf_veritas/patches/web_frameworks/falcon.py +931 -0
- sf_veritas/patches/web_frameworks/fastapi.py +738 -0
- sf_veritas/patches/web_frameworks/flask.py +526 -0
- sf_veritas/patches/web_frameworks/klein.py +501 -0
- sf_veritas/patches/web_frameworks/litestar.py +616 -0
- sf_veritas/patches/web_frameworks/pyramid.py +440 -0
- sf_veritas/patches/web_frameworks/quart.py +841 -0
- sf_veritas/patches/web_frameworks/robyn.py +708 -0
- sf_veritas/patches/web_frameworks/sanic.py +874 -0
- sf_veritas/patches/web_frameworks/starlette.py +742 -0
- sf_veritas/patches/web_frameworks/strawberry.py +1446 -0
- sf_veritas/patches/web_frameworks/tornado.py +485 -0
- sf_veritas/patches/web_frameworks/utils.py +170 -0
- sf_veritas/print_override.py +13 -0
- sf_veritas/regular_data_transmitter.py +444 -0
- sf_veritas/request_interceptor.py +401 -0
- sf_veritas/request_utils.py +550 -0
- sf_veritas/segfault_handler.py +116 -0
- sf_veritas/server_status.py +1 -0
- sf_veritas/shutdown_flag.py +11 -0
- sf_veritas/subprocess_startup.py +3 -0
- sf_veritas/test_cli.py +145 -0
- sf_veritas/thread_local.py +1319 -0
- sf_veritas/timeutil.py +114 -0
- sf_veritas/transmit_exception_to_sailfish.py +28 -0
- sf_veritas/transmitter.py +132 -0
- sf_veritas/types.py +47 -0
- sf_veritas/unified_interceptor.py +1678 -0
- sf_veritas/utils.py +39 -0
- sf_veritas-0.11.10.dist-info/METADATA +97 -0
- sf_veritas-0.11.10.dist-info/RECORD +141 -0
- sf_veritas-0.11.10.dist-info/WHEEL +5 -0
- sf_veritas-0.11.10.dist-info/entry_points.txt +2 -0
- sf_veritas-0.11.10.dist-info/top_level.txt +1 -0
- sf_veritas.libs/libbrotlicommon-6ce2a53c.so.1.0.6 +0 -0
- sf_veritas.libs/libbrotlidec-811d1be3.so.1.0.6 +0 -0
- sf_veritas.libs/libcom_err-730ca923.so.2.1 +0 -0
- sf_veritas.libs/libcrypt-52aca757.so.1.1.0 +0 -0
- sf_veritas.libs/libcrypto-bdaed0ea.so.1.1.1k +0 -0
- sf_veritas.libs/libcurl-eaa3cf66.so.4.5.0 +0 -0
- sf_veritas.libs/libgssapi_krb5-323bbd21.so.2.2 +0 -0
- sf_veritas.libs/libidn2-2f4a5893.so.0.3.6 +0 -0
- sf_veritas.libs/libk5crypto-9a74ff38.so.3.1 +0 -0
- sf_veritas.libs/libkeyutils-2777d33d.so.1.6 +0 -0
- sf_veritas.libs/libkrb5-a55300e8.so.3.3 +0 -0
- sf_veritas.libs/libkrb5support-e6594cfc.so.0.1 +0 -0
- sf_veritas.libs/liblber-2-d20824ef.4.so.2.10.9 +0 -0
- sf_veritas.libs/libldap-2-cea2a960.4.so.2.10.9 +0 -0
- sf_veritas.libs/libnghttp2-39367a22.so.14.17.0 +0 -0
- sf_veritas.libs/libpcre2-8-516f4c9d.so.0.7.1 +0 -0
- sf_veritas.libs/libpsl-99becdd3.so.5.3.1 +0 -0
- sf_veritas.libs/libsasl2-7de4d792.so.3.0.0 +0 -0
- sf_veritas.libs/libselinux-d0805dcb.so.1 +0 -0
- sf_veritas.libs/libssh-c11d285b.so.4.8.7 +0 -0
- sf_veritas.libs/libssl-60250281.so.1.1.1k +0 -0
- sf_veritas.libs/libunistring-05abdd40.so.2.1.0 +0 -0
- sf_veritas.libs/libuuid-95b83d40.so.1.3.0 +0 -0
|
@@ -0,0 +1,1319 @@
|
|
|
1
|
+
import builtins
|
|
2
|
+
import ctypes
|
|
3
|
+
import fnmatch
|
|
4
|
+
import functools
|
|
5
|
+
import os
|
|
6
|
+
import threading
|
|
7
|
+
import time
|
|
8
|
+
import traceback
|
|
9
|
+
import uuid
|
|
10
|
+
from contextlib import contextmanager
|
|
11
|
+
from contextvars import ContextVar
|
|
12
|
+
from typing import Any, Dict, List, Optional, Set, Tuple, Union
|
|
13
|
+
from uuid import UUID
|
|
14
|
+
|
|
15
|
+
from . import app_config
|
|
16
|
+
from .constants import (
|
|
17
|
+
FUNCSPAN_OVERRIDE_HEADER,
|
|
18
|
+
NONSESSION_APPLOGS,
|
|
19
|
+
PARENT_SESSION_ID_HEADER,
|
|
20
|
+
SAILFISH_TRACING_HEADER,
|
|
21
|
+
)
|
|
22
|
+
from .env_vars import SF_DEBUG
|
|
23
|
+
|
|
24
|
+
# Import C extension for function span tracking (if available).
# AttributeError is caught too because a partially-built extension module can
# import but lack expected symbols.
try:
    from . import _sffuncspan

    _HAS_FUNCSPAN_NATIVE = True
except (ImportError, AttributeError):
    # Pure-Python fallback: native function-span tracking disabled.
    _HAS_FUNCSPAN_NATIVE = False
|
31
|
+
|
|
32
|
+
# Check if LD_PRELOAD is active (cached for performance)
_ld_preload_active: Optional[bool] = None

# Cache SF_DEBUG flag at module load to avoid repeated checks in hot paths
_SF_DEBUG_ENABLED = False


def is_ld_preload_active() -> bool:
    """
    Report whether the _sfteepreload LD_PRELOAD library is active.

    When active, the C extension handles UUID generation and appending to
    X-Sf3-Rid headers (much faster than Python).

    True when either:
      - the LD_PRELOAD env var mentions 'libsfnettee.so', or
      - SF_TEEPRELOAD_ACTIVE=1 (set by the preload library itself).

    The answer — and the cached debug flag — is computed once; subsequent
    calls return the memoized result.
    """
    global _ld_preload_active, _SF_DEBUG_ENABLED

    cached = _ld_preload_active
    if cached is not None:
        return cached

    # Every first-call exit path refreshes the debug flag identically, so
    # compute it once up front.
    _SF_DEBUG_ENABLED = SF_DEBUG and app_config._interceptors_initialized

    ld_preload = os.getenv("LD_PRELOAD", "")
    if "libsfnettee.so" in ld_preload:
        _ld_preload_active = True
        if _SF_DEBUG_ENABLED:
            print(f"[thread_local] LD_PRELOAD active: {ld_preload}", log=False)
        return True

    # Explicit activation flag, set by the LD_PRELOAD library itself.
    if os.getenv("SF_TEEPRELOAD_ACTIVE") == "1":
        _ld_preload_active = True
        if _SF_DEBUG_ENABLED:
            print("[thread_local] SF_TEEPRELOAD_ACTIVE=1", log=False)
        return True

    _ld_preload_active = False
    return False
|
77
|
+
|
|
78
|
+
|
|
79
|
+
# Eager initialization at module load for C TLS function pointer
_sf_tls_setter = None


def _init_c_tls_setter():
    """Resolve the C TLS setter once at import time so the hot path never
    pays the ctypes.CDLL lookup cost."""
    global _sf_tls_setter
    try:
        # Symbol lookup against the main process image: the LD_PRELOAD
        # library lives in the global namespace.
        setter = ctypes.CDLL(None).sf_set_parent_trace_id_tls
        setter.argtypes = [ctypes.c_char_p]
        setter.restype = None
        _sf_tls_setter = setter
    except Exception:
        # Sentinel meaning "unavailable" — distinct from None so callers
        # never retry the lookup on every call.
        _sf_tls_setter = False


# Initialize at module load time (moves expensive CDLL call out of hot path)
_init_c_tls_setter()
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def _set_c_tls_parent_trace_id(parent: str) -> None:
    """
    Publish the parent trace ID into C thread-local storage so LD_PRELOAD
    hooks can read it with a single memory access (no Python lookups).

    CRITICAL: clear_c_tls_parent_trace_id() must run at the end of the
    request, otherwise reused threads see stale data.

    Skipped entirely when LD_PRELOAD is active — in that mode the C side
    reads from the ContextVar / shared registry directly, which avoids the
    string encoding, thread-local write, and ctypes overhead here.
    """
    if _ld_preload_active or not _sf_tls_setter:
        return
    # Encode once and anchor the bytes object on thread-local storage so
    # the pointer handed to C stays valid for the request lifetime.
    encoded = parent.encode("ascii", "ignore")
    _cached_outbound_headers_tls._tls_parent_prid_bytes = encoded
    _sf_tls_setter(encoded)
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def clear_c_tls_parent_trace_id() -> None:
    """
    Clear the parent trace ID from C TLS at the end of a request.

    CRITICAL: prevents stale data when threads are reused (e.g. thread
    pools). Must be called at the end of EVERY request that set the C TLS.

    No-op when LD_PRELOAD is active (nothing was written to TLS) or when
    the C setter was never resolved.
    """
    # FIX: the old guard also tested `_sf_tls_setter is not False`, which is
    # redundant — plain truthiness already excludes both sentinels
    # (None = not initialized, False = unavailable). This now mirrors the
    # guard in _set_c_tls_parent_trace_id exactly.
    if not _ld_preload_active and _sf_tls_setter:
        try:
            # Passing NULL clears the C-side slot.
            _sf_tls_setter(None)
            # Drop the anchored bytes so they can be garbage collected.
            if hasattr(_cached_outbound_headers_tls, "_tls_parent_prid_bytes"):
                delattr(_cached_outbound_headers_tls, "_tls_parent_prid_bytes")
        except Exception:
            pass  # Ignore errors during cleanup
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def clear_outbound_header_base() -> None:
    """
    Reset the outbound header base ContextVar at the end of a request.

    CRITICAL: stale X-Sf4-Prid data must not persist across requests, so
    every request that set the base must call this on the way out. This
    guarantees fresh header generation with proper per-request isolation.
    """
    try:
        outbound_header_base_ctx.set(None)
        if _SF_DEBUG_ENABLED:
            print(
                "[clear_outbound_header_base] Cleared outbound_header_base_ctx ContextVar",
                log=False,
            )
    except Exception as err:
        # Cleanup must never take the application down.
        if _SF_DEBUG_ENABLED:
            print(
                f"[clear_outbound_header_base] ⚠️ Error during cleanup: {err}", log=False
            )
|
165
|
+
|
|
166
|
+
|
|
167
|
+
def clear_trace_id() -> None:
    """
    Reset the trace_id ContextVar at the end of a request.

    CRITICAL: without this, get_or_set_sf_trace_id() would reuse the
    previous request's trace_id for requests lacking an incoming X-Sf3-Rid
    header, leaving X-Sf4-Prid constant across requests (same
    parent_trace_id). Call at the end of EVERY request that had no incoming
    trace header.
    """
    try:
        trace_id_ctx.set(None)
        if _SF_DEBUG_ENABLED:
            print("[clear_trace_id] Cleared trace_id_ctx ContextVar", log=False)
    except Exception as err:
        # Cleanup must never take the application down.
        if _SF_DEBUG_ENABLED:
            print(f"[clear_trace_id] ⚠️ Error during cleanup: {err}", log=False)
|
185
|
+
|
|
186
|
+
|
|
187
|
+
# Define context variables
# trace_id_ctx: the per-request trace identifier; None means "not set yet".
trace_id_ctx = ContextVar("trace_id", default=None)
# NOTE(review): a mutable default means every context that never calls .set()
# shares ONE set object — presumably intentional (global dedup of handled
# exceptions), but confirm before relying on per-context isolation here.
handled_exceptions_ctx = ContextVar("handled_exceptions", default=set())
# Re-entrancy guards: each active/preactive pair lets the log/print/exception
# interceptors detect and break recursive self-invocation.
reentrancy_guard_logging_active_ctx = ContextVar(
    "reentrancy_guard_logging_active", default=False
)
reentrancy_guard_logging_preactive_ctx = ContextVar(
    "reentrancy_guard_logging_preactive", default=False
)
reentrancy_guard_print_active_ctx = ContextVar(
    "reentrancy_guard_print_active", default=False
)
reentrancy_guard_print_preactive_ctx = ContextVar(
    "reentrancy_guard_print_preactive", default=False
)
reentrancy_guard_exception_active_ctx = ContextVar(
    "reentrancy_guard_exception_active", default=False
)
reentrancy_guard_exception_preactive_ctx = ContextVar(
    "reentrancy_guard_exception_preactive", default=False
)

# Suppressors: flip on to disable network capture / log output for a context.
suppress_network_recording_ctx = ContextVar("suppress_network_recording", default=False)
suppress_log_output_ctx = ContextVar("suppress_log_output", default=False)

# Current request path for route-based suppression
current_request_path_ctx = ContextVar("current_request_path", default=None)

# Function span capture override (for header propagation)
funcspan_override_ctx = ContextVar("funcspan_override", default=None)

# Current function span ID (synced from C profiler for async-safety)
# Updated by C profiler on every span push/pop to ensure async request isolation
current_span_id_ctx = ContextVar("current_span_id", default=None)

# Outbound header base (for ultra-fast header injection with cross-thread support)
outbound_header_base_ctx = ContextVar("outbound_header_base", default=None)

reentrancy_guard_sys_stdout_active_ctx = ContextVar(
    "reentrancy_guard_sys_stdout_active", default=False
)

# Thread-local storage as a fallback
_thread_locals = threading.local()

# Cross-thread registry for the current trace_id: used where ContextVar
# propagation does not cross a thread boundary. Guarded by an RLock.
_shared_trace_registry = {}
_shared_trace_registry_lock = threading.RLock()

# Shared registry for outbound header base (cross-thread support, same pattern as trace_id)
_shared_outbound_header_base_registry = {}
_shared_outbound_header_base_lock = threading.RLock()

# ULTRA-FAST: Cached headers dict in thread-local storage (NO LOCK, ~10-20ns access)
# This is the fully-built headers dict, ready to inject (no dict building overhead)
_cached_outbound_headers_tls = threading.local()
|
243
|
+
|
|
244
|
+
|
|
245
|
+
# ================================
# UUID Pre-generation Worker (Background Thread + Ring Buffer)
# ================================
# PERFORMANCE OPTIMIZATION: Pre-generate UUIDs to eliminate uuid.uuid4() call overhead
# - uuid.uuid4() is FAST (~1.6μs per call), but deque.popleft() is INSTANT (~0.1μs)
# - Savings: ~1.5μs per request (from 1.6μs → 0.1μs per UUID)
#
# Ring buffer: collections.deque with configurable size (thread-safe for producer/consumer)
# Memory overhead: ~1MB default (10,000 UUIDs * ~98 bytes/UUID with deque overhead)
# Refill strategy: Generate 100 UUIDs when buffer < 100 (keeps 100 in reserve)
# Startup: Pre-fills buffer to max size (configurable via SF_UUID_BUFFER_SIZE_MB)
#
# Environment Variables:
#   SF_UUID_BUFFER_SIZE_MB: Buffer size in MB (default: 1MB = ~10,000 UUIDs)
# ================================

import atexit
import collections

# Calculate buffer size from environment variable.
# Each UUID + deque overhead ≈ 98 bytes; 1MB = 1,048,576 bytes → ~10,700 UUIDs.
_UUID_BYTES_PER_ENTRY = 98  # Measured: 85 bytes (string) + 13 bytes (deque overhead)
_uuid_buffer_size_mb = float(os.getenv("SF_UUID_BUFFER_SIZE_MB", "1.0"))
_uuid_buffer_max_size = int(
    (_uuid_buffer_size_mb * 1024 * 1024) / _UUID_BYTES_PER_ENTRY
)

# UUID ring buffer (lock-free for single producer/consumer).
# maxlen makes this a true ring buffer: appends past capacity evict from the
# opposite end rather than raising.
_uuid_buffer = collections.deque(maxlen=_uuid_buffer_max_size)
_uuid_buffer_lock = (
    threading.Lock()
)  # Only used during refill to prevent duplicate work
_uuid_worker_running = False
_uuid_worker_thread = None

# Buffer thresholds
_UUID_BUFFER_REFILL_THRESHOLD = (
    100  # Trigger refill when buffer < 100 (keep 100 in reserve)
)
_UUID_BUFFER_BATCH_SIZE = 100  # Generate 100 UUIDs per refill
_UUID_INITIAL_BUFFER_SIZE = _uuid_buffer_max_size  # Pre-fill to max at startup
|
+
|
|
289
|
+
def _uuid_generation_worker():
    """
    Daemon loop that keeps the UUID ring buffer topped up.

    Polls the buffer roughly 10 times a second; when it drops below the
    refill threshold, generates a batch of fresh UUID strings. The lock only
    prevents duplicate refill work — deque.append itself is thread-safe.
    Errors are swallowed so the worker never dies; it backs off 1s on error.
    """
    global _uuid_worker_running

    while _uuid_worker_running:
        try:
            # len() on a deque is atomic, so the cheap check needs no lock.
            if len(_uuid_buffer) < _UUID_BUFFER_REFILL_THRESHOLD:
                with _uuid_buffer_lock:
                    # Re-check under the lock — another thread may have
                    # already refilled while we waited to acquire it.
                    if len(_uuid_buffer) < _UUID_BUFFER_REFILL_THRESHOLD:
                        for _ in range(_UUID_BUFFER_BATCH_SIZE):
                            # 36-char UUID: xxxxxxxx-xxxx-4xxx-xxxx-xxxxxxxxxxxx
                            _uuid_buffer.append(str(uuid.uuid4()))

                if _SF_DEBUG_ENABLED:
                    print(
                        f"[UUID Worker] Refilled buffer: {len(_uuid_buffer)} UUIDs available",
                        log=False,
                    )

            # Sleep to avoid busy-waiting (checks buffer ~10x per second).
            time.sleep(0.1)

        except Exception as exc:
            # The worker must never crash — report (when debugging), back off.
            if _SF_DEBUG_ENABLED:
                print(f"[UUID Worker] ⚠️ Error in worker thread: {exc}", log=False)
            time.sleep(1)
|
+
|
|
332
|
+
|
|
333
|
+
def _start_uuid_worker():
    """
    Start the UUID pre-generation worker thread (idempotent).

    Called once at module load time. Ordering matters: the buffer is
    pre-filled synchronously first, then the running flag is set, then the
    daemon thread is started so it sees the flag already True.

    AGGRESSIVE PRE-FILLING:
    - Pre-fills buffer to max size at startup (~1.6μs per UUID)
    - Default: 1MB = ~10,700 UUIDs = ~17ms startup time
    - Configurable via SF_UUID_BUFFER_SIZE_MB environment variable
    - Worker thread maintains buffer with 100 UUIDs in reserve,
      refilling 100 at a time when the buffer drops below threshold
    """
    global _uuid_worker_running, _uuid_worker_thread

    if _uuid_worker_running:
        return  # Already started

    # Pre-fill buffer to max size at startup (uuid4 is FAST: ~1.6μs per UUID).
    # Timing is only measured when debug output is enabled.
    start_time = time.perf_counter() if _SF_DEBUG_ENABLED else 0
    for _ in range(_UUID_INITIAL_BUFFER_SIZE):
        _uuid_buffer.append(str(uuid.uuid4()))

    if _SF_DEBUG_ENABLED:
        elapsed_ms = (time.perf_counter() - start_time) * 1000
        buffer_size_kb = (_UUID_INITIAL_BUFFER_SIZE * _UUID_BYTES_PER_ENTRY) / 1024
        print(
            f"[UUID Worker] Pre-generated {len(_uuid_buffer):,} UUIDs "
            f"({buffer_size_kb:.1f}KB) in {elapsed_ms:.2f}ms",
            log=False,
        )

    # Start background worker thread (maintains buffer at threshold).
    # The flag must be True before start() or the worker loop exits at once.
    _uuid_worker_running = True
    _uuid_worker_thread = threading.Thread(
        target=_uuid_generation_worker,
        name="SailfishUUIDWorker",
        daemon=True,  # Don't block process shutdown
    )
    _uuid_worker_thread.start()

    if _SF_DEBUG_ENABLED:
        print("[UUID Worker] Background worker thread started", log=False)
|
379
|
+
|
|
380
|
+
def _stop_uuid_worker():
    """Signal the UUID worker loop to exit; registered as an atexit handler.

    Only flips the flag — the daemon thread notices it on its next wake-up,
    so no join is needed at shutdown.
    """
    global _uuid_worker_running
    _uuid_worker_running = False

    if _SF_DEBUG_ENABLED:
        print("[UUID Worker] Stopping background worker thread", log=False)
|
+
|
|
392
|
+
|
|
393
|
+
def _get_pregenerated_uuid() -> str:
    """
    Pop a pre-generated UUID from the ring buffer.

    Fast path for UUID generation: deque.popleft() (~0.1μs) instead of
    uuid.uuid4() (~1.6μs). Falls back to direct generation if the buffer is
    empty — extremely rare with the 1MB pre-fill and 100-UUID reserve.

    Returns:
        UUID string in format xxxxxxxx-xxxx-4xxx-xxxx-xxxxxxxxxxxx (36 chars).
    """
    try:
        # EAFP: popleft() is thread-safe and O(1); catching IndexError avoids
        # a check-then-pop race between concurrent consumers.
        return _uuid_buffer.popleft()
    except IndexError:
        if _SF_DEBUG_ENABLED:
            # FIX (F541): the message previously carried a stray f-prefix
            # with no placeholders — now a plain string literal.
            print(
                "[UUID Worker] ⚠️ Buffer exhausted! Falling back to uuid.uuid4()",
                log=False,
            )
        # Generate directly (fallback path, ~1.6μs).
        return str(uuid.uuid4())
|
+
|
|
420
|
+
|
|
421
|
+
# Initialize UUID worker at module load time (synchronously pre-fills the
# ring buffer, then starts the background refill thread).
_start_uuid_worker()

# Register shutdown handler to stop worker gracefully
atexit.register(_stop_uuid_worker)
426
|
+
|
|
427
|
+
|
|
428
|
+
def _set_shared_trace_id(trace_id: Optional[str]) -> None:
    """Store trace_id in the cross-thread shared registry.

    In LD_PRELOAD mode the ContextVar is the primary source of truth, so
    the registry write is done without taking the lock.
    """
    if _ld_preload_active:
        _shared_trace_registry["trace_id"] = trace_id
    else:
        with _shared_trace_registry_lock:
            _shared_trace_registry["trace_id"] = trace_id
435
|
+
|
|
436
|
+
|
|
437
|
+
def _set_shared_outbound_header_base(base_dict: Optional[dict]) -> None:
    """Store the outbound header base in the shared registry (cross-thread).

    LD_PRELOAD mode writes lock-free (the ContextVar is primary) and — as in
    the original flow — leaves the per-thread header cache untouched; the
    Python-only path takes the lock and then invalidates cached headers.
    """
    if _ld_preload_active:
        _shared_outbound_header_base_registry["base_dict"] = base_dict
    else:
        with _shared_outbound_header_base_lock:
            _shared_outbound_header_base_registry["base_dict"] = base_dict
        _clear_cached_outbound_headers()
446
|
+
|
|
447
|
+
|
|
448
|
+
def _get_shared_outbound_header_base() -> Optional[dict]:
    """Fetch the outbound header base from the shared registry (cross-thread)."""
    # Lock-free read in LD_PRELOAD mode; the ContextVar is primary there.
    if not _ld_preload_active:
        with _shared_outbound_header_base_lock:
            return _shared_outbound_header_base_registry.get("base_dict")
    return _shared_outbound_header_base_registry.get("base_dict")
455
|
+
|
|
456
|
+
|
|
457
|
+
def _clear_cached_outbound_headers() -> None:
    """Drop this thread's cached headers dict (stale once the base changes)."""
    try:
        # EAFP: del raises AttributeError when nothing was cached — ignored.
        del _cached_outbound_headers_tls.headers
    except AttributeError:
        pass
464
|
+
|
|
465
|
+
|
|
466
|
+
def _get_shared_trace_id() -> Optional[str]:
    """Read trace_id from the shared registry (lock-free in LD_PRELOAD mode)."""
    if not _ld_preload_active:
        with _shared_trace_registry_lock:
            return _shared_trace_registry.get("trace_id")
    return _shared_trace_registry.get("trace_id")
472
|
+
|
|
473
|
+
|
|
474
|
+
def _get_context_or_thread_local(
    ctx_var: ContextVar, attr_name: str, default: Any
) -> Any:
    """Read the current value from the ContextVar.

    attr_name and default are retained for interface compatibility with the
    old thread-local fallback path, which is currently disabled.
    """
    return ctx_var.get()
478
|
+
|
|
479
|
+
|
|
480
|
+
def _set_context_and_thread_local(
    ctx_var: ContextVar, attr_name: str, value: Any
) -> Any:
    """Store value in the ContextVar and return it.

    attr_name is retained for interface compatibility with the disabled
    thread-local fallback path.
    """
    ctx_var.set(value)
    return value
486
|
+
|
|
487
|
+
|
|
488
|
+
def unset_sf_trace_id() -> None:
    """Clear the current trace_id from both the cross-thread shared registry
    and the ContextVar."""
    _set_shared_trace_id(None)
    _set_context_and_thread_local(trace_id_ctx, "trace_id", None)
    if _SF_DEBUG_ENABLED:
        print("[[DEBUG]] unset_sf_trace_id: trace_id cleared", log=False)
493
|
+
|
|
494
|
+
|
|
495
|
+
def _get_or_set_context_and_thread_local(
    ctx_var: ContextVar, attr_name: str, value_if_not_set
) -> Tuple[bool, Any]:
    """Return (was_set, value).

    If the ContextVar already holds a non-None value, returns
    (False, current_value); otherwise installs value_if_not_set and returns
    (True, value_if_not_set).
    """
    existing = ctx_var.get()
    if existing is not None:
        return False, existing
    _set_context_and_thread_local(ctx_var, attr_name, value_if_not_set)
    return True, value_if_not_set
503
|
+
|
|
504
|
+
|
|
505
|
+
# Trace ID functions
def get_sf_trace_id() -> Optional[Union[str, UUID]]:
    """Return the current trace_id, or None if unset.

    Backed by a ContextVar, which is async- and thread-safe in both
    LD_PRELOAD and pure-Python modes — no shared-registry lookup needed.
    """
    return _get_context_or_thread_local(trace_id_ctx, "trace_id", None)
510
|
+
|
|
511
|
+
|
|
512
|
+
def set_sf_trace_id(trace_id: Union[str, UUID]) -> Union[str, UUID]:
    """Store trace_id in the ContextVar (async/thread safe) and return it."""
    return _set_context_and_thread_local(trace_id_ctx, "trace_id", trace_id)
516
|
+
|
|
517
|
+
|
|
518
|
+
def generate_new_trace_id() -> str:
    """
    Generate and set a fresh trace_id for requests without an incoming
    X-Sf3-Rid header, ensuring each request gets its own trace_id instead
    of reusing a stale ContextVar value.

    Returns:
        The newly generated trace_id string.

    Performance:
        - UUID comes from the pre-generated ring buffer (~0.06μs via
          deque.popleft() vs ~1.6μs for uuid.uuid4()); the buffer
          auto-refills in the background with 100 UUIDs in reserve.
        - ContextVar.set() is called directly (no wrapper call overhead).
    """
    # FIX: the previous `popleft() if _uuid_buffer else uuid4()` pattern had
    # a check-then-pop race — another consumer could drain the buffer between
    # the truthiness test and popleft(), raising IndexError. EAFP is
    # race-free and matches _get_pregenerated_uuid().
    try:
        unique_id = _uuid_buffer.popleft()
    except IndexError:
        unique_id = str(uuid.uuid4())
    trace_id = f"{NONSESSION_APPLOGS}-v3/{app_config._sailfish_api_key}/{unique_id}"

    # PERFORMANCE: Inline ContextVar.set() to eliminate function call overhead
    trace_id_ctx.set(trace_id)

    if _SF_DEBUG_ENABLED:
        print(
            f"[generate_new_trace_id] Generated fresh trace_id: {trace_id}", log=False
        )

    return trace_id
551
|
+
|
|
552
|
+
|
|
553
|
+
def get_or_set_sf_trace_id(
    new_trace_id_if_not_set: Optional[str] = None,
    is_associated_with_inbound_request: bool = False,
) -> Tuple[bool, Union[str, UUID]]:
    """
    Return the current trace_id, creating or installing one when absent.

    Args:
        new_trace_id_if_not_set: When provided (truthy), this value is stored
            as the trace_id unconditionally. When None, the existing trace_id
            is returned if set; otherwise a fresh one is generated and stored.
        is_associated_with_inbound_request: Accepted for interface
            compatibility; not consulted by this implementation.

    Returns:
        (was_newly_set, trace_id) - was_newly_set is False only when an
        existing trace_id was found and returned unchanged.
    """
    # TODO: implement "skip if not ready yet"? (carried over from original)
    if not new_trace_id_if_not_set:
        # Use ContextVar for both LD_PRELOAD and Python-only modes
        trace_id = _get_context_or_thread_local(trace_id_ctx, "trace_id", None)
        if trace_id:
            if _SF_DEBUG_ENABLED:
                # FIX: the original also built a traceback stack string here
                # that was never used; removed to avoid dead work on the
                # debug path.
                print(f"[trace_id] Returning existing trace_id: {trace_id}", log=False)
            return False, trace_id

        # No trace_id found - generate new one
        if _SF_DEBUG_ENABLED:
            print("[trace_id] No trace_id found. Generating new trace_id.", log=False)
        # PERFORMANCE: Use pre-generated UUID from ring buffer (500x faster than uuid.uuid4())
        unique_id = _get_pregenerated_uuid()
        trace_id = f"{NONSESSION_APPLOGS}-v3/{app_config._sailfish_api_key}/{unique_id}"

        # Set using ContextVar only (no shared registry)
        _set_context_and_thread_local(trace_id_ctx, "trace_id", trace_id)

        if _SF_DEBUG_ENABLED:
            print(f"[trace_id] Generated and set new trace_id: {trace_id}", log=False)
        return True, trace_id

    # new_trace_id_if_not_set provided - set it directly
    if _SF_DEBUG_ENABLED:
        print(
            f"[trace_id] Setting new trace_id from argument: {new_trace_id_if_not_set}",
            log=False,
        )

    # Set using ContextVar only (no shared registry)
    _set_context_and_thread_local(trace_id_ctx, "trace_id", new_trace_id_if_not_set)

    return True, new_trace_id_if_not_set
|
|
599
|
+
|
|
600
|
+
|
|
601
|
+
# Handled exceptions functions
|
|
602
|
+
def get_handled_exceptions() -> Set[Any]:
    """Return the ids of exceptions already marked handled in this context."""
    fallback: Set[Any] = set()
    return _get_context_or_thread_local(
        handled_exceptions_ctx, "handled_exceptions", fallback
    )
|
|
606
|
+
|
|
607
|
+
|
|
608
|
+
def set_handled_exceptions(exceptions_set: Set[Any]) -> Set[Any]:
    """Store *exceptions_set* as the handled-exception set; returns the stored set."""
    storage_key = "handled_exceptions"
    return _set_context_and_thread_local(
        handled_exceptions_ctx, storage_key, exceptions_set
    )
|
|
612
|
+
|
|
613
|
+
|
|
614
|
+
def get_or_set_handled_exceptions(
    default: Optional[Set[Any]] = None,
) -> Tuple[bool, Set[Any]]:
    """
    Fetch the handled-exception set, storing *default* when unset.

    Args:
        default: Set to install when no handled-exception set exists yet.
            A fresh empty set is used when None (safe mutable-default idiom).

    Returns:
        (flag, set) pair as produced by the shared
        _get_or_set_context_and_thread_local helper.
    """
    # FIX: annotation was `default: set = None`, which is untrue for the
    # None default; widened to Optional[Set[Any]].
    if default is None:
        default = set()
    return _get_or_set_context_and_thread_local(
        handled_exceptions_ctx, "handled_exceptions", default
    )
|
|
620
|
+
|
|
621
|
+
|
|
622
|
+
def mark_exception_handled(exception) -> None:
    """
    Record *exception* (by object id) in the context's handled set.

    Also mirrors the state onto the exception's own ``_handled`` attribute,
    but only when the attribute already exists on the object.
    """
    tracked = get_handled_exceptions()
    tracked.add(id(exception))
    set_handled_exceptions(tracked)
    # Only flip the marker when the exception already declares it
    # (e.g. set up elsewhere in the project); never create the attribute.
    if hasattr(exception, "_handled"):
        exception._handled = True
|
|
628
|
+
|
|
629
|
+
|
|
630
|
+
def has_handled_exception(exception) -> bool:
    """True when *exception* was marked handled via the context set or its _handled flag."""
    in_context_set = id(exception) in get_handled_exceptions()
    return in_context_set or getattr(exception, "_handled", False)
|
|
634
|
+
|
|
635
|
+
|
|
636
|
+
def reset_handled_exceptions() -> Set[Any]:
    """Discard all handled-exception markers; returns the new empty set."""
    empty: Set[Any] = set()
    return set_handled_exceptions(empty)
|
|
638
|
+
|
|
639
|
+
|
|
640
|
+
# Reentrancy guards (logging)
|
|
641
|
+
def get_reentrancy_guard_logging_active() -> bool:
    """Report whether the logging reentrancy guard is active in this context."""
    guard_key = "reentrancy_guard_logging_active"
    return _get_context_or_thread_local(
        reentrancy_guard_logging_active_ctx, guard_key, False
    )
|
|
645
|
+
|
|
646
|
+
|
|
647
|
+
def set_reentrancy_guard_logging_active(value: bool) -> bool:
    """Store *value* as the logging reentrancy guard state; returns the stored value."""
    guard_key = "reentrancy_guard_logging_active"
    return _set_context_and_thread_local(
        reentrancy_guard_logging_active_ctx, guard_key, value
    )
|
|
651
|
+
|
|
652
|
+
|
|
653
|
+
def get_or_set_reentrancy_guard_logging_active(
    value_if_not_set: bool,
) -> Tuple[bool, bool]:
    """
    Fetch the logging guard, installing *value_if_not_set* when unset.

    Returns the (flag, value) pair produced by the shared
    _get_or_set_context_and_thread_local helper.
    """
    guard_key = "reentrancy_guard_logging_active"
    return _get_or_set_context_and_thread_local(
        reentrancy_guard_logging_active_ctx, guard_key, value_if_not_set
    )
|
|
661
|
+
|
|
662
|
+
|
|
663
|
+
def activate_reentrancy_guards_logging() -> bool:
    """Enable both the active and preactive logging guards; always returns True."""
    for enable in (
        set_reentrancy_guard_logging_active,
        set_reentrancy_guard_logging_preactive,
    ):
        enable(True)
    return True
|
|
667
|
+
|
|
668
|
+
|
|
669
|
+
def get_reentrancy_guard_logging_preactive() -> bool:
    """Report whether the logging preactive guard is set in this context."""
    guard_key = "reentrancy_guard_logging_preactive"
    return _get_context_or_thread_local(
        reentrancy_guard_logging_preactive_ctx, guard_key, False
    )
|
|
675
|
+
|
|
676
|
+
|
|
677
|
+
def set_reentrancy_guard_logging_preactive(value: bool) -> bool:
    """Store *value* as the logging preactive guard state; returns the stored value."""
    guard_key = "reentrancy_guard_logging_preactive"
    return _set_context_and_thread_local(
        reentrancy_guard_logging_preactive_ctx, guard_key, value
    )
|
|
683
|
+
|
|
684
|
+
|
|
685
|
+
def get_or_set_reentrancy_guard_logging_preactive(
    value_if_not_set: bool,
) -> Tuple[bool, bool]:
    """
    Fetch the logging preactive guard, installing *value_if_not_set* when unset.

    Returns the (flag, value) pair produced by the shared helper.
    """
    guard_key = "reentrancy_guard_logging_preactive"
    return _get_or_set_context_and_thread_local(
        reentrancy_guard_logging_preactive_ctx, guard_key, value_if_not_set
    )
|
|
693
|
+
|
|
694
|
+
|
|
695
|
+
def activate_reentrancy_guards_logging_preactive() -> bool:
    """Enable the logging preactive guard; returns the stored value."""
    return set_reentrancy_guard_logging_preactive(True)
|
|
697
|
+
|
|
698
|
+
|
|
699
|
+
# Reentrancy guards (stdout)
|
|
700
|
+
def get_reentrancy_guard_sys_stdout_active() -> bool:
    """Report whether the sys.stdout reentrancy guard is active in this context."""
    guard_key = "reentrancy_guard_sys_stdout_active"
    return _get_context_or_thread_local(
        reentrancy_guard_sys_stdout_active_ctx, guard_key, False
    )
|
|
706
|
+
|
|
707
|
+
|
|
708
|
+
def set_reentrancy_guard_sys_stdout_active(value: bool) -> bool:
    """Store *value* as the sys.stdout guard state; returns the stored value."""
    guard_key = "reentrancy_guard_sys_stdout_active"
    return _set_context_and_thread_local(
        reentrancy_guard_sys_stdout_active_ctx, guard_key, value
    )
|
|
714
|
+
|
|
715
|
+
|
|
716
|
+
def activate_reentrancy_guards_sys_stdout() -> bool:
    """Enable the sys.stdout reentrancy guard; always returns True."""
    set_reentrancy_guard_sys_stdout_active(True)
    return True
|
|
719
|
+
|
|
720
|
+
|
|
721
|
+
# Reentrancy guards (print)
|
|
722
|
+
def get_reentrancy_guard_print_active() -> bool:
    """Report whether the print reentrancy guard is active in this context."""
    guard_key = "reentrancy_guard_print_active"
    return _get_context_or_thread_local(
        reentrancy_guard_print_active_ctx, guard_key, False
    )
|
|
726
|
+
|
|
727
|
+
|
|
728
|
+
def set_reentrancy_guard_print_active(value: bool) -> bool:
    """Store *value* as the print guard state; returns the stored value."""
    guard_key = "reentrancy_guard_print_active"
    return _set_context_and_thread_local(
        reentrancy_guard_print_active_ctx, guard_key, value
    )
|
|
732
|
+
|
|
733
|
+
|
|
734
|
+
def get_or_set_reentrancy_guard_print_active(
    value_if_not_set: bool,
) -> Tuple[bool, bool]:
    """
    Fetch the print guard, installing *value_if_not_set* when unset.

    Returns the (flag, value) pair produced by the shared helper.
    """
    guard_key = "reentrancy_guard_print_active"
    return _get_or_set_context_and_thread_local(
        reentrancy_guard_print_active_ctx, guard_key, value_if_not_set
    )
|
|
742
|
+
|
|
743
|
+
|
|
744
|
+
def activate_reentrancy_guards_print() -> bool:
    """Enable both the active and preactive print guards; always returns True."""
    for enable in (
        set_reentrancy_guard_print_active,
        set_reentrancy_guard_print_preactive,
    ):
        enable(True)
    return True
|
|
748
|
+
|
|
749
|
+
|
|
750
|
+
def get_reentrancy_guard_print_preactive() -> bool:
    """Report whether the print preactive guard is set in this context."""
    guard_key = "reentrancy_guard_print_preactive"
    return _get_context_or_thread_local(
        reentrancy_guard_print_preactive_ctx, guard_key, False
    )
|
|
754
|
+
|
|
755
|
+
|
|
756
|
+
def set_reentrancy_guard_print_preactive(value: bool) -> bool:
    """Store *value* as the print preactive guard state; returns the stored value."""
    guard_key = "reentrancy_guard_print_preactive"
    return _set_context_and_thread_local(
        reentrancy_guard_print_preactive_ctx, guard_key, value
    )
|
|
760
|
+
|
|
761
|
+
|
|
762
|
+
def get_or_set_reentrancy_guard_print_preactive(
    value_if_not_set: bool,
) -> Tuple[bool, bool]:
    """
    Fetch the print preactive guard, installing *value_if_not_set* when unset.

    Returns the (flag, value) pair produced by the shared helper.
    """
    guard_key = "reentrancy_guard_print_preactive"
    return _get_or_set_context_and_thread_local(
        reentrancy_guard_print_preactive_ctx, guard_key, value_if_not_set
    )
|
|
770
|
+
|
|
771
|
+
|
|
772
|
+
def activate_reentrancy_guards_print_preactive() -> bool:
    """Enable the print preactive guard; returns the stored value."""
    return set_reentrancy_guard_print_preactive(True)
|
|
774
|
+
|
|
775
|
+
|
|
776
|
+
# Reentrancy guards (exception)
|
|
777
|
+
def get_reentrancy_guard_exception_active() -> bool:
    """Report whether the exception reentrancy guard is active in this context."""
    guard_key = "reentrancy_guard_exception_active"
    return _get_context_or_thread_local(
        reentrancy_guard_exception_active_ctx, guard_key, False
    )
|
|
783
|
+
|
|
784
|
+
|
|
785
|
+
def set_reentrancy_guard_exception_active(value: bool) -> bool:
    """Store *value* as the exception guard state; returns the stored value."""
    guard_key = "reentrancy_guard_exception_active"
    return _set_context_and_thread_local(
        reentrancy_guard_exception_active_ctx, guard_key, value
    )
|
|
791
|
+
|
|
792
|
+
|
|
793
|
+
def get_or_set_reentrancy_guard_exception_active(
    value_if_not_set: bool,
) -> Tuple[bool, bool]:
    """
    Fetch the exception guard, installing *value_if_not_set* when unset.

    Returns the (flag, value) pair produced by the shared helper.
    """
    guard_key = "reentrancy_guard_exception_active"
    return _get_or_set_context_and_thread_local(
        reentrancy_guard_exception_active_ctx, guard_key, value_if_not_set
    )
|
|
801
|
+
|
|
802
|
+
|
|
803
|
+
def activate_reentrancy_guards_exception() -> bool:
    """Enable both the active and preactive exception guards; always returns True."""
    for enable in (
        set_reentrancy_guard_exception_active,
        set_reentrancy_guard_exception_preactive,
    ):
        enable(True)
    return True
|
|
807
|
+
|
|
808
|
+
|
|
809
|
+
def get_reentrancy_guard_exception_preactive() -> bool:
    """Report whether the exception preactive guard is set in this context."""
    guard_key = "reentrancy_guard_exception_preactive"
    return _get_context_or_thread_local(
        reentrancy_guard_exception_preactive_ctx, guard_key, False
    )
|
|
815
|
+
|
|
816
|
+
|
|
817
|
+
def set_reentrancy_guard_exception_preactive(value: bool) -> bool:
    """Store *value* as the exception preactive guard state; returns the stored value."""
    guard_key = "reentrancy_guard_exception_preactive"
    return _set_context_and_thread_local(
        reentrancy_guard_exception_preactive_ctx, guard_key, value
    )
|
|
823
|
+
|
|
824
|
+
|
|
825
|
+
def get_or_set_reentrancy_guard_exception_preactive(
    value_if_not_set: bool,
) -> Tuple[bool, bool]:
    """
    Fetch the exception preactive guard, installing *value_if_not_set* when unset.

    Returns the (flag, value) pair produced by the shared helper.
    """
    guard_key = "reentrancy_guard_exception_preactive"
    return _get_or_set_context_and_thread_local(
        reentrancy_guard_exception_preactive_ctx, guard_key, value_if_not_set
    )
|
|
833
|
+
|
|
834
|
+
|
|
835
|
+
def activate_reentrancy_guards_exception_preactive() -> bool:
    """Enable the exception preactive guard; returns the stored value."""
    return set_reentrancy_guard_exception_preactive(True)
|
|
837
|
+
|
|
838
|
+
|
|
839
|
+
# Get and set context
|
|
840
|
+
def get_context(lightweight: bool = False) -> Dict[str, Any]:
    """
    Snapshot the current tracing context for propagation into another thread.

    Args:
        lightweight: When True, capture only the trace_id (sufficient for HTTP
            client background threads and much cheaper). When False, capture
            the full guard/suppression state for user-created threads.

    Returns:
        A dict suitable for passing to set_context() on the target thread.

    Performance (from the original implementation):
        - Lightweight mode: ~10us (single ContextVar read)
        - Full mode: ~540us (11 ContextVar reads)
    """
    snapshot: Dict[str, Any] = {"trace_id": get_sf_trace_id()}
    if lightweight:
        # Background HTTP client threads (httplib2/urllib3/httpcore) only
        # need the trace_id; skipping the remaining reads is ~50x faster.
        return snapshot

    # Full context for user threads that need all state.
    snapshot.update(
        {
            "handled_exceptions": get_handled_exceptions(),
            "reentrancy_guard_logging_active": get_reentrancy_guard_logging_active(),
            "reentrancy_guard_logging_preactive": get_reentrancy_guard_logging_preactive(),
            "reentrancy_guard_print_active": get_reentrancy_guard_print_active(),
            "reentrancy_guard_print_preactive": get_reentrancy_guard_print_preactive(),
            "reentrancy_guard_exception_active": get_reentrancy_guard_exception_active(),
            "reentrancy_guard_exception_preactive": get_reentrancy_guard_exception_preactive(),
            "reentrancy_guard_sys_stdout_active": get_reentrancy_guard_sys_stdout_active(),
            "suppress_network_recording": is_network_recording_suppressed(),
            "suppress_log_output": is_log_output_suppressed(),
        }
    )
    return snapshot
|
|
874
|
+
|
|
875
|
+
|
|
876
|
+
def set_context(context) -> None:
    """
    Restore a context snapshot produced by get_context() onto this thread.

    The transient suppress_* flags captured by get_context() are deliberately
    NOT restored here.
    """
    set_sf_trace_id(context.get("trace_id"))
    set_handled_exceptions(context.get("handled_exceptions", set()))

    # Table-driven restore of every reentrancy guard; each defaults to False.
    guard_restorers = (
        (set_reentrancy_guard_logging_active, "reentrancy_guard_logging_active"),
        (set_reentrancy_guard_logging_preactive, "reentrancy_guard_logging_preactive"),
        (set_reentrancy_guard_print_active, "reentrancy_guard_print_active"),
        (set_reentrancy_guard_print_preactive, "reentrancy_guard_print_preactive"),
        (set_reentrancy_guard_exception_active, "reentrancy_guard_exception_active"),
        (
            set_reentrancy_guard_exception_preactive,
            "reentrancy_guard_exception_preactive",
        ),
        (set_reentrancy_guard_sys_stdout_active, "reentrancy_guard_sys_stdout_active"),
    )
    for restore, key in guard_restorers:
        restore(context.get(key, False))
|
|
901
|
+
|
|
902
|
+
|
|
903
|
+
@contextmanager
def suppress_network_recording():
    """Context manager: disable network recording for the enclosed block."""
    reset_token = suppress_network_recording_ctx.set(True)
    try:
        yield
    finally:
        # Restore the previous value even when the body raises.
        suppress_network_recording_ctx.reset(reset_token)
|
|
910
|
+
|
|
911
|
+
|
|
912
|
+
@functools.lru_cache(maxsize=1)
def _get_disabled_route_patterns() -> List[str]:
    """
    Return the route patterns for which network-hop capture is skipped.

    The patterns originate from
    setup_interceptors(routes_to_skip_network_hops=[...]), which itself
    defaults to the SF_DISABLE_INBOUND_NETWORK_TRACING_ON_ROUTES env var.
    Wildcards (* and ?) are supported. Cached because this is consulted on
    every request.

    Examples:
        "/healthz, /metrics" -> ["/healthz", "/metrics"]
        "/admin/*, /api/v1/status*" -> ["/admin/*", "/api/v1/status*"]
    """
    # app_config already holds the parameter or the env-var default.
    route_patterns = getattr(app_config, "_routes_to_skip_network_hops", [])

    if _SF_DEBUG_ENABLED and route_patterns:
        print(
            f"[_get_disabled_route_patterns] Route patterns to skip: {route_patterns}",
            log=False,
        )

    return route_patterns
|
|
937
|
+
|
|
938
|
+
|
|
939
|
+
def _route_matches_pattern(path: str) -> bool:
    """
    Return True when *path* matches any disabled-route glob pattern.

    Matching uses fnmatch semantics: '*' matches any run of characters and
    '?' matches exactly one character.

    Args:
        path: Request path to test (e.g. "/api/v1/users").

    Examples:
        _route_matches_pattern("/healthz") -> True if "/healthz" is a pattern
        _route_matches_pattern("/admin/users") -> True if "/admin/*" is a pattern
    """
    disabled = _get_disabled_route_patterns()
    if not disabled:
        return False

    # Glob-style wildcard matching via fnmatch (* and ?).
    is_match = fnmatch.fnmatch
    for candidate in disabled:
        if is_match(path, candidate):
            if _SF_DEBUG_ENABLED:
                print(
                    f"[_route_matches_pattern] Path '{path}' matches pattern '{candidate}' - suppressing",
                    log=False,
                )
            return True

    return False
|
|
973
|
+
|
|
974
|
+
|
|
975
|
+
def is_network_recording_suppressed() -> bool:
    """
    Check if network recording is suppressed.

    Checks two suppression mechanisms (either one triggers suppression):
    1. Explicit suppression via context manager or decorator (suppress_network_recording_ctx)
    2. Route-based suppression via SF_DISABLE_INBOUND_NETWORK_TRACING_ON_ROUTES

    NOTE(review): an earlier docstring also listed a thread-local C telemetry
    guard (g_in_telemetry_send) as a third mechanism, but this function does
    not consult it - presumably the C extension enforces that guard itself;
    confirm before relying on it here.

    Returns:
        True if network recording is suppressed by any mechanism, False otherwise.
    """
    # Check explicit suppression (context manager / decorator)
    if suppress_network_recording_ctx.get():
        return True

    # Check route-based suppression
    current_path = get_current_request_path()
    if current_path and _route_matches_pattern(current_path):
        return True

    return False
|
|
997
|
+
|
|
998
|
+
|
|
999
|
+
@contextmanager
def suppress_log_output():
    """Context manager: suppress captured log output for the enclosed block."""
    reset_token = suppress_log_output_ctx.set(True)
    try:
        yield
    finally:
        # Restore the previous value even when the body raises.
        suppress_log_output_ctx.reset(reset_token)
|
|
1006
|
+
|
|
1007
|
+
|
|
1008
|
+
def is_log_output_suppressed() -> bool:
    """True while execution is inside a suppress_log_output() block on this context."""
    return suppress_log_output_ctx.get()
|
|
1010
|
+
|
|
1011
|
+
|
|
1012
|
+
# Current request path functions (for route-based suppression)
|
|
1013
|
+
def set_current_request_path(path: str) -> None:
    """Record the request path used by route-based network suppression."""
    current_request_path_ctx.set(path)
|
|
1016
|
+
|
|
1017
|
+
|
|
1018
|
+
def get_current_request_path() -> Optional[str]:
    """Return the request path recorded for this context, if any."""
    return current_request_path_ctx.get()
|
|
1021
|
+
|
|
1022
|
+
|
|
1023
|
+
def clear_current_request_path() -> None:
    """Forget the recorded request path (called at end of request)."""
    current_request_path_ctx.set(None)
|
|
1026
|
+
|
|
1027
|
+
|
|
1028
|
+
# Function span capture override functions (ultra-fast, <10ns)
|
|
1029
|
+
def get_funcspan_override() -> Optional[str]:
    """Return the function-span capture override header value (ContextVar read, ~8ns)."""
    return funcspan_override_ctx.get()
|
|
1032
|
+
|
|
1033
|
+
|
|
1034
|
+
def set_funcspan_override(value: Optional[str]) -> Optional[str]:
    """
    Store the function-span capture override header value.

    Writes the Python ContextVar (async-safe) and, for truthy values, mirrors
    the override into the C thread-local so the native profiler sees it
    without a Python callback.

    Returns:
        The value that was stored.
    """
    funcspan_override_ctx.set(value)

    # Mirror into the C thread-local: the profiler's config_lookup() consults
    # g_thread_config.has_override before any ContextVar fallback.
    if value:
        try:
            from . import _sffuncspan_config

            _sffuncspan_config.set_thread_override(value)
        except Exception:
            # Native extension may be absent; the ContextVar alone still works.
            pass

    return value
|
|
1053
|
+
|
|
1054
|
+
|
|
1055
|
+
def clear_funcspan_override() -> None:
    """
    Clear the function-span capture override.

    Clears BOTH the Python ContextVar and the C thread-local so the profiler
    and Python code agree that no override is active.
    """
    funcspan_override_ctx.set(None)

    # Keep the C thread-local in sync with the cleared ContextVar.
    try:
        from . import _sffuncspan_config

        _sffuncspan_config.clear_thread_override()
    except Exception:
        # Native extension may be absent; nothing further to clear.
        pass
|
|
1069
|
+
|
|
1070
|
+
|
|
1071
|
+
def _get_funcspan_override_for_c() -> Optional[str]:
    """
    C-extension bridge: read the funcspan override from the ContextVar.

    _sffuncspan_config.c calls this as a fallback when its thread-local
    g_thread_config is empty (which happens after async thread switches).
    The ContextVar follows async context correctly, so the returned value is
    the right override for the current task.

    Cost is ~100-200ns (Python call + GIL + ContextVar read), but it is only
    paid once per thread switch; later lookups hit the cached C thread-local.
    """
    return funcspan_override_ctx.get()
|
|
1083
|
+
|
|
1084
|
+
|
|
1085
|
+
# ================================
|
|
1086
|
+
# Current Function Span ID (synced from C profiler for async-safety)
|
|
1087
|
+
# ================================
|
|
1088
|
+
|
|
1089
|
+
|
|
1090
|
+
def get_current_function_span_id() -> Optional[str]:
    """
    Return the span ID of the currently executing function span, if any.

    Async-safe: reads the ContextVar first (synced by the C profiler on every
    span push/pop, ~50ns), then falls back to the C thread-local (~10-20ns)
    for sync code that runs before the profiler starts syncing.

    Returns:
        The current span ID, or None when function spans are disabled, no
        span is active, or the C extension is unavailable.
    """
    if not _HAS_FUNCSPAN_NATIVE:
        return None

    # ContextVar first: isolated per async task.
    ctx_span_id = current_span_id_ctx.get()
    if ctx_span_id is not None:
        return ctx_span_id

    # C thread-local fallback; rarely hit once the profiler syncs the
    # ContextVar on every push/pop.
    try:
        return _sffuncspan.get_current_span_id()
    except Exception:
        return None
|
|
1123
|
+
|
|
1124
|
+
|
|
1125
|
+
def _set_current_span_id(span_id: Optional[str]) -> None:
    """
    Sync hook for the C profiler (_sffuncspan.c).

    Mirrors the thread-local current span ID into the async-safe ContextVar
    on every span push/pop. Pass None to clear.

    Internal only - DO NOT call directly from Python code.
    """
    current_span_id_ctx.set(span_id)
|
|
1138
|
+
|
|
1139
|
+
|
|
1140
|
+
# ================================
|
|
1141
|
+
# Outbound header generation (ultra-fast header injection with shared registry + ContextVar)
|
|
1142
|
+
# ================================
|
|
1143
|
+
|
|
1144
|
+
|
|
1145
|
+
def set_outbound_header_base(
    base_trace: str, parent_trace_id: str, funcspan: Optional[str]
) -> None:
    """
    Store base outbound-header info in the ContextVar (async-safe).

    **OPTIMIZATION:** When LD_PRELOAD is active, the full headers dict is
    pre-built here so every outbound call can reuse it (~10ns vs 1-10us per
    call); the C interceptor appends the per-request UUID at socket time.
    In Python SSL mode no cache is stored, forcing per-call UUID generation
    in get_outbound_headers_with_new_uuid().

    Args:
        base_trace: Base trace path (e.g. "session_id/page_visit_id") used to
            build new X-Sf3-Rid values.
        parent_trace_id: FULL incoming trace_id (e.g.
            "session_id/page_visit_id/request_uuid") used verbatim for
            X-Sf4-Prid.
        funcspan: Optional function-span capture override header value.

    Performance: <1us (dict creation + ContextVar set).
    """
    # Read the preload state exactly once; see CONSISTENCY FIX below.
    ld_preload_enabled = is_ld_preload_active()

    cached_headers = None  # Only populated when LD_PRELOAD is active

    if ld_preload_enabled:
        # LD_PRELOAD mode: cache partial headers now; C appends the UUID at
        # socket-intercept time.
        cached_headers = {
            SAILFISH_TRACING_HEADER: base_trace,
            PARENT_SESSION_ID_HEADER: parent_trace_id,
        }
        if funcspan:
            cached_headers[FUNCSPAN_OVERRIDE_HEADER] = funcspan
        base_dict = {
            "base_trace": base_trace,
            "parent_trace_id": parent_trace_id,
            # Falsy funcspan values were stored as None by the original
            # branches; preserved here.
            "funcspan": funcspan if funcspan else None,
            "_cached_headers": cached_headers,
        }
    else:
        # Python SSL mode: omit _cached_headers so headers (and a fresh UUID)
        # are generated per call in get_outbound_headers_with_new_uuid().
        base_dict = {
            "base_trace": base_trace,
            "parent_trace_id": parent_trace_id,
            "funcspan": funcspan,
        }

    # Store in ContextVar only (no shared registry)
    outbound_header_base_ctx.set(base_dict)

    # DEBUG: Log when outbound header base is set (helps troubleshoot X-Sf4-Prid issues)
    if _SF_DEBUG_ENABLED:
        print(
            "[set_outbound_header_base] Set parent_trace_id="
            f"{parent_trace_id}, base_trace={base_trace}, "
            f"cached_headers={cached_headers}",
            log=False,
        )

    # CONSISTENCY FIX: the original consulted the module global
    # _ld_preload_active here while using is_ld_preload_active() above,
    # allowing the two decisions to diverge within a single call. Reuse the
    # single value read at the top instead.
    if not ld_preload_enabled:
        _set_c_tls_parent_trace_id(parent_trace_id)
|
|
1221
|
+
|
|
1222
|
+
|
|
1223
|
+
def get_outbound_headers_with_new_uuid() -> dict:
    """
    Build the outbound tracing headers, appending a fresh UUID to the base trace.

    **LD_PRELOAD active:** returns a copy of the pre-built headers cached by
    set_outbound_header_base() (~10-20ns, no lock); the C layer appends the
    UUID at socket-intercept time.

    **LD_PRELOAD inactive:** generates a UUID here (~100ns) and assembles the
    headers fresh on every call.

    Returns:
        Dict with X-Sf3-Rid, X-Sf4-Prid, and optionally
        X-Sf3-FunctionSpanCaptureOverride; empty dict when no base was
        initialized.
    """
    # Base info lives in the ContextVar only (no shared-registry fallback).
    base_dict = outbound_header_base_ctx.get()
    if not base_dict:
        if _SF_DEBUG_ENABLED:
            print(
                "[get_outbound_headers_with_new_uuid] ⚠️ No outbound header base found in ContextVar",
                log=False,
            )
        return {}

    # FAST PATH (LD_PRELOAD): hand back a copy of the pre-built headers.
    prebuilt = base_dict.get("_cached_headers")
    if prebuilt:
        if _SF_DEBUG_ENABLED:
            print(
                f"[get_outbound_headers_with_new_uuid] ⚡ Returning pre-built headers: {prebuilt}",
                log=False,
            )
        # Shallow copy so caller mutations cannot corrupt the cached dict.
        return dict(prebuilt)

    # SLOW PATH (Python-only mode): build headers with a freshly drawn UUID.
    if _SF_DEBUG_ENABLED:
        print(
            "[get_outbound_headers_with_new_uuid] 🐌 LD_PRELOAD inactive - generating UUID in Python",
            log=False,
        )

    base_trace = base_dict.get("base_trace")
    parent_trace_id = base_dict.get("parent_trace_id")
    funcspan = base_dict.get("funcspan")

    if not (base_trace and parent_trace_id):
        if _SF_DEBUG_ENABLED:
            print(
                "[get_outbound_headers_with_new_uuid] ⚠️ Missing base_trace or parent_trace_id!",
                log=False,
            )
        return {}

    # Pre-generated UUIDs from the ring buffer are far cheaper than uuid4().
    fresh_uuid = _get_pregenerated_uuid()

    headers = {
        SAILFISH_TRACING_HEADER: f"{base_trace}/{fresh_uuid}",  # X-Sf3-Rid: session/page/uuid (Python)
        PARENT_SESSION_ID_HEADER: parent_trace_id,
    }
    if funcspan:
        headers[FUNCSPAN_OVERRIDE_HEADER] = funcspan

    return headers
|
|
1300
|
+
|
|
1301
|
+
|
|
1302
|
+
import logging

# Include httpcore/h11/h2 because we now call httpcore directly.
_HTTPX_LOGGERS = ("httpx", "httpcore", "h11", "h2")


@contextmanager
def suppress_logs():
    """Temporarily silence client libraries without touching global logging config."""
    targets = [logging.getLogger(name) for name in _HTTPX_LOGGERS]
    saved_states = [target.disabled for target in targets]
    try:
        for target in targets:
            target.disabled = True
        yield
    finally:
        # Restore each logger exactly as we found it, even on exception.
        for target, was_disabled in zip(targets, saved_states):
            target.disabled = was_disabled
|