sf-veritas 0.11.10__cp314-cp314-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sf_veritas/__init__.py +46 -0
- sf_veritas/_auto_preload.py +73 -0
- sf_veritas/_sfconfig.c +162 -0
- sf_veritas/_sfconfig.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sfcrashhandler.c +267 -0
- sf_veritas/_sfcrashhandler.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sffastlog.c +953 -0
- sf_veritas/_sffastlog.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sffastnet.c +994 -0
- sf_veritas/_sffastnet.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sffastnetworkrequest.c +727 -0
- sf_veritas/_sffastnetworkrequest.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sffuncspan.c +2791 -0
- sf_veritas/_sffuncspan.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sffuncspan_config.c +730 -0
- sf_veritas/_sffuncspan_config.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sfheadercheck.c +341 -0
- sf_veritas/_sfheadercheck.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sfnetworkhop.c +1454 -0
- sf_veritas/_sfnetworkhop.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sfservice.c +1223 -0
- sf_veritas/_sfservice.cpython-314-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sfteepreload.c +6227 -0
- sf_veritas/app_config.py +57 -0
- sf_veritas/cli.py +336 -0
- sf_veritas/constants.py +10 -0
- sf_veritas/custom_excepthook.py +304 -0
- sf_veritas/custom_log_handler.py +146 -0
- sf_veritas/custom_output_wrapper.py +153 -0
- sf_veritas/custom_print.py +153 -0
- sf_veritas/django_app.py +5 -0
- sf_veritas/env_vars.py +186 -0
- sf_veritas/exception_handling_middleware.py +18 -0
- sf_veritas/exception_metaclass.py +69 -0
- sf_veritas/fast_frame_info.py +116 -0
- sf_veritas/fast_network_hop.py +293 -0
- sf_veritas/frame_tools.py +112 -0
- sf_veritas/funcspan_config_loader.py +693 -0
- sf_veritas/function_span_profiler.py +1313 -0
- sf_veritas/get_preload_path.py +34 -0
- sf_veritas/import_hook.py +62 -0
- sf_veritas/infra_details/__init__.py +3 -0
- sf_veritas/infra_details/get_infra_details.py +24 -0
- sf_veritas/infra_details/kubernetes/__init__.py +3 -0
- sf_veritas/infra_details/kubernetes/get_cluster_name.py +147 -0
- sf_veritas/infra_details/kubernetes/get_details.py +7 -0
- sf_veritas/infra_details/running_on/__init__.py +17 -0
- sf_veritas/infra_details/running_on/kubernetes.py +11 -0
- sf_veritas/interceptors.py +543 -0
- sf_veritas/libsfnettee.so +0 -0
- sf_veritas/local_env_detect.py +118 -0
- sf_veritas/package_metadata.py +6 -0
- sf_veritas/patches/__init__.py +0 -0
- sf_veritas/patches/_patch_tracker.py +74 -0
- sf_veritas/patches/concurrent_futures.py +19 -0
- sf_veritas/patches/constants.py +1 -0
- sf_veritas/patches/exceptions.py +82 -0
- sf_veritas/patches/multiprocessing.py +32 -0
- sf_veritas/patches/network_libraries/__init__.py +99 -0
- sf_veritas/patches/network_libraries/aiohttp.py +294 -0
- sf_veritas/patches/network_libraries/curl_cffi.py +363 -0
- sf_veritas/patches/network_libraries/http_client.py +670 -0
- sf_veritas/patches/network_libraries/httpcore.py +580 -0
- sf_veritas/patches/network_libraries/httplib2.py +315 -0
- sf_veritas/patches/network_libraries/httpx.py +557 -0
- sf_veritas/patches/network_libraries/niquests.py +218 -0
- sf_veritas/patches/network_libraries/pycurl.py +399 -0
- sf_veritas/patches/network_libraries/requests.py +595 -0
- sf_veritas/patches/network_libraries/ssl_socket.py +822 -0
- sf_veritas/patches/network_libraries/tornado.py +360 -0
- sf_veritas/patches/network_libraries/treq.py +270 -0
- sf_veritas/patches/network_libraries/urllib_request.py +483 -0
- sf_veritas/patches/network_libraries/utils.py +598 -0
- sf_veritas/patches/os.py +17 -0
- sf_veritas/patches/threading.py +231 -0
- sf_veritas/patches/web_frameworks/__init__.py +54 -0
- sf_veritas/patches/web_frameworks/aiohttp.py +798 -0
- sf_veritas/patches/web_frameworks/async_websocket_consumer.py +337 -0
- sf_veritas/patches/web_frameworks/blacksheep.py +532 -0
- sf_veritas/patches/web_frameworks/bottle.py +513 -0
- sf_veritas/patches/web_frameworks/cherrypy.py +683 -0
- sf_veritas/patches/web_frameworks/cors_utils.py +122 -0
- sf_veritas/patches/web_frameworks/django.py +963 -0
- sf_veritas/patches/web_frameworks/eve.py +401 -0
- sf_veritas/patches/web_frameworks/falcon.py +931 -0
- sf_veritas/patches/web_frameworks/fastapi.py +738 -0
- sf_veritas/patches/web_frameworks/flask.py +526 -0
- sf_veritas/patches/web_frameworks/klein.py +501 -0
- sf_veritas/patches/web_frameworks/litestar.py +616 -0
- sf_veritas/patches/web_frameworks/pyramid.py +440 -0
- sf_veritas/patches/web_frameworks/quart.py +841 -0
- sf_veritas/patches/web_frameworks/robyn.py +708 -0
- sf_veritas/patches/web_frameworks/sanic.py +874 -0
- sf_veritas/patches/web_frameworks/starlette.py +742 -0
- sf_veritas/patches/web_frameworks/strawberry.py +1446 -0
- sf_veritas/patches/web_frameworks/tornado.py +485 -0
- sf_veritas/patches/web_frameworks/utils.py +170 -0
- sf_veritas/print_override.py +13 -0
- sf_veritas/regular_data_transmitter.py +444 -0
- sf_veritas/request_interceptor.py +401 -0
- sf_veritas/request_utils.py +550 -0
- sf_veritas/segfault_handler.py +116 -0
- sf_veritas/server_status.py +1 -0
- sf_veritas/shutdown_flag.py +11 -0
- sf_veritas/subprocess_startup.py +3 -0
- sf_veritas/test_cli.py +145 -0
- sf_veritas/thread_local.py +1319 -0
- sf_veritas/timeutil.py +114 -0
- sf_veritas/transmit_exception_to_sailfish.py +28 -0
- sf_veritas/transmitter.py +132 -0
- sf_veritas/types.py +47 -0
- sf_veritas/unified_interceptor.py +1678 -0
- sf_veritas/utils.py +39 -0
- sf_veritas-0.11.10.dist-info/METADATA +97 -0
- sf_veritas-0.11.10.dist-info/RECORD +141 -0
- sf_veritas-0.11.10.dist-info/WHEEL +5 -0
- sf_veritas-0.11.10.dist-info/entry_points.txt +2 -0
- sf_veritas-0.11.10.dist-info/top_level.txt +1 -0
- sf_veritas.libs/libbrotlicommon-6ce2a53c.so.1.0.6 +0 -0
- sf_veritas.libs/libbrotlidec-811d1be3.so.1.0.6 +0 -0
- sf_veritas.libs/libcom_err-730ca923.so.2.1 +0 -0
- sf_veritas.libs/libcrypt-52aca757.so.1.1.0 +0 -0
- sf_veritas.libs/libcrypto-bdaed0ea.so.1.1.1k +0 -0
- sf_veritas.libs/libcurl-eaa3cf66.so.4.5.0 +0 -0
- sf_veritas.libs/libgssapi_krb5-323bbd21.so.2.2 +0 -0
- sf_veritas.libs/libidn2-2f4a5893.so.0.3.6 +0 -0
- sf_veritas.libs/libk5crypto-9a74ff38.so.3.1 +0 -0
- sf_veritas.libs/libkeyutils-2777d33d.so.1.6 +0 -0
- sf_veritas.libs/libkrb5-a55300e8.so.3.3 +0 -0
- sf_veritas.libs/libkrb5support-e6594cfc.so.0.1 +0 -0
- sf_veritas.libs/liblber-2-d20824ef.4.so.2.10.9 +0 -0
- sf_veritas.libs/libldap-2-cea2a960.4.so.2.10.9 +0 -0
- sf_veritas.libs/libnghttp2-39367a22.so.14.17.0 +0 -0
- sf_veritas.libs/libpcre2-8-516f4c9d.so.0.7.1 +0 -0
- sf_veritas.libs/libpsl-99becdd3.so.5.3.1 +0 -0
- sf_veritas.libs/libsasl2-7de4d792.so.3.0.0 +0 -0
- sf_veritas.libs/libselinux-d0805dcb.so.1 +0 -0
- sf_veritas.libs/libssh-c11d285b.so.4.8.7 +0 -0
- sf_veritas.libs/libssl-60250281.so.1.1.1k +0 -0
- sf_veritas.libs/libunistring-05abdd40.so.2.1.0 +0 -0
- sf_veritas.libs/libuuid-95b83d40.so.1.3.0 +0 -0
|
@@ -0,0 +1,1678 @@
|
|
|
1
|
+
import atexit
|
|
2
|
+
import builtins
|
|
3
|
+
import functools
|
|
4
|
+
import inspect
|
|
5
|
+
import logging
|
|
6
|
+
import os
|
|
7
|
+
import signal
|
|
8
|
+
import sys
|
|
9
|
+
import threading
|
|
10
|
+
import traceback
|
|
11
|
+
from types import ModuleType
|
|
12
|
+
from typing import Dict, List, Optional, Union
|
|
13
|
+
|
|
14
|
+
from pydantic import validate_call
|
|
15
|
+
|
|
16
|
+
from . import app_config
|
|
17
|
+
from .custom_excepthook import (
|
|
18
|
+
custom_excepthook,
|
|
19
|
+
custom_thread_excepthook,
|
|
20
|
+
start_profiling,
|
|
21
|
+
)
|
|
22
|
+
from .custom_log_handler import CustomLogHandler
|
|
23
|
+
from .env_vars import (
|
|
24
|
+
LOG_LEVEL,
|
|
25
|
+
PRINT_CONFIGURATION_STATUSES,
|
|
26
|
+
SF_DEBUG,
|
|
27
|
+
SF_DISABLE_INBOUND_NETWORK_TRACING_ON_ROUTES,
|
|
28
|
+
SF_DISABLE_PARENT_DEATH_SIGNAL,
|
|
29
|
+
SF_PARENT_MONITOR_INTERVAL_MS,
|
|
30
|
+
)
|
|
31
|
+
from .exception_metaclass import PatchedException
|
|
32
|
+
from .interceptors import PrintInterceptor
|
|
33
|
+
from .local_env_detect import set_sf_is_local_flag
|
|
34
|
+
from .patches.network_libraries import patch_all_http_clients
|
|
35
|
+
|
|
36
|
+
# from .patches.threading import patch_threading
|
|
37
|
+
from .patches.web_frameworks import patch_web_frameworks
|
|
38
|
+
from .shutdown_flag import set_shutdown_flag
|
|
39
|
+
from .thread_local import (
|
|
40
|
+
_thread_locals,
|
|
41
|
+
get_current_function_span_id,
|
|
42
|
+
get_or_set_sf_trace_id,
|
|
43
|
+
get_reentrancy_guard_sys_stdout_active,
|
|
44
|
+
)
|
|
45
|
+
from .timeutil import TimeSync
|
|
46
|
+
|
|
47
|
+
# Optional native fast path for prints (C extension)
try:
    from . import _sffastlog  # provides init_print() and print_()

    _FAST_OK = True
except Exception:
    # Broad except is deliberate: any import failure (missing .so, ABI
    # mismatch, unsupported platform) must degrade to the pure-Python
    # path instead of breaking package import.
    _sffastlog = None
    _FAST_OK = False

_FAST_PRINT_READY = False  # one-time guard for native print init
|
|
57
|
+
|
|
58
|
+
# Optional native fast path for service operations (C extension)
try:
    from . import _sfservice  # provides service_identifier(), collect_metadata(), etc.

    _SFSERVICE_OK = True
except Exception:
    # Broad except is deliberate: the C extension is optional and any
    # load failure falls back to the Python implementation.
    _sfservice = None
    _SFSERVICE_OK = False

_SFSERVICE_READY = False  # one-time guard for native service init
|
|
68
|
+
|
|
69
|
+
# Optional native fast path for function spans (C extension)
try:
    import sf_veritas._sffuncspan as _sffuncspan

    from .function_span_profiler import init_function_span_profiler

    _FUNCSPAN_OK = True
except Exception as import_error:
    # Degrade gracefully: without the C extension, function span
    # profiling is simply unavailable.
    _sffuncspan = None
    _FUNCSPAN_OK = False
    if os.getenv("SF_DEBUG", "false").lower() == "true":
        # `traceback` is already imported at module level; the previous
        # redundant local re-import has been removed.
        print(
            f"[[DEBUG]] Failed to import _sffuncspan C extension: {import_error}",
            file=sys.stderr,
        )
        traceback.print_exc(file=sys.stderr)

_FUNCSPAN_READY = False  # one-time guard for native funcspan init
_FUNCSPAN_PROFILER = None  # global profiler instance
|
|
90
|
+
|
|
91
|
+
# GraphQL mutation string for prints — keep schema identical to server
# NOTE: these mutation documents are pre-minified (no whitespace) so they
# can be handed to the C extensions verbatim; do not reformat the strings.
_COLLECT_PRINT_MUTATION = (
    "mutation CollectPrintStatements("
    "$apiKey: String!,"
    "$serviceUuid: String!,"
    "$sessionId: String!,"
    "$contents: String!,"
    "$reentrancyGuardPreactive: Boolean!,"
    "$library: String!,"
    "$timestampMs: String!,"
    "$version: String!"
    "){collectPrintStatements("
    "apiKey:$apiKey,serviceUuid:$serviceUuid,sessionId:$sessionId,"
    "contents:$contents,reentrancyGuardPreactive:$reentrancyGuardPreactive,"
    "library:$library,timestampMs:$timestampMs,version:$version)}"
)

# GraphQL mutation string for function spans
_COLLECT_FUNCTION_SPAN_MUTATION = (
    "mutation CollectFunctionSpans("
    "$apiKey: String!,"
    "$serviceUuid: String!,"
    "$library: String!,"
    "$version: String!,"
    "$sessionId: String!,"
    "$spanId: String!,"
    "$parentSpanId: String,"
    "$filePath: String!,"
    "$lineNumber: Int!,"
    "$columnNumber: Int!,"
    "$functionName: String!,"
    "$arguments: String!,"
    "$returnValue: String,"
    "$startTimeNs: String!,"
    "$durationNs: String!,"
    "$timestampMs: String!"
    "){collectFunctionSpans("
    "apiKey:$apiKey,serviceUuid:$serviceUuid,library:$library,version:$version,"
    "sessionId:$sessionId,spanId:$spanId,parentSpanId:$parentSpanId,"
    "filePath:$filePath,lineNumber:$lineNumber,columnNumber:$columnNumber,"
    "functionName:$functionName,arguments:$arguments,returnValue:$returnValue,"
    "startTimeNs:$startTimeNs,durationNs:$durationNs,timestampMs:$timestampMs)}"
)

# GraphQL mutation string for service identification
_IDENTIFY_SERVICE_DETAILS_MUTATION = (
    "mutation IdentifyServiceDetails("
    "$apiKey: String!,"
    "$serviceUuid: String!,"
    "$library: String!,"
    "$version: String!,"
    "$serviceIdentifier: String!,"
    "$serviceVersion: String!,"
    "$serviceDisplayName: String,"
    "$serviceAdditionalMetadata: JSON,"
    "$gitSha: String!,"
    "$infrastructureType: String!,"
    "$infrastructureDetails: JSON,"
    "$setupInterceptorsFilePath: String!,"
    "$setupInterceptorsLineNumber: Int!,"
    "$timestampMs: String!"
    "){identifyServiceDetails("
    "apiKey:$apiKey,serviceUuid:$serviceUuid,library:$library,version:$version,"
    "serviceIdentifier:$serviceIdentifier,serviceVersion:$serviceVersion,"
    "serviceDisplayName:$serviceDisplayName,"
    "serviceAdditionalMetadata:$serviceAdditionalMetadata,gitSha:$gitSha,"
    "infrastructureType:$infrastructureType,infrastructureDetails:$infrastructureDetails,"
    "setupInterceptorsFilePath:$setupInterceptorsFilePath,"
    "setupInterceptorsLineNumber:$setupInterceptorsLineNumber,timestampMs:$timestampMs)}"
)

# GraphQL mutation string for update service details
# NOTE(review): the sibling C-extension update_service channel is
# intentionally not initialized (see _ensure_service_initialized); this
# mutation is presumably used by the Python fallback — confirm.
_UPDATE_SERVICE_DETAILS_MUTATION = (
    "mutation UpdateServiceDetails("
    "$apiKey: String!,"
    "$serviceUuid: String!,"
    "$timestampMs: String!,"
    "$serviceIdentifier: String,"
    "$serviceVersion: String,"
    "$serviceAdditionalMetadata: JSON,"
    "$gitSha: String"
    "){updateServiceDetails("
    "apiKey:$apiKey,serviceUuid:$serviceUuid,timestampMs:$timestampMs,"
    "serviceIdentifier:$serviceIdentifier,serviceVersion:$serviceVersion,"
    "serviceAdditionalMetadata:$serviceAdditionalMetadata,gitSha:$gitSha)}"
)

# GraphQL mutation string for collect metadata
_COLLECT_METADATA_MUTATION = (
    "mutation CollectMetadata("
    "$apiKey: String!,"
    "$serviceUuid: String!,"
    "$library: String!,"
    "$version: String!,"
    "$sessionId: String!,"
    "$userId: String!,"
    "$traitsJson: String!,"
    "$excludedFields: [String!]!,"
    "$override: Boolean!,"
    "$timestampMs: String!"
    "){collectMetadata("
    "apiKey:$apiKey,serviceUuid:$serviceUuid,library:$library,version:$version,"
    "sessionId:$sessionId,userId:$userId,traitsJson:$traitsJson,"
    "excludedFields:$excludedFields,override:$override,timestampMs:$timestampMs)}"
)

# GraphQL mutation string for domains to not pass header to
_DOMAINS_TO_NOT_PASS_HEADER_TO_MUTATION = (
    "mutation DomainsToNotPassHeaderTo("
    "$apiKey: String!,"
    "$serviceUuid: String!,"
    "$library: String!,"
    "$version: String!,"
    "$domains: [String!]!,"
    "$timestampMs: String!"
    "){domainsToNotPassHeaderTo("
    "apiKey:$apiKey,serviceUuid:$serviceUuid,library:$library,version:$version,"
    "domains:$domains,timestampMs:$timestampMs)}"
)

# Path fragments that mark a stack frame as library/interpreter code rather
# than application code. NOTE(review): presumably consumed by the caller-
# location logic (frame_tools / fast_frame_info) to skip non-user frames —
# confirm against those modules; the usage is not visible in this file.
STRINGS_NOT_FOUND_IN_CALLER_LOCATIONS = {
    "site-packages",
    "dist-packages",
    "venv",
    "/lib/python",
    "\\lib\\python",
    "sf-veritas",
}
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
def _ensure_fast_print_initialized() -> bool:
    """One-time lazy initialization of the native (C) print pipeline.

    Becomes a cheap boolean check once ``_sffastlog.init_print`` has
    succeeded. Returns False while the C extension is unavailable or the
    Sailfish endpoint/credentials have not been configured yet; any
    exception from the native init also leaves the fast path disabled.
    """
    global _FAST_PRINT_READY

    # Fast exits: extension missing, or already initialized.
    if not _FAST_OK:
        return _FAST_PRINT_READY
    if _FAST_PRINT_READY:
        return True

    cfg_endpoint = getattr(app_config, "_sailfish_graphql_endpoint", None)
    cfg_api_key = getattr(app_config, "_sailfish_api_key", None)
    cfg_service_uuid = getattr(app_config, "_service_uuid", None)
    cfg_library = getattr(app_config, "library", "sailfish-python")
    cfg_version = getattr(app_config, "version", "0.0.0")
    use_http2 = 1 if os.getenv("SF_NBPOST_HTTP2", "0") == "1" else 0

    # All three of endpoint, api key and service uuid are required.
    if not cfg_endpoint or not cfg_api_key or not cfg_service_uuid:
        return False

    try:
        initialized = _sffastlog.init_print(
            url=cfg_endpoint,
            query=_COLLECT_PRINT_MUTATION,
            api_key=str(cfg_api_key),
            service_uuid=str(cfg_service_uuid),
            library=str(cfg_library),
            version=str(cfg_version),
            http2=use_http2,
        )
    except Exception:
        _FAST_PRINT_READY = False
    else:
        _FAST_PRINT_READY = bool(initialized)

    return _FAST_PRINT_READY
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
def _ensure_funcspan_initialized() -> bool:
    """
    Lazily init the native function span path; becomes a cheap bool check after first success.

    Reads all configuration from app_config and SF_FUNCSPAN_* / SF_ENABLE_*
    environment variables, starts the profiler via
    init_function_span_profiler(), best-effort loads .sailfish config files,
    and finally applies the SF_ENABLE_FUNCTION_SPANS master kill switch.
    Returns True only after the profiler has been successfully created.
    """
    global _FUNCSPAN_READY, _FUNCSPAN_PROFILER

    # PERFORMANCE: Skip function span profiler when testing network library only
    if os.getenv("TESTING_NETWORK_LIBRARY_ONLY", "0") == "1":
        if SF_DEBUG:
            print(
                "[[DEBUG]] Function span profiler: Disabled (TESTING_NETWORK_LIBRARY_ONLY=1)",
                log=False,
            )
        return False

    if not _FUNCSPAN_OK:
        if SF_DEBUG:
            print(
                "[[DEBUG]] Function span profiler: C extension not available (_FUNCSPAN_OK=False)",
                log=False,
            )
        return False

    if _FUNCSPAN_READY:
        return _FUNCSPAN_READY

    endpoint = getattr(app_config, "_sailfish_graphql_endpoint", None)
    api_key = getattr(app_config, "_sailfish_api_key", None)
    service_uuid = getattr(app_config, "_service_uuid", None)
    library = getattr(app_config, "library", "sailfish-python")
    version = getattr(app_config, "version", "0.0.0")
    http2 = 1 if os.getenv("SF_NBPOST_HTTP2", "0") == "1" else 0

    # Get function span config from env vars
    enable_profiler = os.getenv("SF_ENABLE_PROFILER", "false").lower() == "true"
    if SF_DEBUG:
        print(
            f"[[DEBUG]] Function span profiler: SF_ENABLE_PROFILER={os.getenv('SF_ENABLE_PROFILER')} -> enable_profiler={enable_profiler}",
            log=False,
        )

    if not enable_profiler:
        if SF_DEBUG:
            print(
                "[[DEBUG]] Function span profiler: Disabled (SF_ENABLE_PROFILER not 'true')",
                log=False,
            )
        return False

    if not (endpoint and api_key and service_uuid):
        if SF_DEBUG:
            print(
                f"[[DEBUG]] Function span profiler: Missing config (endpoint={bool(endpoint)}, api_key={bool(api_key)}, service_uuid={bool(service_uuid)})",
                log=False,
            )
        return False

    # Configuration options
    variable_capture_size_limit_mb = int(os.getenv("SF_FUNCSPAN_VAR_LIMIT_MB", "1"))
    capture_from_installed_libraries = (
        os.getenv("SF_FUNCSPAN_CAPTURE_LIBRARIES", "").split(",")
        if os.getenv("SF_FUNCSPAN_CAPTURE_LIBRARIES")
        else []
    )
    sample_rate = float(os.getenv("SF_FUNCSPAN_SAMPLE_RATE", "1.0"))
    enable_sampling = (
        os.getenv("SF_FUNCSPAN_ENABLE_SAMPLING", "false").lower() == "true"
    )
    include_django_view_functions = (
        os.getenv("SF_FUNCSPAN_INCLUDE_DJANGO_VIEW_FUNCTIONS", "false").lower()
        == "true"
    )

    try:
        # auto_start=True: the profiler begins emitting spans immediately.
        _FUNCSPAN_PROFILER = init_function_span_profiler(
            url=endpoint,
            query=_COLLECT_FUNCTION_SPAN_MUTATION,
            api_key=str(api_key),
            service_uuid=str(service_uuid),
            library=str(library),
            version=str(version),
            http2=(http2 == 1),
            variable_capture_size_limit_mb=variable_capture_size_limit_mb,
            capture_from_installed_libraries=capture_from_installed_libraries,
            sample_rate=sample_rate,
            enable_sampling=enable_sampling,
            include_django_view_functions=include_django_view_functions,
            auto_start=True,
        )

        # Load .sailfish configuration files (directory/file/function-level configs)
        try:
            from .funcspan_config_loader import FunctionSpanConfigLoader

            # Get the directory where setup_interceptors() was called from
            setup_dir = getattr(app_config, "_setup_interceptors_call_filename", None)
            root_paths = []

            if setup_dir:
                # Use the directory containing the file that called setup_interceptors()
                root_paths.append(os.path.dirname(os.path.abspath(setup_dir)))

            # Also add current working directory
            root_paths.append(os.getcwd())

            if SF_DEBUG:
                print(
                    f"[[DEBUG]] Loading .sailfish configs from: {root_paths}",
                    log=False,
                )

            config_loader = FunctionSpanConfigLoader(root_paths)
            config_loader.load_all_configs()

            if SF_DEBUG:
                print(
                    "[[DEBUG]] Function span config loader initialized successfully",
                    log=False,
                )
        except Exception as config_error:
            if SF_DEBUG:
                print(
                    f"[[DEBUG]] Failed to load .sailfish configs (non-fatal): {config_error}",
                    log=False,
                )
            # Config loading is optional - don't fail if it doesn't work

        _FUNCSPAN_READY = True

        # Set master kill switch from SF_ENABLE_FUNCTION_SPANS (defaults to "true")
        sf_enable_env = os.getenv("SF_ENABLE_FUNCTION_SPANS", "true")
        enable_function_spans = sf_enable_env.lower() == "true"

        # NOTE(review): this print is NOT gated by SF_DEBUG, so it fires on
        # every successful init — looks like leftover debug output; confirm
        # whether it should be wrapped in `if SF_DEBUG:`.
        print(
            f"[FuncSpanDebug] SF_ENABLE_FUNCTION_SPANS = '{sf_enable_env}' -> enabled={enable_function_spans}",
            log=False,
        )

        _sffuncspan.set_function_spans_enabled(enable_function_spans)

        if not enable_function_spans:
            print(
                "[FuncSpanDebug] WARNING: Function span profiling is DISABLED by SF_ENABLE_FUNCTION_SPANS",
                log=False,
            )
            print(
                "[FuncSpanDebug] This means parent_span_id will ALWAYS be null!",
                log=False,
            )

        if SF_DEBUG:
            print(
                f"[[DEBUG]] Function span capture: SF_ENABLE_FUNCTION_SPANS={os.getenv('SF_ENABLE_FUNCTION_SPANS', 'true')} -> enabled={enable_function_spans}",
                log=False,
            )

    except Exception as e:
        if SF_DEBUG:
            print(
                f"[[DEBUG]] Failed to initialize function span profiler: {e}", log=False
            )
        _FUNCSPAN_READY = False

    return _FUNCSPAN_READY
|
|
420
|
+
|
|
421
|
+
|
|
422
|
+
def _ensure_service_initialized() -> bool:
    """
    Lazily init the native service operations path (service_identifier, collect_metadata);
    becomes a cheap bool check after first success.

    Returns:
        True once the _sfservice C extension and all of its channels
        (service identifier, collect metadata, domains) are initialized;
        False if the extension is missing, configuration is incomplete,
        or any channel init fails.
    """
    global _SFSERVICE_READY
    if not _SFSERVICE_OK or _SFSERVICE_READY:
        return _SFSERVICE_READY

    endpoint = getattr(app_config, "_sailfish_graphql_endpoint", None)
    api_key = getattr(app_config, "_sailfish_api_key", None)
    service_uuid = getattr(app_config, "_service_uuid", None)
    library = getattr(app_config, "library", "sailfish-python")
    version = getattr(app_config, "version", "0.0.0")
    http2 = 1 if os.getenv("SF_NBPOST_HTTP2", "0") == "1" else 0

    if not (endpoint and api_key and service_uuid):
        return False

    # Identical keyword arguments shared by every channel initializer;
    # only the mutation document differs per channel.
    common_kwargs = dict(
        url=endpoint,
        api_key=str(api_key),
        service_uuid=str(service_uuid),
        library=str(library),
        version=str(version),
        http2=http2,
    )

    try:
        # Order matters: _sfservice.init() starts the sender thread and must
        # run before any channel-specific init. Its query argument is unused
        # (""), only the channel-specific inits carry a real mutation.
        init_steps = (
            (_sfservice.init, ""),
            (_sfservice.init_service_identifier, _IDENTIFY_SERVICE_DETAILS_MUTATION),
            (_sfservice.init_collect_metadata, _COLLECT_METADATA_MUTATION),
            (_sfservice.init_domains, _DOMAINS_TO_NOT_PASS_HEADER_TO_MUTATION),
        )
        for init_fn, mutation in init_steps:
            if not init_fn(query=mutation, **common_kwargs):
                _SFSERVICE_READY = False
                return False

        # NOTE: update_service channel is NOT initialized here because the C implementation
        # is currently incompatible with the GraphQL schema. It will use Python fallback.
        # The C extension has py_update_service() which takes domains[], but the actual
        # GraphQL mutation expects service_identifier, service_version, etc.
        # TODO: Reimplement build_body_update_service() in C to match the schema.

        _SFSERVICE_READY = True
    except Exception as e:
        if SF_DEBUG:
            print(f"[[DEBUG]] Failed to initialize _sfservice: {e}", log=False)
        _SFSERVICE_READY = False

    return _SFSERVICE_READY
|
|
511
|
+
|
|
512
|
+
|
|
513
|
+
def _shutdown_all_c_extensions():
    """
    Shutdown all C extensions in the correct order.
    This function is called on application exit to ensure clean shutdown
    of background threads and prevent exit code 137 (SIGKILL).

    Order matters: shutdown dependencies first, then core extensions.
    Each step is wrapped in its own try/except so one failure cannot
    block the remaining shutdowns. Debug output uses sys.stderr.write()
    because print() might not work during interpreter shutdown.
    """
    # Check if shutdown was already called (by signal handler)
    global _shutdown_handler_called
    if _shutdown_handler_called:
        return

    _shutdown_handler_called = True

    # First, set the global shutdown flag (for any Python code that checks it)
    set_shutdown_flag()

    # Shutdown function span profiler first (depends on funcspan C extension)
    global _FUNCSPAN_PROFILER
    if _FUNCSPAN_PROFILER is not None:
        try:
            _FUNCSPAN_PROFILER.stop()
            _FUNCSPAN_PROFILER = None
        except Exception:
            # Best-effort: profiler may already be stopped or half-torn-down.
            pass

    # Shutdown function span config C extension
    try:
        from . import _sffuncspan_config

        _sffuncspan_config.shutdown()
    except Exception as e:
        if SF_DEBUG:
            sys.stderr.write(f"[SHUTDOWN] _sffuncspan_config.shutdown() failed: {e}\n")
            sys.stderr.flush()

    # Shutdown function span C extension
    try:
        if _FUNCSPAN_OK and _sffuncspan:
            _sffuncspan.shutdown()
    except Exception as e:
        if SF_DEBUG:
            sys.stderr.write(f"[SHUTDOWN] _sffuncspan.shutdown() failed: {e}\n")
            sys.stderr.flush()

    # Shutdown network hop C extension
    try:
        from . import fast_network_hop

        if fast_network_hop._NETWORKHOP_FAST_OK and fast_network_hop._sfnetworkhop:
            fast_network_hop._sfnetworkhop.shutdown()
    except Exception as e:
        if SF_DEBUG:
            sys.stderr.write(f"[SHUTDOWN] _sfnetworkhop.shutdown() failed: {e}\n")
            sys.stderr.flush()

    # Shutdown network request C extension
    try:
        from .patches.network_libraries import utils as net_utils

        if net_utils._FAST_NETWORKREQUEST_AVAILABLE and net_utils._sffastnetworkrequest:
            net_utils._sffastnetworkrequest.shutdown()
    except Exception as e:
        if SF_DEBUG:
            sys.stderr.write(
                f"[SHUTDOWN] _sffastnetworkrequest.shutdown() failed: {e}\n"
            )
            sys.stderr.flush()

    # Shutdown service operations C extension
    try:
        if _SFSERVICE_OK and _sfservice:
            _sfservice.shutdown()
    except Exception as e:
        if SF_DEBUG:
            sys.stderr.write(f"[SHUTDOWN] _sfservice.shutdown() failed: {e}\n")
            sys.stderr.flush()

    # Shutdown fast log C extension (core - shutdown last)
    try:
        if _FAST_OK and _sffastlog:
            _sffastlog.shutdown()
    except Exception as e:
        if SF_DEBUG:
            sys.stderr.write(f"[SHUTDOWN] _sffastlog.shutdown() failed: {e}\n")
            sys.stderr.flush()

    if SF_DEBUG:
        sys.stderr.write(
            f"[SHUTDOWN] _shutdown_all_c_extensions() completed in PID {os.getpid()}\n"
        )
        sys.stderr.flush()
|
|
609
|
+
|
|
610
|
+
|
|
611
|
+
# Global flag to prevent calling shutdown handler multiple times
# (checked/set by both _shutdown_all_c_extensions and the signal wrappers).
_shutdown_handler_called = False

# Store the original signal.signal function for monkey-patching
# (populated before _patched_signal_signal is installed).
_original_signal_signal = None

# Track handlers that have already been wrapped to avoid double-wrapping
_wrapped_handlers = {}  # {signum: wrapped_handler}

# Track if we've already run shutdown for this signal
_shutdown_by_signal = {}  # {signum: bool}
|
|
622
|
+
|
|
623
|
+
|
|
624
|
+
def _patched_signal_signal(signum, handler):
    """
    Monkey-patched version of signal.signal() that intercepts SIGTERM/SIGINT
    registrations and chains our C extension cleanup before the application's handler.

    This ensures our cleanup runs first, regardless of when frameworks
    (Django, Celery, Uvicorn, etc.) install their signal handlers.

    Parameters mirror signal.signal(): ``signum`` is the signal number,
    ``handler`` is SIG_IGN, SIG_DFL, or a callable. Returns whatever the
    original signal.signal() returns (the previous handler).
    """
    global _wrapped_handlers, _shutdown_by_signal

    # Only intercept SIGTERM and SIGINT; all other signals pass straight
    # through to the original signal.signal().
    if signum not in (signal.SIGTERM, signal.SIGINT):
        return _original_signal_signal(signum, handler)

    # ALWAYS log interception (not just SF_DEBUG) for debugging 137 issues
    sys.stderr.write(
        f"[SIGNAL_PATCH] Intercepted signal.signal({signum}, {handler}) in PID {os.getpid()}\n"
    )
    sys.stderr.flush()

    # Check if this handler is already one we wrapped (avoid double-wrapping,
    # e.g. when setup re-registers a handler it previously installed).
    if handler in _wrapped_handlers.values():
        if SF_DEBUG:
            sys.stderr.write(
                f"[SIGNAL_PATCH] Handler already wrapped, passing through\n"
            )
            sys.stderr.flush()
        return _original_signal_signal(signum, handler)

    # Handle special cases
    if handler == signal.SIG_IGN:
        # They want to ignore the signal - respect that but still cleanup.
        # The wrapper swallows the signal after running shutdown once.
        def wrapped_ignore(sig, frame):
            if not _shutdown_by_signal.get(sig, False):
                _shutdown_by_signal[sig] = True
                sys.stderr.write(
                    f"[SIGNAL] Received signal {sig} (SIG_IGN), running cleanup\n"
                )
                sys.stderr.flush()
                _shutdown_all_c_extensions()

        wrapped_handler = wrapped_ignore

    elif handler == signal.SIG_DFL:
        # They want default behavior - cleanup then re-raise.
        def wrapped_default(sig, frame):
            if not _shutdown_by_signal.get(sig, False):
                _shutdown_by_signal[sig] = True
                sys.stderr.write(
                    f"[SIGNAL] Received signal {sig} (SIG_DFL), running cleanup\n"
                )
                sys.stderr.flush()
                _shutdown_all_c_extensions()
                # Restore the default disposition and re-raise the signal so
                # the process still terminates with conventional signal
                # semantics (correct exit status for supervisors).
                _original_signal_signal(sig, signal.SIG_DFL)
                os.kill(os.getpid(), sig)

        wrapped_handler = wrapped_default

    elif callable(handler):
        # They provided a custom handler - chain ours before theirs.
        def wrapped_custom(sig, frame):
            # Run C extension cleanup at most once per signal number.
            if not _shutdown_by_signal.get(sig, False):
                _shutdown_by_signal[sig] = True
                sys.stderr.write(
                    f"[SIGNAL] Received signal {sig} in PID {os.getpid()}\n"
                )
                sys.stderr.flush()
                _shutdown_all_c_extensions()

            # Print all remaining threads for debugging (helps diagnose
            # hangs where a non-daemon thread keeps the process alive).
            import threading

            sys.stderr.write(
                f"[SIGNAL] Active threads after shutdown: {threading.active_count()}\n"
            )
            for thread in threading.enumerate():
                sys.stderr.write(
                    f"[SIGNAL] - {thread.name} (daemon={thread.daemon}, alive={thread.is_alive()})\n"
                )
            sys.stderr.flush()

            # Always delegate to the application's original handler so its
            # own shutdown logic still runs.
            sys.stderr.write(f"[SIGNAL] Calling application handler: {handler}\n")
            sys.stderr.flush()
            handler(sig, frame)

        wrapped_handler = wrapped_custom
    else:
        # Unknown handler type - pass through unmodified.
        if SF_DEBUG:
            sys.stderr.write(f"[SIGNAL_PATCH] Unknown handler type, passing through\n")
            sys.stderr.flush()
        return _original_signal_signal(signum, handler)

    # Track this wrapped handler so a later re-registration of the same
    # wrapper is detected above and not wrapped again.
    _wrapped_handlers[signum] = wrapped_handler

    # Install the wrapped handler
    if SF_DEBUG:
        sys.stderr.write(
            f"[SIGNAL_PATCH] Installing wrapped handler for signal {signum}\n"
        )
        sys.stderr.flush()

    return _original_signal_signal(signum, wrapped_handler)
|
|
729
|
+
|
|
730
|
+
|
|
731
|
+
def _monitor_parent_process():
    """
    Daemon-thread body that watches for the death of the parent process.

    Cross-platform (Linux, macOS, Windows): the parent PID is recorded once
    at startup and periodically compared against the current parent PID.
    When the parent dies this process is reparented (typically to init/PID 1),
    the two PIDs diverge, and we run a clean shutdown of the C extensions
    before exiting with status 0.

    This covers parents that die without forwarding signals:
    - shell wrappers (sh -c) that don't forward SIGTERM
    - process supervisors that exit unexpectedly
    - container runtimes that kill the parent process

    Poll interval comes from SF_PARENT_MONITOR_INTERVAL_MS (default 100ms);
    a value of 0 disables monitoring (checked by the caller).
    """
    import time

    # The PID we consider "our parent"; any change means it died.
    watched_ppid = os.getppid()

    sys.stderr.write(
        f"[SAILFISH_INIT] Parent monitor thread started (parent PID: {watched_ppid}, check interval: {SF_PARENT_MONITOR_INTERVAL_MS}ms)\n"
    )
    sys.stderr.flush()

    # Milliseconds -> seconds for time.sleep().
    sleep_seconds = SF_PARENT_MONITOR_INTERVAL_MS / 1000.0

    try:
        while True:
            time.sleep(sleep_seconds)

            ppid_now = os.getppid()
            if ppid_now == watched_ppid:
                continue  # parent still alive; keep polling

            # Reparented: the original parent is gone.
            sys.stderr.write(
                f"[PARENT_MONITOR] Parent process died! Initial parent PID: {watched_ppid}, current parent PID: {ppid_now}\n"
            )
            sys.stderr.write(
                f"[PARENT_MONITOR] Triggering clean shutdown of C extensions...\n"
            )
            sys.stderr.flush()

            _shutdown_all_c_extensions()

            sys.stderr.write(
                f"[PARENT_MONITOR] Clean shutdown complete, exiting with code 0\n"
            )
            sys.stderr.flush()

            # Hard-exit: skips atexit/finalizers, which is intentional here
            # because cleanup has already run.
            os._exit(0)
    except Exception as exc:
        # Monitoring is best-effort: never crash the host application.
        if SF_DEBUG:
            sys.stderr.write(
                f"[PARENT_MONITOR] Monitoring thread error (non-fatal): {exc}\n"
            )
            sys.stderr.flush()
|
|
800
|
+
|
|
801
|
+
|
|
802
|
+
def _setup_parent_death_signal():
    """
    On Linux, ask the kernel to deliver SIGTERM to this process when its
    parent dies, via prctl(PR_SET_PDEATHSIG, SIGTERM).

    This handles cases where shell wrappers (sh -c) or process supervisors
    don't forward signals properly in Docker/Kubernetes environments, and
    works transparently without customers modifying Dockerfiles, entrypoints,
    or K8s configs. Once the kernel delivers SIGTERM, our monkey-patched
    signal handlers run and the C extensions shut down cleanly.

    Platform support:
    - Linux: prctl(PR_SET_PDEATHSIG) via ctypes/libc
    - macOS / Windows: skipped (no prctl)

    Best-effort: every failure path logs (under SF_DEBUG) and returns.
    Can be disabled via SF_DISABLE_PARENT_DEATH_SIGNAL=true.
    """
    # Opt-out via environment variable.
    if SF_DISABLE_PARENT_DEATH_SIGNAL:
        if SF_DEBUG:
            sys.stderr.write(
                "[SAILFISH_INIT] Parent death signal disabled via SF_DISABLE_PARENT_DEATH_SIGNAL\n"
            )
            sys.stderr.flush()
        return

    # prctl is Linux-only.
    if sys.platform != "linux":
        if SF_DEBUG:
            sys.stderr.write(
                f"[SAILFISH_INIT] Parent death signal not supported on {sys.platform}, skipping\n"
            )
            sys.stderr.flush()
        return

    try:
        import ctypes

        # Locate libc, trying the common SONAME first.
        libc = None
        for candidate in ("libc.so.6", "libc.so"):
            try:
                libc = ctypes.CDLL(candidate)
                break
            except OSError:
                continue
        if libc is None:
            if SF_DEBUG:
                sys.stderr.write(
                    "[SAILFISH_INIT] Could not load libc, parent death signal unavailable\n"
                )
                sys.stderr.flush()
            return

        PR_SET_PDEATHSIG = 1  # prctl option: set parent-death signal

        # Ask the kernel to send us SIGTERM when the parent exits.
        rc = libc.prctl(PR_SET_PDEATHSIG, signal.SIGTERM)

        if rc == 0:
            sys.stderr.write(
                "[SAILFISH_INIT] Registered parent death signal (SIGTERM on parent exit)\n"
            )
            sys.stderr.flush()
        elif SF_DEBUG:
            sys.stderr.write(
                f"[SAILFISH_INIT] prctl returned {rc}, parent death signal may not be active\n"
            )
            sys.stderr.flush()

    except AttributeError:
        # libc loaded but has no prctl symbol.
        if SF_DEBUG:
            sys.stderr.write(
                "[SAILFISH_INIT] prctl not available in libc, parent death signal unavailable\n"
            )
            sys.stderr.flush()
    except Exception as exc:
        # Any other error - log but never crash the host application.
        if SF_DEBUG:
            sys.stderr.write(
                f"[SAILFISH_INIT] Unexpected error setting up parent death signal: {exc}\n"
            )
            sys.stderr.flush()
|
|
892
|
+
|
|
893
|
+
|
|
894
|
+
class UnifiedInterceptor:
    """
    Replaces sys.stdout and builtins.print with ultra-thin shims:
    - direct write to real stdout (no recursion, no lock, no regex)
    - native fast path to _sffastlog.print_() when available
    - fallback to PrintInterceptor otherwise
    """

    # __slots__: many instances are unlikely, but this also prevents
    # accidental attribute creation on the stdout replacement object.
    __slots__ = (
        "print_interceptor",
        "_original_stdout",
        "_original_stderr",
    )

    def __init__(self):
        # Note: CustomLogHandler is created in setup_interceptors() and added to loggers there
        # We don't need a separate instance here
        self.print_interceptor = PrintInterceptor()
        # Use sys.__stdout__ and sys.__stderr__ to get the ORIGINAL streams
        # before any monkey-patching. This prevents recursion issues when
        # log=False tries to bypass interceptors.
        self._original_stdout = sys.__stdout__
        self._original_stderr = sys.__stderr__

    # -------- sys.stdout replacement --------
    def write(self, message):
        """
        Ultra-thin write path: write to real stdout, then ship via C fast path or fallback.
        """
        # Debug logging for Django output capture
        if SF_DEBUG and message and message.strip():
            sys.__stderr__.write(
                f"[DEBUG UnifiedInterceptor.write] {repr(message[:80])}\n"
            )
            sys.__stderr__.flush()

        # Respect reentrancy guards: if we're already inside our own logging
        # machinery, just pass through to the real stdout and stop.
        if get_reentrancy_guard_sys_stdout_active() or getattr(
            _thread_locals, "reentrancy_guard_logging_active", False
        ):
            self._original_stdout.write(message)
            return

        # Always write to the real stdout first; no unconditional flush.
        self._original_stdout.write(message)

        # Skip empty / newline-only messages — nothing worth shipping.
        if not message or message == "\n":
            if SF_DEBUG:
                sys.__stderr__.write(f"[DEBUG] Skipping empty/newline message\n")
                sys.__stderr__.flush()
            return

        # Resolve the trace/session id once per write.
        _, trace_id = get_or_set_sf_trace_id()

        # Native fast path (ring + libcurl sender)
        fast_ok = _ensure_fast_print_initialized()
        if SF_DEBUG:
            sys.__stderr__.write(f"[DEBUG] Fast print initialized: {fast_ok}\n")
            sys.__stderr__.flush()

        if fast_ok:
            try:
                # Capture parent_span_id IMMEDIATELY for async-safety
                parent_span_id = get_current_function_span_id()

                if SF_DEBUG:
                    sys.__stderr__.write(
                        f"[DEBUG] Calling _sffastlog.print_() with message: {repr(message[:50])}, parent_span_id={parent_span_id}\n"
                    )
                    sys.__stderr__.flush()
                _sffastlog.print_(
                    contents=message, session_id=str(trace_id), preactive=0, parent_span_id=parent_span_id
                )
                if SF_DEBUG:
                    sys.__stderr__.write(f"[DEBUG] _sffastlog.print_() succeeded\n")
                    sys.__stderr__.flush()
                return
            except Exception as e:
                if SF_DEBUG:
                    sys.__stderr__.write(f"[DEBUG] _sffastlog.print_() failed: {e}\n")
                    sys.__stderr__.flush()
                pass  # fall back below

        # Fallback to Python interceptor path
        if SF_DEBUG:
            sys.__stderr__.write(f"[DEBUG] Using fallback Python interceptor\n")
            sys.__stderr__.flush()
        parent_span_id = get_current_function_span_id()
        self.print_interceptor.do_send((message, trace_id), trace_id, parent_span_id=parent_span_id)

    def flush(self):
        """Flush the real (original) stdout; our shim buffers nothing itself."""
        self._original_stdout.flush()

    # -------- print() override --------
    def create_custom_print(self):
        """
        Provide a print function compatible with the builtins.print signature,
        but as lean as possible.

        Extra keyword ``log=True``: pass log=False to write to stdout while
        skipping all telemetry/tracing work.
        """
        # Bind to locals once so the closure avoids attribute lookups per call.
        _orig = self._original_stdout
        _pi = self.print_interceptor

        def custom_print(*args, sep=" ", end="\n", file=None, flush=False, log=True):
            # ULTRA-FAST PATH: Early exit for log=False (skips string formatting + trace ID)
            # This is critical for debug prints with log=False which should be near-zero cost
            if not log:
                # Format and write to stdout, but skip all telemetry/tracing work
                out = sep.join(map(str, args)) + end
                _orig.write(out)
                if flush:
                    _orig.flush()
                return

            # NORMAL PATH: Format once for both stdout and logging
            out = sep.join(map(str, args)) + end

            # Always write to real stdout.
            # (Ignore 'file' param to avoid recursion into our own wrappers.)
            _orig.write(out)
            if flush:
                _orig.flush()

            msg = out
            # Whitespace-only output: printed but not shipped.
            if not msg.strip():
                return

            # Trace id once
            _, trace_id = get_or_set_sf_trace_id()
            # Capture parent_span_id IMMEDIATELY for async-safety
            parent_span_id = get_current_function_span_id()

            # Native fast path
            if _ensure_fast_print_initialized():
                try:
                    _sffastlog.print_(
                        contents=msg, session_id=str(trace_id), preactive=0, parent_span_id=parent_span_id
                    )
                    return
                except Exception:
                    pass

            # Fallback
            _pi.do_send((msg, trace_id), trace_id, parent_span_id=parent_span_id)

        return custom_print

    def __getattr__(self, attr):
        """
        Delegate attribute access to original stdout or stderr when needed.

        Lets file-like consumers (e.g. code touching .encoding, .isatty)
        see the real stdout's attributes through this shim.
        """
        if hasattr(self._original_stdout, attr):
            return getattr(self._original_stdout, attr)
        # TODO: If you later intercept stderr, mirror the same behavior
        # elif hasattr(self._original_stderr, attr):
        #     return getattr(self._original_stderr, attr)
        raise AttributeError(
            f"'{self.__class__.__name__}' object has no attribute '{attr}'"
        )

    def intercept_stdout(self):
        """
        Replace sys.stdout and builtins.print to intercept all output.
        """
        if PRINT_CONFIGURATION_STATUSES:
            self._original_stdout.write("Intercepting stdout and print...\n")
            self._original_stdout.flush()

        # Replace stdout
        sys.stdout = self
        # NOTE: stderr interception left as-is; uncomment if you want parity:
        # sys.stderr = self

        # Save original print if not already saved
        if not hasattr(builtins, "_original_print"):
            builtins._original_print = builtins.print

        # Override builtins.print with our ultra-thin implementation
        custom_print_function = self.create_custom_print()
        # NOTE(review): partial() with no bound arguments is a no-op wrapper —
        # assigning custom_print_function directly would be equivalent; confirm
        # nothing relies on the partial type before simplifying.
        builtins.print = functools.partial(custom_print_function)

        # Update __builtins__ reference if needed (it may be a dict or the
        # builtins module depending on how this module is executed).
        if isinstance(__builtins__, dict):
            __builtins__["print"] = custom_print_function
        elif isinstance(__builtins__, ModuleType):
            setattr(__builtins__, "print", custom_print_function)

        # Also ensure __main__.print and builtins module reference are updated
        if "__main__" in sys.modules:
            sys.modules["__main__"].__dict__["print"] = custom_print_function
            sys.modules["builtins"].print = custom_print_function

        if PRINT_CONFIGURATION_STATUSES:
            self._original_stdout.write("Intercepting stdout and print...DONE\n")
            self._original_stdout.flush()

    # -------- exceptions --------
    def intercept_exceptions(self):
        """Install the custom excepthooks (process- and thread-level)."""
        start_profiling()
        if PRINT_CONFIGURATION_STATUSES:
            self._original_stdout.write("Intercepting uncaught exceptions...\n")
            self._original_stdout.flush()

        sys.excepthook = custom_excepthook
        # threading.excepthook exists on Python 3.8+; guard for older runtimes.
        if hasattr(threading, "excepthook"):
            threading.excepthook = custom_thread_excepthook

        if PRINT_CONFIGURATION_STATUSES:
            self._original_stdout.write("Intercepting uncaught exceptions...DONE\n")
            self._original_stdout.flush()

    # TODO - Figure out how to make this work universally
    def patch_exception_class(self):
        """Replace builtins.Exception with PatchedException (idempotent, best-effort)."""
        import builtins as _b

        # Already patched (PatchedException exposes transmit_to_sailfish).
        if hasattr(_b.Exception, "transmit_to_sailfish"):
            return
        try:
            if PRINT_CONFIGURATION_STATUSES:
                self._original_stdout.write("Monkey-patching Exceptions class...\n")
                self._original_stdout.flush()
            _ = _b.Exception
            _b.Exception = PatchedException
            if PRINT_CONFIGURATION_STATUSES:
                self._original_stdout.write("Monkey-patching Exceptions class...DONE\n")
                self._original_stdout.flush()
        except Exception as e:
            print(f"[Warning] Failed to patch `builtins.Exception`: {e}")
|
|
1123
|
+
|
|
1124
|
+
|
|
1125
|
+
# ----------------- setup entrypoint -----------------
|
|
1126
|
+
|
|
1127
|
+
|
|
1128
|
+
@validate_call
|
|
1129
|
+
def setup_interceptors(
|
|
1130
|
+
api_key: str,
|
|
1131
|
+
graphql_endpoint: str = None,
|
|
1132
|
+
service_identifier: Optional[str] = None,
|
|
1133
|
+
service_version: Optional[str] = None,
|
|
1134
|
+
service_display_name: Optional[str] = None,
|
|
1135
|
+
git_sha: Optional[str] = None,
|
|
1136
|
+
service_additional_metadata: Optional[
|
|
1137
|
+
Dict[str, Union[str, int, float, None]]
|
|
1138
|
+
] = None,
|
|
1139
|
+
profiling_mode_enabled: bool = False,
|
|
1140
|
+
profiling_max_depth: int = 5,
|
|
1141
|
+
domains_to_not_propagate_headers_to: Optional[List[str]] = None,
|
|
1142
|
+
routes_to_skip_network_hops: Optional[List[str]] = None,
|
|
1143
|
+
site_and_dist_packages_to_collect_local_variables_on: Optional[List[str]] = None,
|
|
1144
|
+
setup_global_time_at_app_spinup: bool = False, # Return to True later on
|
|
1145
|
+
):
|
|
1146
|
+
if service_identifier is None:
|
|
1147
|
+
service_identifier = os.getenv("SERVICE_VERSION", os.getenv("GIT_SHA"))
|
|
1148
|
+
if git_sha is None:
|
|
1149
|
+
git_sha = os.getenv("GIT_SHA")
|
|
1150
|
+
app_config._service_identifier = service_identifier
|
|
1151
|
+
app_config._service_version = service_version
|
|
1152
|
+
app_config._service_display_name = service_display_name
|
|
1153
|
+
app_config._git_sha = git_sha
|
|
1154
|
+
app_config._service_additional_metadata = service_additional_metadata
|
|
1155
|
+
app_config._profiling_mode_enabled = profiling_mode_enabled
|
|
1156
|
+
app_config._profiling_max_depth = profiling_max_depth
|
|
1157
|
+
app_config._set_site_and_dist_packages_to_collect_local_variables_on = (
|
|
1158
|
+
site_and_dist_packages_to_collect_local_variables_on
|
|
1159
|
+
)
|
|
1160
|
+
|
|
1161
|
+
# Use parameter if provided, otherwise fall back to environment variable
|
|
1162
|
+
if routes_to_skip_network_hops is not None:
|
|
1163
|
+
app_config._routes_to_skip_network_hops = routes_to_skip_network_hops
|
|
1164
|
+
else:
|
|
1165
|
+
# Parse env var as comma-separated list
|
|
1166
|
+
if SF_DISABLE_INBOUND_NETWORK_TRACING_ON_ROUTES:
|
|
1167
|
+
app_config._routes_to_skip_network_hops = [
|
|
1168
|
+
p.strip()
|
|
1169
|
+
for p in SF_DISABLE_INBOUND_NETWORK_TRACING_ON_ROUTES.split(",")
|
|
1170
|
+
if p.strip()
|
|
1171
|
+
]
|
|
1172
|
+
else:
|
|
1173
|
+
app_config._routes_to_skip_network_hops = []
|
|
1174
|
+
|
|
1175
|
+
# Capture caller file/line (avoid site-packages etc)
|
|
1176
|
+
for frame in inspect.stack():
|
|
1177
|
+
if any(s in frame.filename for s in STRINGS_NOT_FOUND_IN_CALLER_LOCATIONS):
|
|
1178
|
+
continue
|
|
1179
|
+
app_config._setup_interceptors_call_filename = frame.filename
|
|
1180
|
+
app_config._setup_interceptors_call_lineno = frame.lineno
|
|
1181
|
+
break
|
|
1182
|
+
|
|
1183
|
+
# Configure core endpoints/keys
|
|
1184
|
+
app_config._sailfish_api_key = api_key
|
|
1185
|
+
app_config._sailfish_graphql_endpoint = (
|
|
1186
|
+
graphql_endpoint or app_config._sailfish_graphql_endpoint
|
|
1187
|
+
)
|
|
1188
|
+
|
|
1189
|
+
# NOTE: Service UUID is now automatically generated by C library if not provided
|
|
1190
|
+
# The C library (_sfteepreload.so) generates UUID during sender thread init and exports to environment
|
|
1191
|
+
# Python reads from environment (app_config.py), ensuring both use the same UUID
|
|
1192
|
+
# No user configuration required!
|
|
1193
|
+
if SF_DEBUG:
|
|
1194
|
+
print(f"[setup_interceptors] Using service UUID: {app_config._service_uuid}")
|
|
1195
|
+
|
|
1196
|
+
# Idempotent setup
|
|
1197
|
+
if app_config._interceptors_initialized:
|
|
1198
|
+
if SF_DEBUG:
|
|
1199
|
+
print("[[DEBUG]] Interceptors already set up. Skipping setup.")
|
|
1200
|
+
return
|
|
1201
|
+
|
|
1202
|
+
if not app_config._sailfish_api_key:
|
|
1203
|
+
raise RuntimeError(
|
|
1204
|
+
"The 'api_key' parameter is missing. Please provide a valid value."
|
|
1205
|
+
)
|
|
1206
|
+
|
|
1207
|
+
# CRITICAL: Push configuration to C library AFTER app_config values are set
|
|
1208
|
+
# Constructor runs BEFORE Python starts, so C library must wait for these values
|
|
1209
|
+
# C hooks check SF_INITIALIZED flag and pass through until sf_initialize() is called
|
|
1210
|
+
if os.getenv("LD_PRELOAD") and (
|
|
1211
|
+
"libsfnettee.so" in os.getenv("LD_PRELOAD", "")
|
|
1212
|
+
or "_sfteepreload" in os.getenv("LD_PRELOAD", "")
|
|
1213
|
+
):
|
|
1214
|
+
try:
|
|
1215
|
+
# Use the secure Python C extension module interface
|
|
1216
|
+
from sf_veritas import _sfconfig
|
|
1217
|
+
|
|
1218
|
+
# Push configuration to C library
|
|
1219
|
+
sink_url = app_config._sailfish_graphql_endpoint
|
|
1220
|
+
api_key = app_config._sailfish_api_key
|
|
1221
|
+
service_uuid = app_config._service_uuid
|
|
1222
|
+
|
|
1223
|
+
if SF_DEBUG:
|
|
1224
|
+
print(f"[setup_interceptors] Configuring C library:")
|
|
1225
|
+
print(f" Sink URL: {sink_url}")
|
|
1226
|
+
print(f" API Key: {'***' if api_key else '(null)'}")
|
|
1227
|
+
print(f" Service UUID: {service_uuid}")
|
|
1228
|
+
|
|
1229
|
+
_sfconfig.set_sink_url(sink_url if sink_url else "")
|
|
1230
|
+
_sfconfig.set_api_key(api_key if api_key else "")
|
|
1231
|
+
_sfconfig.set_service_uuid(service_uuid if service_uuid else "")
|
|
1232
|
+
_sfconfig.initialize() # Activates C capture (sets SF_INITIALIZED = 1)
|
|
1233
|
+
|
|
1234
|
+
if SF_DEBUG:
|
|
1235
|
+
print("[setup_interceptors] ✓ C library configured and activated")
|
|
1236
|
+
except Exception as e:
|
|
1237
|
+
# Non-fatal: C library will use env vars as fallback, Python patches will still work
|
|
1238
|
+
if SF_DEBUG:
|
|
1239
|
+
print(f"[setup_interceptors] Failed to configure C library: {e}")
|
|
1240
|
+
print(
|
|
1241
|
+
"[setup_interceptors] C library will use environment variables as fallback"
|
|
1242
|
+
)
|
|
1243
|
+
traceback.print_exc()
|
|
1244
|
+
|
|
1245
|
+
if PRINT_CONFIGURATION_STATUSES:
|
|
1246
|
+
print("Setting up interceptors")
|
|
1247
|
+
|
|
1248
|
+
# Register shutdown handlers to cleanly stop C extensions and prevent exit code 137
|
|
1249
|
+
# atexit: handles normal Python exit (sys.exit(), end of script, etc.)
|
|
1250
|
+
atexit.register(_shutdown_all_c_extensions)
|
|
1251
|
+
|
|
1252
|
+
# Monkey-patch signal.signal() to intercept ALL signal handler registrations
|
|
1253
|
+
# This ensures our C extension cleanup runs first, regardless of when
|
|
1254
|
+
# frameworks (Django, Celery, Uvicorn, etc.) install their handlers
|
|
1255
|
+
global _original_signal_signal
|
|
1256
|
+
if _original_signal_signal is None: # Only patch once
|
|
1257
|
+
_original_signal_signal = signal.signal
|
|
1258
|
+
signal.signal = _patched_signal_signal
|
|
1259
|
+
|
|
1260
|
+
# ALWAYS log this (not just SF_DEBUG) so we can debug 137 issues
|
|
1261
|
+
sys.stderr.write(
|
|
1262
|
+
f"[SAILFISH_INIT] Monkey-patched signal.signal() in PID {os.getpid()}\n"
|
|
1263
|
+
)
|
|
1264
|
+
sys.stderr.flush()
|
|
1265
|
+
|
|
1266
|
+
# Check if handlers are already registered and wrap them
|
|
1267
|
+
for sig in (signal.SIGTERM, signal.SIGINT):
|
|
1268
|
+
current_handler = signal.getsignal(sig)
|
|
1269
|
+
if current_handler not in (signal.SIG_DFL, signal.SIG_IGN, None):
|
|
1270
|
+
# A handler is already registered - wrap it
|
|
1271
|
+
sys.stderr.write(
|
|
1272
|
+
f"[SAILFISH_INIT] Found existing {sig} handler: {current_handler}, wrapping it\n"
|
|
1273
|
+
)
|
|
1274
|
+
sys.stderr.flush()
|
|
1275
|
+
|
|
1276
|
+
# Use our patched signal.signal to wrap it
|
|
1277
|
+
signal.signal(sig, current_handler)
|
|
1278
|
+
else:
|
|
1279
|
+
sys.stderr.write(
|
|
1280
|
+
f"[SAILFISH_INIT] No existing handler for signal {sig} (current: {current_handler})\n"
|
|
1281
|
+
)
|
|
1282
|
+
sys.stderr.flush()
|
|
1283
|
+
|
|
1284
|
+
# Setup parent death signal (Linux only, best-effort)
|
|
1285
|
+
# This ensures Python receives SIGTERM even when shell wrappers don't forward signals
|
|
1286
|
+
# Critical for Docker/Kubernetes environments where customers can't modify infrastructure
|
|
1287
|
+
_setup_parent_death_signal()
|
|
1288
|
+
|
|
1289
|
+
# Start parent process monitor thread (cross-platform)
|
|
1290
|
+
# This actively detects when parent process dies by checking for reparenting
|
|
1291
|
+
# More reliable than signals, works on all platforms
|
|
1292
|
+
if SF_PARENT_MONITOR_INTERVAL_MS > 0:
|
|
1293
|
+
parent_monitor_thread = threading.Thread(
|
|
1294
|
+
target=_monitor_parent_process,
|
|
1295
|
+
name="sailfish-parent-monitor",
|
|
1296
|
+
daemon=True, # Daemon thread won't prevent process exit
|
|
1297
|
+
)
|
|
1298
|
+
parent_monitor_thread.start()
|
|
1299
|
+
else:
|
|
1300
|
+
if SF_DEBUG:
|
|
1301
|
+
sys.stderr.write(
|
|
1302
|
+
"[SAILFISH_INIT] Parent monitoring disabled (SF_PARENT_MONITOR_INTERVAL_MS=0)\n"
|
|
1303
|
+
)
|
|
1304
|
+
sys.stderr.flush()
|
|
1305
|
+
|
|
1306
|
+
# Global time sync
|
|
1307
|
+
if setup_global_time_at_app_spinup:
|
|
1308
|
+
TimeSync.get_instance()
|
|
1309
|
+
|
|
1310
|
+
# Local env detect
|
|
1311
|
+
set_sf_is_local_flag()
|
|
1312
|
+
|
|
1313
|
+
# Install hooks
|
|
1314
|
+
unified_interceptor = UnifiedInterceptor()
|
|
1315
|
+
unified_interceptor.intercept_exceptions()
|
|
1316
|
+
|
|
1317
|
+
# Configure logging to capture ALL logs (including those with propagate=False like Uvicorn)
|
|
1318
|
+
logging.basicConfig(level=LOG_LEVEL)
|
|
1319
|
+
custom_handler = CustomLogHandler()
|
|
1320
|
+
|
|
1321
|
+
# Add to root logger (captures all logs with propagate=True)
|
|
1322
|
+
root_logger = logging.getLogger()
|
|
1323
|
+
root_logger.addHandler(custom_handler)
|
|
1324
|
+
|
|
1325
|
+
# OPTIMIZATION: Cache loggers we've already processed to avoid repeated checks
|
|
1326
|
+
# Use dict instead of set for faster lookups (dicts have slightly better cache locality)
|
|
1327
|
+
# This cache tracks which loggers we've seen and don't need to check again
|
|
1328
|
+
_processed_loggers = {}
|
|
1329
|
+
_processed_loggers_lock = (
|
|
1330
|
+
threading.Lock()
|
|
1331
|
+
) # CRITICAL: Protect dict from race conditions
|
|
1332
|
+
|
|
1333
|
+
# Store reference to check if handler is already added
|
|
1334
|
+
# OPTIMIZATION: Cache the CustomLogHandler class to avoid repeated lookups
|
|
1335
|
+
_handler_class = CustomLogHandler
|
|
1336
|
+
|
|
1337
|
+
def _needs_handler(logger_instance):
|
|
1338
|
+
"""Check if logger needs our handler added.
|
|
1339
|
+
|
|
1340
|
+
OPTIMIZED: Use direct iteration instead of generator expression to avoid overhead.
|
|
1341
|
+
"""
|
|
1342
|
+
# Fast path: If no handlers, definitely needs one
|
|
1343
|
+
if not logger_instance.handlers:
|
|
1344
|
+
return True
|
|
1345
|
+
|
|
1346
|
+
# Check if our handler is already present (avoid generator overhead)
|
|
1347
|
+
for h in logger_instance.handlers:
|
|
1348
|
+
if isinstance(h, _handler_class):
|
|
1349
|
+
return False
|
|
1350
|
+
return True
|
|
1351
|
+
|
|
1352
|
+
# Monkey-patch logging.Logger.__setattr__ to detect when propagate is set to False
# This catches cases where logger is created before our patch, but propagate set later
_original_Logger_setattr = logging.Logger.__setattr__

def _patched_Logger_setattr(self, name, value):
    """Intercept attribute writes on loggers; auto-attach our handler when
    ``propagate`` is flipped to False (so the logger's records still reach us).

    Always delegates to the original __setattr__ FIRST so the attribute is
    actually set before we react to it.
    """
    _original_Logger_setattr(self, name, value)
    # If propagate was just set to False, add our handler
    # (only `value is False` triggers; falsy-but-not-False values are ignored)
    if name == "propagate" and value is False and self.name:
        if _needs_handler(self):
            self.addHandler(custom_handler)
            with _processed_loggers_lock:
                _processed_loggers[self.name] = (
                    self  # Mark as processed (cache logger instance)
                )
            if SF_DEBUG:
                # NOTE(review): sibling debug prints pass log=False; this one
                # does not — confirm the custom print cannot recurse here.
                print(
                    f"[[DEBUG]] Auto-added handler to {self.name} (propagate set to False)",
                )

logging.Logger.__setattr__ = _patched_Logger_setattr
|
|
1372
|
+
|
|
1373
|
+
# Monkey-patch logging.getLogger() to auto-add handler to propagate=False loggers
# This catches loggers retrieved/accessed after setup
_original_getLogger = logging.getLogger

def _patched_getLogger(name=None):
    """Drop-in replacement for logging.getLogger with a lock-free name cache.

    Returns the same Logger the original would, additionally attaching
    ``custom_handler`` to loggers that have ``propagate=False`` (their
    records would otherwise never reach the root handler).
    """
    # ULTRA-FAST PATH: Early exit for root logger (most common case)
    # Check BEFORE calling original getLogger to save a function call
    if name is None or name == "root":
        return _original_getLogger(name)

    # ULTRA-FAST PATH: Lock-free cache check for hits (~50ns vs ~10μs with lock)
    # SAFE: In CPython, dict.get() is atomic due to GIL. Even during concurrent
    # dict resizes, get() won't crash (might miss, but we'll catch that below)
    # This eliminates lock contention on cache hits (99.9% of calls after warmup)
    cached = _processed_loggers.get(name, None)
    if cached is not None:
        return cached

    # SLOW PATH: Cache miss - need to get logger and update cache
    # Get logger BEFORE taking lock (logging.getLogger has its own locking)
    logger = _original_getLogger(name)

    # Double-checked locking: Check cache again before inserting
    # Another thread might have inserted while we were getting the logger
    with _processed_loggers_lock:
        # Recheck cache (avoids race where 2 threads both miss cache)
        cached = _processed_loggers.get(name, None)
        if cached is not None:
            return cached
        # Cache miss confirmed, insert our logger
        _processed_loggers[name] = logger

    # FAST PATH: Only check propagate if it's actually False
    # Most loggers have propagate=True, so this avoids _needs_handler call
    # REMOVED: isinstance check - getLogger() always returns a Logger
    if not logger.propagate:
        # OPTIMIZATION: Inline _needs_handler check for hot path performance
        # Fast path: no handlers means we definitely need to add one
        needs_handler = not logger.handlers
        if not needs_handler:
            # Check if our handler is already present (manual loop for early exit)
            needs_handler = True
            for h in logger.handlers:
                if isinstance(h, _handler_class):
                    needs_handler = False
                    break

        if needs_handler:
            logger.addHandler(custom_handler)
            if SF_DEBUG:
                print(
                    f"[[DEBUG]] Auto-added handler to {name} (has propagate=False)",
                    log=False,
                )

    return logger

logging.getLogger = _patched_getLogger
|
|
1431
|
+
|
|
1432
|
+
# Also handle any existing loggers with propagate=False
# (loggers created before the getLogger/__setattr__ patches were installed)
for logger_name in list(logging.Logger.manager.loggerDict.keys()):
    logger = _original_getLogger(logger_name)
    if isinstance(logger, logging.Logger) and not logger.propagate:
        if _needs_handler(logger):
            logger.addHandler(custom_handler)
            if SF_DEBUG:
                # NOTE(review): sibling debug prints pass log=False; this one
                # does not — confirm the custom print cannot recurse here.
                print(
                    f"[[DEBUG]] Added handler to existing logger {logger_name} (has propagate=False)",
                )
    # Mark all existing loggers as processed to avoid checking them again
    # CRITICAL: Lock protects against race during initialization
    with _processed_loggers_lock:
        _processed_loggers[logger_name] = logger

if SF_DEBUG:
    print(
        f"[[DEBUG]] Configured logging: root handler + auto-patching getLogger() and Logger.__setattr__",
    )
|
|
1451
|
+
|
|
1452
|
+
# stdout + print override (this is the hot path)
unified_interceptor.intercept_stdout()

# Framework wrappers / network patches
if SF_DEBUG:
    print(
        f"[[DEBUG]] Before patch_web_frameworks, sys.getprofile() = {sys.getprofile()}",
        log=False,
    )
# Initialize service operations C extension FIRST (before patching)
# This ensures the C extension is ready when DomainsToNotPassHeaderToTransmitter
# is called during patch_all_http_clients()
if _ensure_service_initialized():
    try:
        import json

        # Prepare parameters for service_identifier()
        # All values are coerced to "" so the C extension never sees None.
        service_identifier_val = app_config._service_identifier or ""
        service_version_val = app_config._service_version or ""
        service_display_name_val = app_config._service_display_name or ""
        git_sha_val = app_config._git_sha or ""

        # Serialize additional metadata dict to JSON string
        # (best-effort: serialization failure leaves it as "")
        service_additional_metadata_json = ""
        if app_config._service_additional_metadata:
            try:
                service_additional_metadata_json = json.dumps(
                    app_config._service_additional_metadata
                )
            except Exception as e:
                if SF_DEBUG:
                    print(
                        f"[[DEBUG]] Failed to serialize service_additional_metadata: {e}",
                        log=False,
                    )

        # Get infrastructure details (best-effort, same "" fallback on failure)
        infrastructure_type_val = ""
        infrastructure_details_json = ""
        try:
            infrastructure_type_val = app_config._infra_details.system.value
            infrastructure_details_json = json.dumps(
                app_config._infra_details.details
            )
        except Exception as e:
            if SF_DEBUG:
                print(
                    f"[[DEBUG]] Failed to get infrastructure details: {e}",
                    log=False,
                )

        # Get setup_interceptors call location
        setup_file_path = app_config._setup_interceptors_call_filename or ""
        setup_line_number = app_config._setup_interceptors_call_lineno or 0

        # Call the C extension to send service identification
        _sfservice.service_identifier(
            service_identifier=service_identifier_val,
            service_version=service_version_val,
            service_display_name=service_display_name_val,
            service_additional_metadata=service_additional_metadata_json,
            git_sha=git_sha_val,
            infrastructure_type=infrastructure_type_val,
            infrastructure_details=infrastructure_details_json,
            setup_interceptors_file_path=setup_file_path,
            setup_interceptors_line_number=setup_line_number,
        )

        if SF_DEBUG:
            print(
                "[[DEBUG]] Service identification sent via _sfservice C extension",
                log=False,
            )
    except Exception as e:
        # Surface the full exception details for debugging
        print(f"[[ERROR]] Failed to send service identification: {e}", log=False)
        print(f"[[ERROR]] Full traceback:\n{traceback.format_exc()}", log=False)
        # Don't re-raise - allow initialization to continue
        # but the user will see the full error details now

# Now that C extension is initialized, apply framework/network patches
# The DomainsToNotPassHeaderToTransmitter will use the C extension now
patch_web_frameworks(routes_to_skip_network_hops)
if SF_DEBUG:
    print(
        f"[[DEBUG]] After patch_web_frameworks, sys.getprofile() = {sys.getprofile()}",
        log=False,
    )
patch_all_http_clients(domains_to_not_propagate_headers_to)

# Patch ThreadPoolExecutor to copy ContextVars (eliminates lock contention!)
# patch_threading()
# if SF_DEBUG:
#     print(
#         f"[[DEBUG]] After patch_threading, ThreadPoolExecutor will copy ContextVars",
#         log=False,
#     )

# Initialize function span profiler if enabled
if _ensure_funcspan_initialized():
    if PRINT_CONFIGURATION_STATUSES:
        print("Function span profiler initialized and started.", log=False)
    if SF_DEBUG:
        print(
            f"[[DEBUG]] After funcspan init, sys.getprofile() = {sys.getprofile()}",
            log=False,
        )

app_config._interceptors_initialized = True

# CRITICAL: Mark interceptors as ready - this enables profiling
# The profiler skips all events until interceptors are fully initialized to prevent
# crashes from profiling code in an inconsistent state during initialization.
if _FUNCSPAN_OK and _sffuncspan:
    _sffuncspan.set_interceptors_ready()
    if SF_DEBUG:
        print("[[DEBUG]] Profiling enabled (interceptors ready)", log=False)

if PRINT_CONFIGURATION_STATUSES:
    print("Interceptors setup completed.", log=False)
|
|
1572
|
+
|
|
1573
|
+
|
|
1574
|
+
def reinitialize_after_fork():
    """
    Reinitialize only the C extensions after a fork (for multiprocessing frameworks like Robyn).
    Does NOT re-apply patches - those are inherited from the parent process.
    Only resets initialization flags and reinitializes C extension background threads/libcurl.
    """
    global _FAST_PRINT_READY, _FUNCSPAN_READY, _FUNCSPAN_PROFILER

    if SF_DEBUG:
        print(
            f"[[DEBUG]] reinitialize_after_fork() called in PID {os.getpid()}",
            log=False,
        )

    # Shutdown C extensions first (resets g_running flag and cleans up state)
    # Note: We don't call _shutdown_all_c_extensions() here because we're reinitializing,
    # not shutting down permanently, so we don't want to set the shutdown flag.
    # Every shutdown below is wrapped in its own try/except so one failing
    # extension cannot prevent the others from being reset.

    # Shutdown function span config C extension
    try:
        from . import _sffuncspan_config

        if SF_DEBUG:
            print("[[DEBUG]] Shutting down _sffuncspan_config before reinit", log=False)
        _sffuncspan_config.shutdown()
    except Exception as e:
        if SF_DEBUG:
            print(
                f"[[DEBUG]] _sffuncspan_config.shutdown() failed (non-fatal): {e}",
                log=False,
            )

    # Shutdown function span C extension
    try:
        if _FUNCSPAN_OK and _sffuncspan:
            if SF_DEBUG:
                print("[[DEBUG]] Shutting down _sffuncspan before reinit", log=False)
            _sffuncspan.shutdown()
    except Exception as e:
        if SF_DEBUG:
            print(f"[[DEBUG]] _sffuncspan.shutdown() failed: {e}", log=False)

    # Shutdown network hop C extension
    try:
        from . import fast_network_hop

        if fast_network_hop._NETWORKHOP_FAST_OK and fast_network_hop._sfnetworkhop:
            if SF_DEBUG:
                print("[[DEBUG]] Shutting down _sfnetworkhop before reinit", log=False)
            fast_network_hop._sfnetworkhop.shutdown()
    except Exception as e:
        if SF_DEBUG:
            print(f"[[DEBUG]] _sfnetworkhop.shutdown() failed: {e}", log=False)

    # Shutdown network request C extension (http.client body/header capture)
    try:
        from .patches.network_libraries import utils as net_utils

        if net_utils._FAST_NETWORKREQUEST_AVAILABLE and net_utils._sffastnetworkrequest:
            if SF_DEBUG:
                print(
                    "[[DEBUG]] Shutting down _sffastnetworkrequest before reinit",
                    log=False,
                )
            net_utils._sffastnetworkrequest.shutdown()
    except Exception as e:
        if SF_DEBUG:
            print(f"[[DEBUG]] _sffastnetworkrequest.shutdown() failed: {e}", log=False)

    # Shutdown fast log C extension (core - shutdown last)
    try:
        if _FAST_OK and _sffastlog:
            if SF_DEBUG:
                print("[[DEBUG]] Shutting down _sffastlog before reinit", log=False)
            _sffastlog.shutdown()
    except Exception as e:
        if SF_DEBUG:
            print(f"[[DEBUG]] _sffastlog.shutdown() failed: {e}", log=False)

    # Reset initialization flags to force reinitialization
    _FAST_PRINT_READY = False
    _FUNCSPAN_READY = False
    _FUNCSPAN_PROFILER = None

    # Reset network hop flag
    # (re-imported here so the name is bound even if an earlier try block's
    # import was skipped by an exception)
    from . import fast_network_hop

    fast_network_hop._FAST_NETWORKHOP_READY = False

    # Reset network request flag
    from .patches.network_libraries import utils as net_utils

    net_utils._FAST_NETWORKREQUEST_INITIALIZED = False

    # Reinitialize C extensions (but not patches)
    _ensure_fast_print_initialized()
    _ensure_funcspan_initialized()
    fast_network_hop._ensure_fast_networkhop_initialized()
    net_utils.init_fast_networkrequest_tracking()

    if SF_DEBUG:
        print(
            f"[[DEBUG]] reinitialize_after_fork() completed in PID {os.getpid()}",
            log=False,
        )
|