sf-veritas 0.10.3__cp311-cp311-manylinux_2_28_x86_64.whl
Potentially problematic release. This version of sf-veritas might be problematic.
- sf_veritas/__init__.py +20 -0
- sf_veritas/_sffastlog.c +889 -0
- sf_veritas/_sffastlog.cpython-311-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sffastnet.c +924 -0
- sf_veritas/_sffastnet.cpython-311-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sffastnetworkrequest.c +730 -0
- sf_veritas/_sffastnetworkrequest.cpython-311-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sffuncspan.c +2155 -0
- sf_veritas/_sffuncspan.cpython-311-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sffuncspan_config.c +617 -0
- sf_veritas/_sffuncspan_config.cpython-311-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sfheadercheck.c +341 -0
- sf_veritas/_sfheadercheck.cpython-311-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sfnetworkhop.c +1451 -0
- sf_veritas/_sfnetworkhop.cpython-311-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sfservice.c +1175 -0
- sf_veritas/_sfservice.cpython-311-x86_64-linux-gnu.so +0 -0
- sf_veritas/_sfteepreload.c +5167 -0
- sf_veritas/app_config.py +49 -0
- sf_veritas/cli.py +336 -0
- sf_veritas/constants.py +10 -0
- sf_veritas/custom_excepthook.py +304 -0
- sf_veritas/custom_log_handler.py +129 -0
- sf_veritas/custom_output_wrapper.py +144 -0
- sf_veritas/custom_print.py +146 -0
- sf_veritas/django_app.py +5 -0
- sf_veritas/env_vars.py +186 -0
- sf_veritas/exception_handling_middleware.py +18 -0
- sf_veritas/exception_metaclass.py +69 -0
- sf_veritas/fast_frame_info.py +116 -0
- sf_veritas/fast_network_hop.py +293 -0
- sf_veritas/frame_tools.py +112 -0
- sf_veritas/funcspan_config_loader.py +556 -0
- sf_veritas/function_span_profiler.py +1174 -0
- sf_veritas/import_hook.py +62 -0
- sf_veritas/infra_details/__init__.py +3 -0
- sf_veritas/infra_details/get_infra_details.py +24 -0
- sf_veritas/infra_details/kubernetes/__init__.py +3 -0
- sf_veritas/infra_details/kubernetes/get_cluster_name.py +147 -0
- sf_veritas/infra_details/kubernetes/get_details.py +7 -0
- sf_veritas/infra_details/running_on/__init__.py +17 -0
- sf_veritas/infra_details/running_on/kubernetes.py +11 -0
- sf_veritas/interceptors.py +497 -0
- sf_veritas/libsfnettee.so +0 -0
- sf_veritas/local_env_detect.py +118 -0
- sf_veritas/package_metadata.py +6 -0
- sf_veritas/patches/__init__.py +0 -0
- sf_veritas/patches/concurrent_futures.py +19 -0
- sf_veritas/patches/constants.py +1 -0
- sf_veritas/patches/exceptions.py +82 -0
- sf_veritas/patches/multiprocessing.py +32 -0
- sf_veritas/patches/network_libraries/__init__.py +76 -0
- sf_veritas/patches/network_libraries/aiohttp.py +281 -0
- sf_veritas/patches/network_libraries/curl_cffi.py +363 -0
- sf_veritas/patches/network_libraries/http_client.py +419 -0
- sf_veritas/patches/network_libraries/httpcore.py +515 -0
- sf_veritas/patches/network_libraries/httplib2.py +204 -0
- sf_veritas/patches/network_libraries/httpx.py +515 -0
- sf_veritas/patches/network_libraries/niquests.py +211 -0
- sf_veritas/patches/network_libraries/pycurl.py +385 -0
- sf_veritas/patches/network_libraries/requests.py +633 -0
- sf_veritas/patches/network_libraries/tornado.py +341 -0
- sf_veritas/patches/network_libraries/treq.py +270 -0
- sf_veritas/patches/network_libraries/urllib_request.py +468 -0
- sf_veritas/patches/network_libraries/utils.py +398 -0
- sf_veritas/patches/os.py +17 -0
- sf_veritas/patches/threading.py +218 -0
- sf_veritas/patches/web_frameworks/__init__.py +54 -0
- sf_veritas/patches/web_frameworks/aiohttp.py +793 -0
- sf_veritas/patches/web_frameworks/async_websocket_consumer.py +317 -0
- sf_veritas/patches/web_frameworks/blacksheep.py +527 -0
- sf_veritas/patches/web_frameworks/bottle.py +502 -0
- sf_veritas/patches/web_frameworks/cherrypy.py +678 -0
- sf_veritas/patches/web_frameworks/cors_utils.py +122 -0
- sf_veritas/patches/web_frameworks/django.py +944 -0
- sf_veritas/patches/web_frameworks/eve.py +395 -0
- sf_veritas/patches/web_frameworks/falcon.py +926 -0
- sf_veritas/patches/web_frameworks/fastapi.py +724 -0
- sf_veritas/patches/web_frameworks/flask.py +520 -0
- sf_veritas/patches/web_frameworks/klein.py +501 -0
- sf_veritas/patches/web_frameworks/litestar.py +551 -0
- sf_veritas/patches/web_frameworks/pyramid.py +428 -0
- sf_veritas/patches/web_frameworks/quart.py +824 -0
- sf_veritas/patches/web_frameworks/robyn.py +697 -0
- sf_veritas/patches/web_frameworks/sanic.py +857 -0
- sf_veritas/patches/web_frameworks/starlette.py +723 -0
- sf_veritas/patches/web_frameworks/strawberry.py +813 -0
- sf_veritas/patches/web_frameworks/tornado.py +481 -0
- sf_veritas/patches/web_frameworks/utils.py +91 -0
- sf_veritas/print_override.py +13 -0
- sf_veritas/regular_data_transmitter.py +409 -0
- sf_veritas/request_interceptor.py +401 -0
- sf_veritas/request_utils.py +550 -0
- sf_veritas/server_status.py +1 -0
- sf_veritas/shutdown_flag.py +11 -0
- sf_veritas/subprocess_startup.py +3 -0
- sf_veritas/test_cli.py +145 -0
- sf_veritas/thread_local.py +970 -0
- sf_veritas/timeutil.py +114 -0
- sf_veritas/transmit_exception_to_sailfish.py +28 -0
- sf_veritas/transmitter.py +132 -0
- sf_veritas/types.py +47 -0
- sf_veritas/unified_interceptor.py +1580 -0
- sf_veritas/utils.py +39 -0
- sf_veritas-0.10.3.dist-info/METADATA +97 -0
- sf_veritas-0.10.3.dist-info/RECORD +132 -0
- sf_veritas-0.10.3.dist-info/WHEEL +5 -0
- sf_veritas-0.10.3.dist-info/entry_points.txt +2 -0
- sf_veritas-0.10.3.dist-info/top_level.txt +1 -0
- sf_veritas.libs/libbrotlicommon-6ce2a53c.so.1.0.6 +0 -0
- sf_veritas.libs/libbrotlidec-811d1be3.so.1.0.6 +0 -0
- sf_veritas.libs/libcom_err-730ca923.so.2.1 +0 -0
- sf_veritas.libs/libcrypt-52aca757.so.1.1.0 +0 -0
- sf_veritas.libs/libcrypto-bdaed0ea.so.1.1.1k +0 -0
- sf_veritas.libs/libcurl-eaa3cf66.so.4.5.0 +0 -0
- sf_veritas.libs/libgssapi_krb5-323bbd21.so.2.2 +0 -0
- sf_veritas.libs/libidn2-2f4a5893.so.0.3.6 +0 -0
- sf_veritas.libs/libk5crypto-9a74ff38.so.3.1 +0 -0
- sf_veritas.libs/libkeyutils-2777d33d.so.1.6 +0 -0
- sf_veritas.libs/libkrb5-a55300e8.so.3.3 +0 -0
- sf_veritas.libs/libkrb5support-e6594cfc.so.0.1 +0 -0
- sf_veritas.libs/liblber-2-d20824ef.4.so.2.10.9 +0 -0
- sf_veritas.libs/libldap-2-cea2a960.4.so.2.10.9 +0 -0
- sf_veritas.libs/libnghttp2-39367a22.so.14.17.0 +0 -0
- sf_veritas.libs/libpcre2-8-516f4c9d.so.0.7.1 +0 -0
- sf_veritas.libs/libpsl-99becdd3.so.5.3.1 +0 -0
- sf_veritas.libs/libsasl2-7de4d792.so.3.0.0 +0 -0
- sf_veritas.libs/libselinux-d0805dcb.so.1 +0 -0
- sf_veritas.libs/libssh-c11d285b.so.4.8.7 +0 -0
- sf_veritas.libs/libssl-60250281.so.1.1.1k +0 -0
- sf_veritas.libs/libunistring-05abdd40.so.2.1.0 +0 -0
- sf_veritas.libs/libuuid-95b83d40.so.1.3.0 +0 -0
sf_veritas/unified_interceptor.py
@@ -0,0 +1,1580 @@
+import atexit
+import builtins
+import functools
+import inspect
+import logging
+import os
+import signal
+import sys
+import threading
+from types import ModuleType
+from typing import Dict, List, Optional, Union
+
+from pydantic import validate_call
+
+from . import app_config
+from .custom_excepthook import (
+    custom_excepthook,
+    custom_thread_excepthook,
+    start_profiling,
+)
+from .custom_log_handler import CustomLogHandler
+from .env_vars import (
+    LOG_LEVEL,
+    PRINT_CONFIGURATION_STATUSES,
+    SF_DEBUG,
+    SF_DISABLE_INBOUND_NETWORK_TRACING_ON_ROUTES,
+    SF_DISABLE_PARENT_DEATH_SIGNAL,
+    SF_PARENT_MONITOR_INTERVAL_MS,
+)
+from .exception_metaclass import PatchedException
+from .interceptors import PrintInterceptor
+from .local_env_detect import set_sf_is_local_flag
+from .patches.network_libraries import patch_all_http_clients
+from .patches.threading import patch_threading
+from .patches.web_frameworks import patch_web_frameworks
+from .shutdown_flag import set_shutdown_flag
+from .thread_local import (
+    _thread_locals,
+    get_or_set_sf_trace_id,
+    get_reentrancy_guard_sys_stdout_active,
+)
+from .timeutil import TimeSync
+
+# Optional native fast path for prints (C extension)
+try:
+    from . import _sffastlog  # provides init_print() and print_()
+
+    _FAST_OK = True
+except Exception:
+    _sffastlog = None
+    _FAST_OK = False
+
+_FAST_PRINT_READY = False  # one-time guard for native print init
+
+# Optional native fast path for service operations (C extension)
+try:
+    from . import _sfservice  # provides service_identifier(), collect_metadata(), etc.
+
+    _SFSERVICE_OK = True
+except Exception:
+    _sfservice = None
+    _SFSERVICE_OK = False
+
+_SFSERVICE_READY = False  # one-time guard for native service init
+
+# Optional native fast path for function spans (C extension)
+try:
+    import sf_veritas._sffuncspan as _sffuncspan
+
+    from .function_span_profiler import init_function_span_profiler
+
+    _FUNCSPAN_OK = True
+except Exception as import_error:
+    _sffuncspan = None
+    _FUNCSPAN_OK = False
+    if os.getenv("SF_DEBUG", "false").lower() == "true":
+        import traceback
+
+        print(
+            f"[[DEBUG]] Failed to import _sffuncspan C extension: {import_error}",
+            file=sys.stderr,
+        )
+        traceback.print_exc(file=sys.stderr)
+
+_FUNCSPAN_READY = False  # one-time guard for native funcspan init
+_FUNCSPAN_PROFILER = None  # global profiler instance
+
+# GraphQL mutation string for prints — keep schema identical to server
+_COLLECT_PRINT_MUTATION = (
+    "mutation CollectPrintStatements("
+    "$apiKey: String!,"
+    "$serviceUuid: String!,"
+    "$sessionId: String!,"
+    "$contents: String!,"
+    "$reentrancyGuardPreactive: Boolean!,"
+    "$library: String!,"
+    "$timestampMs: String!,"
+    "$version: String!"
+    "){collectPrintStatements("
+    "apiKey:$apiKey,serviceUuid:$serviceUuid,sessionId:$sessionId,"
+    "contents:$contents,reentrancyGuardPreactive:$reentrancyGuardPreactive,"
+    "library:$library,timestampMs:$timestampMs,version:$version)}"
+)
+
+# GraphQL mutation string for function spans
+_COLLECT_FUNCTION_SPAN_MUTATION = (
+    "mutation CollectFunctionSpan("
+    "$apiKey: String!,"
+    "$serviceUuid: String!,"
+    "$library: String!,"
+    "$version: String!,"
+    "$sessionId: String!,"
+    "$spanId: String!,"
+    "$parentSpanId: String,"
+    "$filePath: String!,"
+    "$lineNumber: Int!,"
+    "$columnNumber: Int!,"
+    "$functionName: String!,"
+    "$arguments: String!,"
+    "$returnValue: String,"
+    "$startTimeNs: String!,"
+    "$durationNs: String!,"
+    "$timestampMs: String!"
+    "){collectFunctionSpan("
+    "apiKey:$apiKey,serviceUuid:$serviceUuid,library:$library,version:$version,"
+    "sessionId:$sessionId,spanId:$spanId,parentSpanId:$parentSpanId,"
+    "filePath:$filePath,lineNumber:$lineNumber,columnNumber:$columnNumber,"
+    "functionName:$functionName,arguments:$arguments,returnValue:$returnValue,"
+    "startTimeNs:$startTimeNs,durationNs:$durationNs,timestampMs:$timestampMs)}"
+)
+
+# GraphQL mutation string for service identification
+_IDENTIFY_SERVICE_DETAILS_MUTATION = (
+    "mutation IdentifyServiceDetails("
+    "$apiKey: String!,"
+    "$serviceUuid: String!,"
+    "$library: String!,"
+    "$version: String!,"
+    "$serviceIdentifier: String!,"
+    "$serviceVersion: String!,"
+    "$serviceAdditionalMetadata: JSON,"
+    "$gitSha: String!,"
+    "$infrastructureType: String!,"
+    "$infrastructureDetails: JSON,"
+    "$setupInterceptorsFilePath: String!,"
+    "$setupInterceptorsLineNumber: Int!,"
+    "$timestampMs: String!"
+    "){identifyServiceDetails("
+    "apiKey:$apiKey,serviceUuid:$serviceUuid,library:$library,version:$version,"
+    "serviceIdentifier:$serviceIdentifier,serviceVersion:$serviceVersion,"
+    "serviceAdditionalMetadata:$serviceAdditionalMetadata,gitSha:$gitSha,"
+    "infrastructureType:$infrastructureType,infrastructureDetails:$infrastructureDetails,"
+    "setupInterceptorsFilePath:$setupInterceptorsFilePath,"
+    "setupInterceptorsLineNumber:$setupInterceptorsLineNumber,timestampMs:$timestampMs)}"
+)
+
+# GraphQL mutation string for update service details
+_UPDATE_SERVICE_DETAILS_MUTATION = (
+    "mutation UpdateServiceDetails("
+    "$apiKey: String!,"
+    "$serviceUuid: String!,"
+    "$timestampMs: String!,"
+    "$serviceIdentifier: String,"
+    "$serviceVersion: String,"
+    "$serviceAdditionalMetadata: JSON,"
+    "$gitSha: String"
+    "){updateServiceDetails("
+    "apiKey:$apiKey,serviceUuid:$serviceUuid,timestampMs:$timestampMs,"
+    "serviceIdentifier:$serviceIdentifier,serviceVersion:$serviceVersion,"
+    "serviceAdditionalMetadata:$serviceAdditionalMetadata,gitSha:$gitSha)}"
+)
+
+# GraphQL mutation string for collect metadata
+_COLLECT_METADATA_MUTATION = (
+    "mutation CollectMetadata("
+    "$apiKey: String!,"
+    "$serviceUuid: String!,"
+    "$library: String!,"
+    "$version: String!,"
+    "$sessionId: String!,"
+    "$userId: String!,"
+    "$traitsJson: String!,"
+    "$excludedFields: [String!]!,"
+    "$override: Boolean!,"
+    "$timestampMs: String!"
+    "){collectMetadata("
+    "apiKey:$apiKey,serviceUuid:$serviceUuid,library:$library,version:$version,"
+    "sessionId:$sessionId,userId:$userId,traitsJson:$traitsJson,"
+    "excludedFields:$excludedFields,override:$override,timestampMs:$timestampMs)}"
+)
+
+# GraphQL mutation string for domains to not pass header to
+_DOMAINS_TO_NOT_PASS_HEADER_TO_MUTATION = (
+    "mutation DomainsToNotPassHeaderTo("
+    "$apiKey: String!,"
+    "$serviceUuid: String!,"
+    "$library: String!,"
+    "$version: String!,"
+    "$domains: [String!]!,"
+    "$timestampMs: String!"
+    "){domainsToNotPassHeaderTo("
+    "apiKey:$apiKey,serviceUuid:$serviceUuid,library:$library,version:$version,"
+    "domains:$domains,timestampMs:$timestampMs)}"
+)
+
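
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition; not part of this package): the
# mutation strings above are ordinary GraphQL-over-HTTP documents, with the
# values supplied as a separate variables object. The real transport lives in
# the C extensions (_sffastlog, _sfservice); what they POST is assumed here,
# and the endpoint below is a placeholder. A blocking pure-Python request of
# the same shape, using only the standard library:

import json
import urllib.request

GRAPHQL_ENDPOINT = "https://example.invalid/graphql"  # placeholder endpoint


def post_mutation(query: str, variables: dict) -> dict:
    """POST one GraphQL mutation document and return the decoded JSON reply."""
    body = json.dumps({"query": query, "variables": variables}).encode("utf-8")
    req = urllib.request.Request(
        GRAPHQL_ENDPOINT, data=body, headers={"Content-Type": "application/json"}
    )
    with urllib.request.urlopen(req) as resp:  # blocking; the native path queues instead
        return json.loads(resp.read().decode("utf-8"))
# ---------------------------------------------------------------------------
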
+STRINGS_NOT_FOUND_IN_CALLER_LOCATIONS = {
+    "site-packages",
+    "dist-packages",
+    "venv",
+    "/lib/python",
+    "\\lib\\python",
+    "sf-veritas",
+}
+
+
+def _ensure_fast_print_initialized() -> bool:
+    """
+    Lazily init the native print path; becomes a cheap bool check after first success.
+    """
+    global _FAST_PRINT_READY
+    if not _FAST_OK or _FAST_PRINT_READY:
+        return _FAST_PRINT_READY
+
+    endpoint = getattr(app_config, "_sailfish_graphql_endpoint", None)
+    api_key = getattr(app_config, "_sailfish_api_key", None)
+    service_uuid = getattr(app_config, "_service_uuid", None)
+    library = getattr(app_config, "library", "sailfish-python")
+    version = getattr(app_config, "version", "0.0.0")
+    http2 = 1 if os.getenv("SF_NBPOST_HTTP2", "0") == "1" else 0
+
+    if not (endpoint and api_key and service_uuid):
+        return False
+
+    try:
+        ok = _sffastlog.init_print(
+            url=endpoint,
+            query=_COLLECT_PRINT_MUTATION,
+            api_key=str(api_key),
+            service_uuid=str(service_uuid),
+            library=str(library),
+            version=str(version),
+            http2=http2,
+        )
+        _FAST_PRINT_READY = bool(ok)
+    except Exception:
+        _FAST_PRINT_READY = False
+
+    return _FAST_PRINT_READY
+
+
+def _ensure_funcspan_initialized() -> bool:
+    """
+    Lazily init the native function span path; becomes a cheap bool check after first success.
+    """
+    global _FUNCSPAN_READY, _FUNCSPAN_PROFILER
+
+    # PERFORMANCE: Skip function span profiler when testing network library only
+    if os.getenv("TESTING_NETWORK_LIBRARY_ONLY", "0") == "1":
+        if SF_DEBUG:
+            print(
+                "[[DEBUG]] Function span profiler: Disabled (TESTING_NETWORK_LIBRARY_ONLY=1)",
+                log=False,
+            )
+        return False
+
+    if not _FUNCSPAN_OK:
+        if SF_DEBUG:
+            print(
+                "[[DEBUG]] Function span profiler: C extension not available (_FUNCSPAN_OK=False)",
+                log=False,
+            )
+        return False
+
+    if _FUNCSPAN_READY:
+        return _FUNCSPAN_READY
+
+    endpoint = getattr(app_config, "_sailfish_graphql_endpoint", None)
+    api_key = getattr(app_config, "_sailfish_api_key", None)
+    service_uuid = getattr(app_config, "_service_uuid", None)
+    library = getattr(app_config, "library", "sailfish-python")
+    version = getattr(app_config, "version", "0.0.0")
+    http2 = 1 if os.getenv("SF_NBPOST_HTTP2", "0") == "1" else 0
+
+    # Get function span config from env vars
+    enable_profiler = os.getenv("SF_ENABLE_PROFILER", "false").lower() == "true"
+    if SF_DEBUG:
+        print(
+            f"[[DEBUG]] Function span profiler: SF_ENABLE_PROFILER={os.getenv('SF_ENABLE_PROFILER')} -> enable_profiler={enable_profiler}",
+            log=False,
+        )
+
+    if not enable_profiler:
+        if SF_DEBUG:
+            print(
+                "[[DEBUG]] Function span profiler: Disabled (SF_ENABLE_PROFILER not 'true')",
+                log=False,
+            )
+        return False
+
+    if not (endpoint and api_key and service_uuid):
+        if SF_DEBUG:
+            print(
+                f"[[DEBUG]] Function span profiler: Missing config (endpoint={bool(endpoint)}, api_key={bool(api_key)}, service_uuid={bool(service_uuid)})",
+                log=False,
+            )
+        return False
+
+    # Configuration options
+    variable_capture_size_limit_mb = int(os.getenv("SF_FUNCSPAN_VAR_LIMIT_MB", "1"))
+    capture_from_installed_libraries = (
+        os.getenv("SF_FUNCSPAN_CAPTURE_LIBRARIES", "").split(",")
+        if os.getenv("SF_FUNCSPAN_CAPTURE_LIBRARIES")
+        else []
+    )
+    sample_rate = float(os.getenv("SF_FUNCSPAN_SAMPLE_RATE", "1.0"))
+    enable_sampling = (
+        os.getenv("SF_FUNCSPAN_ENABLE_SAMPLING", "false").lower() == "true"
+    )
+    include_django_view_functions = (
+        os.getenv("SF_FUNCSPAN_INCLUDE_DJANGO_VIEW_FUNCTIONS", "false").lower()
+        == "true"
+    )
+
+    try:
+        _FUNCSPAN_PROFILER = init_function_span_profiler(
+            url=endpoint,
+            query=_COLLECT_FUNCTION_SPAN_MUTATION,
+            api_key=str(api_key),
+            service_uuid=str(service_uuid),
+            library=str(library),
+            version=str(version),
+            http2=(http2 == 1),
+            variable_capture_size_limit_mb=variable_capture_size_limit_mb,
+            capture_from_installed_libraries=capture_from_installed_libraries,
+            sample_rate=sample_rate,
+            enable_sampling=enable_sampling,
+            include_django_view_functions=include_django_view_functions,
+            auto_start=True,
+        )
+
+        # Load .sailfish configuration files (directory/file/function-level configs)
+        try:
+            from .funcspan_config_loader import FunctionSpanConfigLoader
+
+            # Get the directory where setup_interceptors() was called from
+            setup_dir = getattr(app_config, "_setup_interceptors_call_filename", None)
+            root_paths = []
+
+            if setup_dir:
+                # Use the directory containing the file that called setup_interceptors()
+                root_paths.append(os.path.dirname(os.path.abspath(setup_dir)))
+
+            # Also add current working directory
+            root_paths.append(os.getcwd())
+
+            if SF_DEBUG:
+                print(
+                    f"[[DEBUG]] Loading .sailfish configs from: {root_paths}",
+                    log=False,
+                )
+
+            config_loader = FunctionSpanConfigLoader(root_paths)
+            config_loader.load_all_configs()
+
+            if SF_DEBUG:
+                print(
+                    "[[DEBUG]] Function span config loader initialized successfully",
+                    log=False,
+                )
+        except Exception as config_error:
+            if SF_DEBUG:
+                print(
+                    f"[[DEBUG]] Failed to load .sailfish configs (non-fatal): {config_error}",
+                    log=False,
+                )
+            # Config loading is optional - don't fail if it doesn't work
+
+        _FUNCSPAN_READY = True
+
+        # Set master kill switch from SF_ENABLE_FUNCTION_SPANS (defaults to "true")
+        enable_function_spans = (
+            os.getenv("SF_ENABLE_FUNCTION_SPANS", "true").lower() == "true"
+        )
+        _sffuncspan.set_function_spans_enabled(enable_function_spans)
+
+        if SF_DEBUG:
+            print(
+                f"[[DEBUG]] Function span capture: SF_ENABLE_FUNCTION_SPANS={os.getenv('SF_ENABLE_FUNCTION_SPANS', 'true')} -> enabled={enable_function_spans}",
+                log=False,
+            )
+
+    except Exception as e:
+        if SF_DEBUG:
+            print(
+                f"[[DEBUG]] Failed to initialize function span profiler: {e}", log=False
+            )
+        _FUNCSPAN_READY = False
+
+    return _FUNCSPAN_READY
+
+
+def _ensure_service_initialized() -> bool:
+    """
+    Lazily init the native service operations path (service_identifier, collect_metadata);
+    becomes a cheap bool check after first success.
+    """
+    global _SFSERVICE_READY
+    if not _SFSERVICE_OK or _SFSERVICE_READY:
+        return _SFSERVICE_READY
+
+    endpoint = getattr(app_config, "_sailfish_graphql_endpoint", None)
+    api_key = getattr(app_config, "_sailfish_api_key", None)
+    service_uuid = getattr(app_config, "_service_uuid", None)
+    library = getattr(app_config, "library", "sailfish-python")
+    version = getattr(app_config, "version", "0.0.0")
+    http2 = 1 if os.getenv("SF_NBPOST_HTTP2", "0") == "1" else 0
+
+    if not (endpoint and api_key and service_uuid):
+        return False
+
+    try:
+        # Initialize the main service module (starts sender thread)
+        ok = _sfservice.init(
+            url=endpoint,
+            query="",  # Not used for init, only for channel-specific inits
+            api_key=str(api_key),
+            service_uuid=str(service_uuid),
+            library=str(library),
+            version=str(version),
+            http2=http2,
+        )
+        if not ok:
+            _SFSERVICE_READY = False
+            return False
+
+        # Initialize service identifier channel
+        ok = _sfservice.init_service_identifier(
+            url=endpoint,
+            query=_IDENTIFY_SERVICE_DETAILS_MUTATION,
+            api_key=str(api_key),
+            service_uuid=str(service_uuid),
+            library=str(library),
+            version=str(version),
+            http2=http2,
+        )
+        if not ok:
+            _SFSERVICE_READY = False
+            return False
+
+        # Initialize collect metadata channel
+        ok = _sfservice.init_collect_metadata(
+            url=endpoint,
+            query=_COLLECT_METADATA_MUTATION,
+            api_key=str(api_key),
+            service_uuid=str(service_uuid),
+            library=str(library),
+            version=str(version),
+            http2=http2,
+        )
+        if not ok:
+            _SFSERVICE_READY = False
+            return False
+
+        # Initialize domains channel
+        ok = _sfservice.init_domains(
+            url=endpoint,
+            query=_DOMAINS_TO_NOT_PASS_HEADER_TO_MUTATION,
+            api_key=str(api_key),
+            service_uuid=str(service_uuid),
+            library=str(library),
+            version=str(version),
+            http2=http2,
+        )
+        if not ok:
+            _SFSERVICE_READY = False
+            return False
+
+        # NOTE: update_service channel is NOT initialized here because the C implementation
+        # is currently incompatible with the GraphQL schema. It will use Python fallback.
+        # The C extension has py_update_service() which takes domains[], but the actual
+        # GraphQL mutation expects service_identifier, service_version, etc.
+        # TODO: Reimplement build_body_update_service() in C to match the schema.
+
+        _SFSERVICE_READY = True
+    except Exception as e:
+        if SF_DEBUG:
+            print(f"[[DEBUG]] Failed to initialize _sfservice: {e}", log=False)
+        _SFSERVICE_READY = False
+
+    return _SFSERVICE_READY
+
+
+def _shutdown_all_c_extensions():
+    """
+    Shutdown all C extensions in the correct order.
+    This function is called on application exit to ensure clean shutdown
+    of background threads and prevent exit code 137 (SIGKILL).
+
+    Order matters: shutdown dependencies first, then core extensions.
+    """
+    # Use sys.stderr.write() for debugging because print() might not work during shutdown
+    import sys
+
+    # Check if shutdown was already called (by signal handler)
+    global _shutdown_handler_called
+    if _shutdown_handler_called:
+        return
+
+    _shutdown_handler_called = True
+
+    # First, set the global shutdown flag (for any Python code that checks it)
+    set_shutdown_flag()
+
+    # Shutdown function span profiler first (depends on funcspan C extension)
+    global _FUNCSPAN_PROFILER
+    if _FUNCSPAN_PROFILER is not None:
+        try:
+            _FUNCSPAN_PROFILER.stop()
+            _FUNCSPAN_PROFILER = None
+        except Exception as e:
+            pass
+
+    # Shutdown function span config C extension
+    try:
+        from . import _sffuncspan_config
+
+        _sffuncspan_config.shutdown()
+    except Exception as e:
+        if SF_DEBUG:
+            sys.stderr.write(f"[SHUTDOWN] _sffuncspan_config.shutdown() failed: {e}\n")
+            sys.stderr.flush()
+
+    # Shutdown function span C extension
+    try:
+        if _FUNCSPAN_OK and _sffuncspan:
+            _sffuncspan.shutdown()
+    except Exception as e:
+        if SF_DEBUG:
+            sys.stderr.write(f"[SHUTDOWN] _sffuncspan.shutdown() failed: {e}\n")
+            sys.stderr.flush()
+
+    # Shutdown network hop C extension
+    try:
+        from . import fast_network_hop
+
+        if fast_network_hop._NETWORKHOP_FAST_OK and fast_network_hop._sfnetworkhop:
+            fast_network_hop._sfnetworkhop.shutdown()
+    except Exception as e:
+        if SF_DEBUG:
+            sys.stderr.write(f"[SHUTDOWN] _sfnetworkhop.shutdown() failed: {e}\n")
+            sys.stderr.flush()
+
+    # Shutdown network request C extension
+    try:
+        from .patches.network_libraries import utils as net_utils
+
+        if net_utils._FAST_NETWORKREQUEST_AVAILABLE and net_utils._sffastnetworkrequest:
+            net_utils._sffastnetworkrequest.shutdown()
+    except Exception as e:
+        if SF_DEBUG:
+            sys.stderr.write(
+                f"[SHUTDOWN] _sffastnetworkrequest.shutdown() failed: {e}\n"
+            )
+            sys.stderr.flush()
+
+    # Shutdown service operations C extension
+    try:
+        if _SFSERVICE_OK and _sfservice:
+            _sfservice.shutdown()
+    except Exception as e:
+        if SF_DEBUG:
+            sys.stderr.write(f"[SHUTDOWN] _sfservice.shutdown() failed: {e}\n")
+            sys.stderr.flush()
+
+    # Shutdown fast log C extension (core - shutdown last)
+    try:
+        if _FAST_OK and _sffastlog:
+            _sffastlog.shutdown()
+    except Exception as e:
+        if SF_DEBUG:
+            sys.stderr.write(f"[SHUTDOWN] _sffastlog.shutdown() failed: {e}\n")
+            sys.stderr.flush()
+
+    if SF_DEBUG:
+        sys.stderr.write(
+            f"[SHUTDOWN] _shutdown_all_c_extensions() completed in PID {os.getpid()}\n"
+        )
+        sys.stderr.flush()
+
+
+# Global flag to prevent calling shutdown handler multiple times
+_shutdown_handler_called = False
+
+# Store the original signal.signal function for monkey-patching
+_original_signal_signal = None
+
+# Track handlers that have already been wrapped to avoid double-wrapping
+_wrapped_handlers = {}  # {signum: wrapped_handler}
+
+# Track if we've already run shutdown for this signal
+_shutdown_by_signal = {}  # {signum: bool}
+
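
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition; not part of this package): the
# shutdown path above is reachable from atexit, wrapped signal handlers, and
# the parent monitor, and the _shutdown_handler_called flag makes it run-once.
# The same idempotent-cleanup pattern in isolation, with an explicit lock
# instead of relying on the GIL, looks like this:

import atexit
import threading

_done = False
_done_lock = threading.Lock()


def shutdown_once() -> None:
    """Run cleanup exactly once, no matter how many exit paths call it."""
    global _done
    with _done_lock:
        if _done:
            return
        _done = True
    # ... stop background senders, flush queues, join worker threads ...


atexit.register(shutdown_once)  # plus direct calls from signal handlers/watchdogs
# ---------------------------------------------------------------------------
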
+
+def _patched_signal_signal(signum, handler):
+    """
+    Monkey-patched version of signal.signal() that intercepts SIGTERM/SIGINT
+    registrations and chains our C extension cleanup before the application's handler.
+
+    This ensures our cleanup runs first, regardless of when frameworks
+    (Django, Celery, Uvicorn, etc.) install their signal handlers.
+    """
+    global _wrapped_handlers, _shutdown_by_signal
+
+    # Only intercept SIGTERM and SIGINT
+    if signum not in (signal.SIGTERM, signal.SIGINT):
+        return _original_signal_signal(signum, handler)
+
+    # ALWAYS log interception (not just SF_DEBUG) for debugging 137 issues
+    sys.stderr.write(
+        f"[SIGNAL_PATCH] Intercepted signal.signal({signum}, {handler}) in PID {os.getpid()}\n"
+    )
+    sys.stderr.flush()
+
+    # Check if this handler is already one we wrapped (avoid double-wrapping)
+    if handler in _wrapped_handlers.values():
+        if SF_DEBUG:
+            sys.stderr.write(
+                f"[SIGNAL_PATCH] Handler already wrapped, passing through\n"
+            )
+            sys.stderr.flush()
+        return _original_signal_signal(signum, handler)
+
+    # Handle special cases
+    if handler == signal.SIG_IGN:
+        # They want to ignore the signal - respect that but still cleanup
+        def wrapped_ignore(sig, frame):
+            if not _shutdown_by_signal.get(sig, False):
+                _shutdown_by_signal[sig] = True
+                sys.stderr.write(
+                    f"[SIGNAL] Received signal {sig} (SIG_IGN), running cleanup\n"
+                )
+                sys.stderr.flush()
+                _shutdown_all_c_extensions()
+
+        wrapped_handler = wrapped_ignore
+
+    elif handler == signal.SIG_DFL:
+        # They want default behavior - cleanup then re-raise
+        def wrapped_default(sig, frame):
+            if not _shutdown_by_signal.get(sig, False):
+                _shutdown_by_signal[sig] = True
+                sys.stderr.write(
+                    f"[SIGNAL] Received signal {sig} (SIG_DFL), running cleanup\n"
+                )
+                sys.stderr.flush()
+                _shutdown_all_c_extensions()
+                # Restore default and re-raise
+                _original_signal_signal(sig, signal.SIG_DFL)
+                os.kill(os.getpid(), sig)
+
+        wrapped_handler = wrapped_default
+
+    elif callable(handler):
+        # They provided a custom handler - chain ours before theirs
+        def wrapped_custom(sig, frame):
+            if not _shutdown_by_signal.get(sig, False):
+                _shutdown_by_signal[sig] = True
+                sys.stderr.write(
+                    f"[SIGNAL] Received signal {sig} in PID {os.getpid()}\n"
+                )
+                sys.stderr.flush()
+                _shutdown_all_c_extensions()
+
+                # Print all remaining threads for debugging
+                import threading
+
+                sys.stderr.write(
+                    f"[SIGNAL] Active threads after shutdown: {threading.active_count()}\n"
+                )
+                for thread in threading.enumerate():
+                    sys.stderr.write(
+                        f"[SIGNAL] - {thread.name} (daemon={thread.daemon}, alive={thread.is_alive()})\n"
+                    )
+                sys.stderr.flush()
+
+            sys.stderr.write(f"[SIGNAL] Calling application handler: {handler}\n")
+            sys.stderr.flush()
+            handler(sig, frame)
+
+        wrapped_handler = wrapped_custom
+    else:
+        # Unknown handler type - pass through
+        if SF_DEBUG:
+            sys.stderr.write(f"[SIGNAL_PATCH] Unknown handler type, passing through\n")
+            sys.stderr.flush()
+        return _original_signal_signal(signum, handler)
+
+    # Track this wrapped handler
+    _wrapped_handlers[signum] = wrapped_handler
+
+    # Install the wrapped handler
+    if SF_DEBUG:
+        sys.stderr.write(
+            f"[SIGNAL_PATCH] Installing wrapped handler for signal {signum}\n"
+        )
+        sys.stderr.flush()
+
+    return _original_signal_signal(signum, wrapped_handler)
+
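
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition; not part of this package): the
# wrapper above patches signal.signal() itself so it catches registrations
# made later. The same cleanup-then-delegate chaining for one already-installed
# handler needs only stdlib calls:

import signal


def chain_cleanup(signum, cleanup):
    """Install a handler that runs `cleanup` first, then the previous handler."""
    previous = signal.getsignal(signum)  # whatever the app/framework installed

    def wrapper(sig, frame):
        cleanup()
        if callable(previous):
            previous(sig, frame)
        elif previous == signal.SIG_DFL:
            signal.signal(sig, signal.SIG_DFL)  # restore default disposition
            signal.raise_signal(sig)            # and re-deliver the signal
        # SIG_IGN / None: nothing further to do

    signal.signal(signum, wrapper)


# Usage: chain_cleanup(signal.SIGTERM, shutdown_once)
# ---------------------------------------------------------------------------
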
+
+def _monitor_parent_process():
+    """
+    Background daemon thread that monitors parent process for death.
+
+    This is a cross-platform solution that works on Linux, macOS, and Windows.
+    It detects when the parent process dies by checking if we've been reparented
+    (parent PID changes, typically to init/PID 1).
+
+    How it works:
+    - Records the initial parent PID at startup
+    - Periodically checks if current parent PID != initial parent PID
+    - When parent dies, we get reparented (usually to PID 1)
+    - Triggers clean shutdown of C extensions immediately
+
+    This handles all cases where parent dies without forwarding signals:
+    - Shell wrappers (sh -c) that don't forward SIGTERM
+    - Process supervisors that exit unexpectedly
+    - Container runtimes that kill parent process
+
+    Check interval configured via: SF_PARENT_MONITOR_INTERVAL_MS (default: 100ms)
+    Set to 0 to disable monitoring.
+    """
+    import time
+
+    # Record initial parent PID
+    initial_parent_pid = os.getppid()
+
+    sys.stderr.write(
+        f"[SAILFISH_INIT] Parent monitor thread started (parent PID: {initial_parent_pid}, check interval: {SF_PARENT_MONITOR_INTERVAL_MS}ms)\n"
+    )
+    sys.stderr.flush()
+
+    # Convert milliseconds to seconds for time.sleep()
+    check_interval_seconds = SF_PARENT_MONITOR_INTERVAL_MS / 1000.0
+
+    try:
+        while True:
+            time.sleep(check_interval_seconds)
+
+            current_parent_pid = os.getppid()
+
+            # Check if parent has changed (we've been reparented)
+            if current_parent_pid != initial_parent_pid:
+                sys.stderr.write(
+                    f"[PARENT_MONITOR] Parent process died! Initial parent PID: {initial_parent_pid}, current parent PID: {current_parent_pid}\n"
+                )
+                sys.stderr.write(
+                    f"[PARENT_MONITOR] Triggering clean shutdown of C extensions...\n"
+                )
+                sys.stderr.flush()
+
+                # Trigger shutdown
+                _shutdown_all_c_extensions()
+
+                sys.stderr.write(
+                    f"[PARENT_MONITOR] Clean shutdown complete, exiting with code 0\n"
+                )
+                sys.stderr.flush()
+
+                # Exit cleanly
+                os._exit(0)
+
+    except Exception as e:
+        # If monitoring fails, log but don't crash the application
+        if SF_DEBUG:
+            sys.stderr.write(
+                f"[PARENT_MONITOR] Monitoring thread error (non-fatal): {e}\n"
+            )
+            sys.stderr.flush()
+
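
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition; not part of this package): the
# monitor above boils down to "poll os.getppid() and react when it changes",
# since a dead parent means this process gets reparented. As a standalone
# watchdog:

import os
import threading
import time


def watch_parent(on_parent_death, interval_s=0.1):
    """Fire `on_parent_death` from a daemon thread once this process is reparented."""
    original_ppid = os.getppid()

    def loop():
        while True:
            time.sleep(interval_s)
            if os.getppid() != original_ppid:  # parent died; usually reparented to PID 1
                on_parent_death()
                return

    t = threading.Thread(target=loop, name="parent-watchdog", daemon=True)
    t.start()
    return t


# Usage: watch_parent(lambda: os._exit(0))
# ---------------------------------------------------------------------------
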
+
+def _setup_parent_death_signal():
+    """
+    On Linux, register to receive SIGTERM when parent process dies.
+    This handles cases where shell wrappers (sh -c) or process supervisors
+    don't forward signals properly in Docker/Kubernetes environments.
+
+    This is a best-effort enhancement that works transparently without
+    requiring customers to modify Dockerfiles, entrypoints, or K8s configs.
+
+    How it works:
+    - Uses Linux prctl(PR_SET_PDEATHSIG, SIGTERM)
+    - When parent process dies, kernel sends SIGTERM to this process
+    - Our monkey-patched signal handlers run
+    - C extensions shut down cleanly
+
+    Platform support:
+    - Linux: Uses prctl(PR_SET_PDEATHSIG)
+    - macOS: Skipped (no prctl)
+    - Windows: Skipped (no prctl)
+
+    Can be disabled via: SF_DISABLE_PARENT_DEATH_SIGNAL=true
+    """
+    # Check if disabled via environment variable
+    if SF_DISABLE_PARENT_DEATH_SIGNAL:
+        if SF_DEBUG:
+            sys.stderr.write(
+                "[SAILFISH_INIT] Parent death signal disabled via SF_DISABLE_PARENT_DEATH_SIGNAL\n"
+            )
+            sys.stderr.flush()
+        return
+
+    # Only supported on Linux
+    if sys.platform != "linux":
+        if SF_DEBUG:
+            sys.stderr.write(
+                f"[SAILFISH_INIT] Parent death signal not supported on {sys.platform}, skipping\n"
+            )
+            sys.stderr.flush()
+        return
+
+    try:
+        import ctypes
+
+        # Load libc
+        try:
+            libc = ctypes.CDLL("libc.so.6")
+        except OSError:
+            # Try alternative libc names
+            try:
+                libc = ctypes.CDLL("libc.so")
+            except OSError:
+                if SF_DEBUG:
+                    sys.stderr.write(
+                        "[SAILFISH_INIT] Could not load libc, parent death signal unavailable\n"
+                    )
+                    sys.stderr.flush()
+                return
+
+        # prctl constants
+        PR_SET_PDEATHSIG = 1  # Set parent death signal
+
+        # Register to receive SIGTERM when parent dies
+        result = libc.prctl(PR_SET_PDEATHSIG, signal.SIGTERM)
+
+        if result == 0:
+            sys.stderr.write(
+                "[SAILFISH_INIT] Registered parent death signal (SIGTERM on parent exit)\n"
+            )
+            sys.stderr.flush()
+        else:
+            if SF_DEBUG:
+                sys.stderr.write(
+                    f"[SAILFISH_INIT] prctl returned {result}, parent death signal may not be active\n"
+                )
+                sys.stderr.flush()
+
+    except AttributeError:
+        # prctl function not available in libc
+        if SF_DEBUG:
+            sys.stderr.write(
+                "[SAILFISH_INIT] prctl not available in libc, parent death signal unavailable\n"
+            )
+            sys.stderr.flush()
+    except Exception as e:
+        # Any other error - log but don't crash
+        if SF_DEBUG:
+            sys.stderr.write(
+                f"[SAILFISH_INIT] Unexpected error setting up parent death signal: {e}\n"
+            )
+            sys.stderr.flush()
+
+
+class UnifiedInterceptor:
+    """
+    Replaces sys.stdout and builtins.print with ultra-thin shims:
+    - direct write to real stdout (no recursion, no lock, no regex)
+    - native fast path to _sffastlog.print_() when available
+    - fallback to PrintInterceptor otherwise
+    """
+
+    __slots__ = (
+        "print_interceptor",
+        "_original_stdout",
+        "_original_stderr",
+    )
+
+    def __init__(self):
+        # Note: CustomLogHandler is created in setup_interceptors() and added to loggers there
+        # We don't need a separate instance here
+        self.print_interceptor = PrintInterceptor()
+        # Use sys.__stdout__ and sys.__stderr__ to get the ORIGINAL streams
+        # before any monkey-patching. This prevents recursion issues when
+        # log=False tries to bypass interceptors.
+        self._original_stdout = sys.__stdout__
+        self._original_stderr = sys.__stderr__
+
+    # -------- sys.stdout replacement --------
+    def write(self, message):
+        """
+        Ultra-thin write path: write to real stdout, then ship via C fast path or fallback.
+        """
+        # Debug logging for Django output capture
+        if SF_DEBUG and message and message.strip():
+            sys.__stderr__.write(
+                f"[DEBUG UnifiedInterceptor.write] {repr(message[:80])}\n"
+            )
+            sys.__stderr__.flush()
+
+        # Respect guards
+        if get_reentrancy_guard_sys_stdout_active() or getattr(
+            _thread_locals, "reentrancy_guard_logging_active", False
+        ):
+            self._original_stdout.write(message)
+            return
+
+        # Always write to the real stdout first; no unconditional flush.
+        self._original_stdout.write(message)
+
+        # Skip empty / newline-only
+        if not message or message == "\n":
+            if SF_DEBUG:
+                sys.__stderr__.write(f"[DEBUG] Skipping empty/newline message\n")
+                sys.__stderr__.flush()
+            return
+
+        # Build/send once
+        _, trace_id = get_or_set_sf_trace_id()
+
+        # Native fast path (ring + libcurl sender)
+        fast_ok = _ensure_fast_print_initialized()
+        if SF_DEBUG:
+            sys.__stderr__.write(f"[DEBUG] Fast print initialized: {fast_ok}\n")
+            sys.__stderr__.flush()
+
+        if fast_ok:
+            try:
+                if SF_DEBUG:
+                    sys.__stderr__.write(
+                        f"[DEBUG] Calling _sffastlog.print_() with message: {repr(message[:50])}\n"
+                    )
+                    sys.__stderr__.flush()
+                _sffastlog.print_(
+                    contents=message, session_id=str(trace_id), preactive=0
+                )
+                if SF_DEBUG:
+                    sys.__stderr__.write(f"[DEBUG] _sffastlog.print_() succeeded\n")
+                    sys.__stderr__.flush()
+                return
+            except Exception as e:
+                if SF_DEBUG:
+                    sys.__stderr__.write(f"[DEBUG] _sffastlog.print_() failed: {e}\n")
+                    sys.__stderr__.flush()
+                pass  # fall back below
+
+        # Fallback to Python interceptor path
+        if SF_DEBUG:
+            sys.__stderr__.write(f"[DEBUG] Using fallback Python interceptor\n")
+            sys.__stderr__.flush()
+        self.print_interceptor.do_send((message, trace_id), trace_id)
+
+    def flush(self):
+        self._original_stdout.flush()
+
+    # -------- print() override --------
+    def create_custom_print(self):
+        """
+        Provide a print function compatible with the builtins.print signature,
+        but as lean as possible.
+        """
+        _orig = self._original_stdout
+        _pi = self.print_interceptor
+
+        def custom_print(*args, sep=" ", end="\n", file=None, flush=False, log=True):
+            # Perform the minimal formatting work once.
+            out = sep.join(map(str, args)) + end
+
+            # Always write to real stdout.
+            # (Ignore 'file' param to avoid recursion into our own wrappers.)
+            _orig.write(out)
+            if flush:
+                _orig.flush()
+
+            if not log:
+                return
+            msg = out
+            if not msg.strip():
+                return
+
+            # Trace id once
+            _, trace_id = get_or_set_sf_trace_id()
+
+            # Native fast path
+            if _ensure_fast_print_initialized():
+                try:
+                    _sffastlog.print_(
+                        contents=msg, session_id=str(trace_id), preactive=0
+                    )
+                    return
+                except Exception:
+                    pass
+
+            # Fallback
+            _pi.do_send((msg, trace_id), trace_id)
+
+        return custom_print
+
+    def __getattr__(self, attr):
+        """
+        Delegate attribute access to original stdout or stderr when needed.
+        """
+        if hasattr(self._original_stdout, attr):
+            return getattr(self._original_stdout, attr)
+        # TODO: If you later intercept stderr, mirror the same behavior
+        # elif hasattr(self._original_stderr, attr):
+        #     return getattr(self._original_stderr, attr)
+        raise AttributeError(
+            f"'{self.__class__.__name__}' object has no attribute '{attr}'"
+        )
+
+    def intercept_stdout(self):
+        """
+        Replace sys.stdout and builtins.print to intercept all output.
+        """
+        if PRINT_CONFIGURATION_STATUSES:
+            self._original_stdout.write("Intercepting stdout and print...\n")
+            self._original_stdout.flush()
+
+        # Replace stdout
+        sys.stdout = self
+        # NOTE: stderr interception left as-is; uncomment if you want parity:
+        # sys.stderr = self
+
+        # Save original print if not already saved
+        if not hasattr(builtins, "_original_print"):
+            builtins._original_print = builtins.print
+
+        # Override builtins.print with our ultra-thin implementation
+        custom_print_function = self.create_custom_print()
+        builtins.print = functools.partial(custom_print_function)
+
+        # Update __builtins__ reference if needed
+        if isinstance(__builtins__, dict):
+            __builtins__["print"] = custom_print_function
+        elif isinstance(__builtins__, ModuleType):
+            setattr(__builtins__, "print", custom_print_function)
+
+        # Also ensure __main__.print and builtins module reference are updated
+        if "__main__" in sys.modules:
+            sys.modules["__main__"].__dict__["print"] = custom_print_function
+        sys.modules["builtins"].print = custom_print_function
+
+        if PRINT_CONFIGURATION_STATUSES:
+            self._original_stdout.write("Intercepting stdout and print...DONE\n")
+            self._original_stdout.flush()
+
+    # -------- exceptions --------
+    def intercept_exceptions(self):
+        start_profiling()
+        if PRINT_CONFIGURATION_STATUSES:
+            self._original_stdout.write("Intercepting uncaught exceptions...\n")
+            self._original_stdout.flush()
+
+        sys.excepthook = custom_excepthook
+        if hasattr(threading, "excepthook"):
+            threading.excepthook = custom_thread_excepthook
+
+        if PRINT_CONFIGURATION_STATUSES:
+            self._original_stdout.write("Intercepting uncaught exceptions...DONE\n")
+            self._original_stdout.flush()
+
+    # TODO - Figure out how to make this work universally
+    def patch_exception_class(self):
+        import builtins as _b
+
+        if hasattr(_b.Exception, "transmit_to_sailfish"):
+            return
+        try:
+            if PRINT_CONFIGURATION_STATUSES:
+                self._original_stdout.write("Monkey-patching Exceptions class...\n")
+                self._original_stdout.flush()
+            _ = _b.Exception
+            _b.Exception = PatchedException
+            if PRINT_CONFIGURATION_STATUSES:
+                self._original_stdout.write("Monkey-patching Exceptions class...DONE\n")
+                self._original_stdout.flush()
+        except Exception as e:
+            print(f"[Warning] Failed to patch `builtins.Exception`: {e}")
+
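
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition; not part of this package): stripped
# of the native fast path and debug plumbing, UnifiedInterceptor is a tee --
# write through to the real stream, then hand a copy to a sink. The minimal
# shape of that proxy:

import sys


class TeeStdout:
    """Write-through stdout proxy that also forwards each chunk to a sink."""

    def __init__(self, sink):
        self._real = sys.__stdout__  # pre-patch stream, so bypass stays possible
        self._sink = sink            # e.g. a queue feeding a background sender

    def write(self, message):
        self._real.write(message)    # user-visible output is never delayed
        if message and message != "\n":
            self._sink(message)

    def flush(self):
        self._real.flush()

    def __getattr__(self, attr):
        return getattr(self._real, attr)  # delegate isatty(), encoding, etc.


# Usage: sys.stdout = TeeStdout(sink=lambda chunk: None)
# ---------------------------------------------------------------------------
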
1091
|
+
# ----------------- setup entrypoint -----------------
|
|
1092
|
+
|
|
1093
|
+
|
|
1094
|
+
@validate_call
|
|
1095
|
+
def setup_interceptors(
|
|
1096
|
+
api_key: str,
|
|
1097
|
+
graphql_endpoint: str = None,
|
|
1098
|
+
service_identifier: Optional[str] = None,
|
|
1099
|
+
service_version: Optional[str] = None,
|
|
1100
|
+
git_sha: Optional[str] = None,
|
|
1101
|
+
service_additional_metadata: Optional[
|
|
1102
|
+
Dict[str, Union[str, int, float, None]]
|
|
1103
|
+
] = None,
|
|
1104
|
+
profiling_mode_enabled: bool = False,
|
|
1105
|
+
profiling_max_depth: int = 5,
|
|
1106
|
+
domains_to_not_propagate_headers_to: Optional[List[str]] = None,
|
|
1107
|
+
routes_to_skip_network_hops: Optional[List[str]] = None,
|
|
1108
|
+
site_and_dist_packages_to_collect_local_variables_on: Optional[List[str]] = None,
|
|
1109
|
+
setup_global_time_at_app_spinup: bool = True,
|
|
1110
|
+
):
|
|
1111
|
+
if service_identifier is None:
|
|
1112
|
+
service_identifier = os.getenv("SERVICE_VERSION", os.getenv("GIT_SHA"))
|
|
1113
|
+
if git_sha is None:
|
|
1114
|
+
git_sha = os.getenv("GIT_SHA")
|
|
1115
|
+
app_config._service_identifier = service_identifier
|
|
1116
|
+
app_config._service_version = service_version
|
|
1117
|
+
app_config._git_sha = git_sha
|
|
1118
|
+
app_config._service_additional_metadata = service_additional_metadata
|
|
1119
|
+
app_config._profiling_mode_enabled = profiling_mode_enabled
|
|
1120
|
+
app_config._profiling_max_depth = profiling_max_depth
|
|
1121
|
+
app_config._set_site_and_dist_packages_to_collect_local_variables_on = (
|
|
1122
|
+
site_and_dist_packages_to_collect_local_variables_on
|
|
1123
|
+
)
|
|
1124
|
+
|
|
1125
|
+
# Use parameter if provided, otherwise fall back to environment variable
|
|
1126
|
+
if routes_to_skip_network_hops is not None:
|
|
1127
|
+
app_config._routes_to_skip_network_hops = routes_to_skip_network_hops
|
|
1128
|
+
else:
|
|
1129
|
+
# Parse env var as comma-separated list
|
|
1130
|
+
if SF_DISABLE_INBOUND_NETWORK_TRACING_ON_ROUTES:
|
|
1131
|
+
app_config._routes_to_skip_network_hops = [
|
|
1132
|
+
p.strip()
|
|
1133
|
+
for p in SF_DISABLE_INBOUND_NETWORK_TRACING_ON_ROUTES.split(",")
|
|
1134
|
+
if p.strip()
|
|
1135
|
+
]
|
|
1136
|
+
else:
|
|
1137
|
+
app_config._routes_to_skip_network_hops = []
|
|
1138
|
+
|
|
1139
|
+
# Capture caller file/line (avoid site-packages etc)
|
|
1140
|
+
for frame in inspect.stack():
|
|
1141
|
+
if any(s in frame.filename for s in STRINGS_NOT_FOUND_IN_CALLER_LOCATIONS):
|
|
1142
|
+
continue
|
|
1143
|
+
app_config._setup_interceptors_call_filename = frame.filename
|
|
1144
|
+
app_config._setup_interceptors_call_lineno = frame.lineno
|
|
1145
|
+
break
|
|
1146
|
+
|
|
1147
|
+
# Configure core endpoints/keys
|
|
1148
|
+
app_config._sailfish_api_key = api_key
|
|
1149
|
+
app_config._sailfish_graphql_endpoint = (
|
|
1150
|
+
graphql_endpoint or app_config._sailfish_graphql_endpoint
|
|
1151
|
+
)
|
|
1152
|
+
|
|
1153
|
+
# Idempotent setup
|
|
1154
|
+
if app_config._interceptors_initialized:
|
|
1155
|
+
if SF_DEBUG:
|
|
1156
|
+
print("[[DEBUG]] Interceptors already set up. Skipping setup.")
|
|
1157
|
+
return
|
|
1158
|
+
|
|
1159
|
+
if not app_config._sailfish_api_key:
|
|
1160
|
+
raise RuntimeError(
|
|
1161
|
+
"The 'api_key' parameter is missing. Please provide a valid value."
|
|
1162
|
+
)
|
|
1163
|
+
|
|
1164
|
+
if PRINT_CONFIGURATION_STATUSES:
|
|
1165
|
+
print("Setting up interceptors")
|
|
1166
|
+
|
|
1167
|
+
# Register shutdown handlers to cleanly stop C extensions and prevent exit code 137
|
|
1168
|
+
# atexit: handles normal Python exit (sys.exit(), end of script, etc.)
|
|
1169
|
+
atexit.register(_shutdown_all_c_extensions)
|
|
1170
|
+
|
|
1171
|
+
# Monkey-patch signal.signal() to intercept ALL signal handler registrations
|
|
1172
|
+
# This ensures our C extension cleanup runs first, regardless of when
|
|
1173
|
+
# frameworks (Django, Celery, Uvicorn, etc.) install their handlers
|
|
1174
|
+
global _original_signal_signal
|
|
1175
|
+
if _original_signal_signal is None: # Only patch once
|
|
1176
|
+
_original_signal_signal = signal.signal
|
|
1177
|
+
signal.signal = _patched_signal_signal
|
|
1178
|
+
|
|
1179
|
+
# ALWAYS log this (not just SF_DEBUG) so we can debug 137 issues
|
|
1180
|
+
sys.stderr.write(
|
|
1181
|
+
f"[SAILFISH_INIT] Monkey-patched signal.signal() in PID {os.getpid()}\n"
|
|
1182
|
+
)
|
|
1183
|
+
sys.stderr.flush()
|
|
1184
|
+
|
|
1185
|
+
# Check if handlers are already registered and wrap them
|
|
1186
|
+
for sig in (signal.SIGTERM, signal.SIGINT):
|
|
1187
|
+
current_handler = signal.getsignal(sig)
|
|
1188
|
+
if current_handler not in (signal.SIG_DFL, signal.SIG_IGN, None):
|
|
1189
|
+
# A handler is already registered - wrap it
|
|
1190
|
+
sys.stderr.write(
|
|
1191
|
+
f"[SAILFISH_INIT] Found existing {sig} handler: {current_handler}, wrapping it\n"
|
|
1192
|
+
)
|
|
1193
|
+
sys.stderr.flush()
|
|
1194
|
+
|
|
1195
|
+
# Use our patched signal.signal to wrap it
|
|
1196
|
+
signal.signal(sig, current_handler)
|
|
1197
|
+
else:
|
|
1198
|
+
sys.stderr.write(
|
|
1199
|
+
f"[SAILFISH_INIT] No existing handler for signal {sig} (current: {current_handler})\n"
|
|
1200
|
+
)
|
|
1201
|
+
sys.stderr.flush()
|
|
1202
|
+
|
|
1203
|
+
# Setup parent death signal (Linux only, best-effort)
|
|
1204
|
+
# This ensures Python receives SIGTERM even when shell wrappers don't forward signals
|
|
1205
|
+
# Critical for Docker/Kubernetes environments where customers can't modify infrastructure
|
|
1206
|
+
_setup_parent_death_signal()
|
|
1207
|
+
+    # Start parent process monitor thread (cross-platform)
+    # This actively detects when parent process dies by checking for reparenting
+    # More reliable than signals, works on all platforms
+    if SF_PARENT_MONITOR_INTERVAL_MS > 0:
+        parent_monitor_thread = threading.Thread(
+            target=_monitor_parent_process,
+            name="sailfish-parent-monitor",
+            daemon=True,  # Daemon thread won't prevent process exit
+        )
+        parent_monitor_thread.start()
+    else:
+        if SF_DEBUG:
+            sys.stderr.write(
+                "[SAILFISH_INIT] Parent monitoring disabled (SF_PARENT_MONITOR_INTERVAL_MS=0)\n"
+            )
+            sys.stderr.flush()
+
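Reviewer note: `_monitor_parent_process` is not shown in this hunk either. The "checking for reparenting" comment suggests a PPID poll; a sketch of that loop, assuming the names used in this hunk, might be:

    import os
    import time

    SF_PARENT_MONITOR_INTERVAL_MS = 500  # placeholder; the package derives this from an env var

    def _monitor_parent_process():
        # When the parent dies, the child is reparented (its PPID changes,
        # typically to 1 or to a subreaper), so a cheap poll detects it.
        original_ppid = os.getppid()
        while True:
            time.sleep(SF_PARENT_MONITOR_INTERVAL_MS / 1000.0)
            if os.getppid() != original_ppid:
                # The real code would stop the C extensions here before exiting.
                os._exit(0)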
+    # Global time sync
+    if setup_global_time_at_app_spinup:
+        TimeSync.get_instance()
+
+    # Local env detect
+    set_sf_is_local_flag()
+
+    # Install hooks
+    unified_interceptor = UnifiedInterceptor()
+    unified_interceptor.intercept_exceptions()
+
+    # Configure logging to capture ALL logs (including those with propagate=False like Uvicorn)
+    logging.basicConfig(level=LOG_LEVEL)
+    custom_handler = CustomLogHandler()
+
+    # Add to root logger (captures all logs with propagate=True)
+    root_logger = logging.getLogger()
+    root_logger.addHandler(custom_handler)
+
+    # OPTIMIZATION: Cache loggers we've already processed to avoid repeated checks
+    # Use dict instead of set for faster lookups (dicts have slightly better cache locality)
+    # This cache tracks which loggers we've seen and don't need to check again
+    _processed_loggers = {}
+
+    # Store reference to check if handler is already added
+    # OPTIMIZATION: Cache the CustomLogHandler class to avoid repeated lookups
+    _handler_class = CustomLogHandler
+
+    def _needs_handler(logger_instance):
+        """Check if logger needs our handler added.
+
+        OPTIMIZED: Use direct iteration instead of generator expression to avoid overhead.
+        """
+        # Fast path: If no handlers, definitely needs one
+        if not logger_instance.handlers:
+            return True
+
+        # Check if our handler is already present (avoid generator overhead)
+        for h in logger_instance.handlers:
+            if isinstance(h, _handler_class):
+                return False
+        return True
+
+    # Monkey-patch logging.Logger.__setattr__ to detect when propagate is set to False
+    # This catches cases where logger is created before our patch, but propagate set later
+    _original_Logger_setattr = logging.Logger.__setattr__
+
+    def _patched_Logger_setattr(self, name, value):
+        _original_Logger_setattr(self, name, value)
+        # If propagate was just set to False, add our handler
+        if name == "propagate" and value is False and self.name:
+            if _needs_handler(self):
+                self.addHandler(custom_handler)
+                _processed_loggers[self.name] = (
+                    self  # Mark as processed (cache logger instance)
+                )
+                if SF_DEBUG:
+                    print(
+                        f"[[DEBUG]] Auto-added handler to {self.name} (propagate set to False)",
+                    )
+
+    logging.Logger.__setattr__ = _patched_Logger_setattr
+
+    # Monkey-patch logging.getLogger() to auto-add handler to propagate=False loggers
+    # This catches loggers retrieved/accessed after setup
+    _original_getLogger = logging.getLogger
+
+    def _patched_getLogger(name=None):
+        # ULTRA-FAST PATH: Early exit for root logger (most common case)
+        # Check BEFORE calling original getLogger to save a function call
+        if name is None or name == "root":
+            return _original_getLogger(name)
+
+        # ULTRA-FAST PATH: Check cache BEFORE calling original getLogger
+        # Dict lookup is ~50ns, saves us from redundant processing
+        # OPTIMIZATION: Use get() with sentinel for faster cache hits (avoids exception overhead)
+        cached = _processed_loggers.get(name, None)
+        if cached is not None:
+            return cached
+
+        # Get logger (only called on first access for this logger name)
+        logger = _original_getLogger(name)
+
+        # OPTIMIZATION: Mark as processed IMMEDIATELY by caching the logger instance
+        # This avoids set operations and gives us O(1) lookup next time
+        _processed_loggers[name] = logger
+
+        # FAST PATH: Only check propagate if it's actually False
+        # Most loggers have propagate=True, so this avoids _needs_handler call
+        # REMOVED: isinstance check - getLogger() always returns a Logger
+        if not logger.propagate:
+            # OPTIMIZATION: Inline _needs_handler check for hot path performance
+            # Fast path: no handlers means we definitely need to add one
+            needs_handler = not logger.handlers
+            if not needs_handler:
+                # Check if our handler is already present (manual loop for early exit)
+                needs_handler = True
+                for h in logger.handlers:
+                    if isinstance(h, _handler_class):
+                        needs_handler = False
+                        break
+
+            if needs_handler:
+                logger.addHandler(custom_handler)
+                if SF_DEBUG:
+                    print(
+                        f"[[DEBUG]] Auto-added handler to {name} (has propagate=False)",
+                        log=False,
+                    )
+
+        return logger
+
+    logging.getLogger = _patched_getLogger
+
+    # Also handle any existing loggers with propagate=False
+    for logger_name in list(logging.Logger.manager.loggerDict.keys()):
+        logger = _original_getLogger(logger_name)
+        if isinstance(logger, logging.Logger) and not logger.propagate:
+            if _needs_handler(logger):
+                logger.addHandler(custom_handler)
+                if SF_DEBUG:
+                    print(
+                        f"[[DEBUG]] Added handler to existing logger {logger_name} (has propagate=False)",
+                    )
+        # Mark all existing loggers as processed to avoid checking them again
+        _processed_loggers[logger_name] = logger
+
+    if SF_DEBUG:
+        print(
+            f"[[DEBUG]] Configured logging: root handler + auto-patching getLogger() and Logger.__setattr__",
+        )
+
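Reviewer note, for context on why the three logging patches above exist: a logger configured with `propagate=False` never forwards records to the root logger, so the root-level `CustomLogHandler` alone would miss it. A self-contained illustration using only the standard library (`StreamHandler` stands in for `CustomLogHandler`, and `uvicorn.access` is just an example name):

    import logging

    root = logging.getLogger()
    root.addHandler(logging.StreamHandler())  # stand-in for CustomLogHandler

    access = logging.getLogger("uvicorn.access")
    access.addHandler(logging.NullHandler())
    access.propagate = False

    # With propagate=False the record stops at "uvicorn.access" and the root
    # handler never sees it; hence the getLogger()/__setattr__ patches that
    # attach the custom handler directly to such loggers.
    access.warning("invisible to root-level handlers")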
+    # stdout + print override (this is the hot path)
+    unified_interceptor.intercept_stdout()
+
+    # Framework wrappers / network patches
+    if SF_DEBUG:
+        print(
+            f"[[DEBUG]] Before patch_web_frameworks, sys.getprofile() = {sys.getprofile()}",
+            log=False,
+        )
+    # Initialize service operations C extension FIRST (before patching)
+    # This ensures the C extension is ready when DomainsToNotPassHeaderToTransmitter
+    # is called during patch_all_http_clients()
+    if _ensure_service_initialized():
+        try:
+            import json
+
+            # Prepare parameters for service_identifier()
+            service_identifier_val = app_config._service_identifier or ""
+            service_version_val = app_config._service_version or ""
+            git_sha_val = app_config._git_sha or ""
+
+            # Serialize additional metadata dict to JSON string
+            service_additional_metadata_json = ""
+            if app_config._service_additional_metadata:
+                try:
+                    service_additional_metadata_json = json.dumps(
+                        app_config._service_additional_metadata
+                    )
+                except Exception as e:
+                    if SF_DEBUG:
+                        print(
+                            f"[[DEBUG]] Failed to serialize service_additional_metadata: {e}",
+                            log=False,
+                        )
+
+            # Get infrastructure details
+            infrastructure_type_val = ""
+            infrastructure_details_json = ""
+            try:
+                infrastructure_type_val = app_config._infra_details.system.value
+                infrastructure_details_json = json.dumps(
+                    app_config._infra_details.details
+                )
+            except Exception as e:
+                if SF_DEBUG:
+                    print(
+                        f"[[DEBUG]] Failed to get infrastructure details: {e}",
+                        log=False,
+                    )
+
+            # Get setup_interceptors call location
+            setup_file_path = app_config._setup_interceptors_call_filename or ""
+            setup_line_number = app_config._setup_interceptors_call_lineno or 0
+
+            # Call the C extension to send service identification
+            _sfservice.service_identifier(
+                service_identifier=service_identifier_val,
+                service_version=service_version_val,
+                service_additional_metadata=service_additional_metadata_json,
+                git_sha=git_sha_val,
+                infrastructure_type=infrastructure_type_val,
+                infrastructure_details=infrastructure_details_json,
+                setup_interceptors_file_path=setup_file_path,
+                setup_interceptors_line_number=setup_line_number,
+            )
+
+            if SF_DEBUG:
+                print(
+                    "[[DEBUG]] Service identification sent via _sfservice C extension",
+                    log=False,
+                )
+        except Exception as e:
+            if SF_DEBUG:
+                print(
+                    f"[[DEBUG]] Failed to send service identification: {e}", log=False
+                )
+
+    # Now that C extension is initialized, apply framework/network patches
+    # The DomainsToNotPassHeaderToTransmitter will use the C extension now
+    patch_web_frameworks(routes_to_skip_network_hops)
+    if SF_DEBUG:
+        print(
+            f"[[DEBUG]] After patch_web_frameworks, sys.getprofile() = {sys.getprofile()}",
+            log=False,
+        )
+    patch_all_http_clients(domains_to_not_propagate_headers_to)
+
+    # Patch ThreadPoolExecutor to copy ContextVars (eliminates lock contention!)
+    patch_threading()
+    if SF_DEBUG:
+        print(
+            f"[[DEBUG]] After patch_threading, ThreadPoolExecutor will copy ContextVars",
+            log=False,
+        )
+
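Reviewer note: the body of `patch_threading()` is not in this hunk. The standard way to make `ThreadPoolExecutor` tasks see the caller's ContextVars is to snapshot the context at submit time and run the task inside that copy; the sketch below shows that idea (the wrapper name `submit_with_context` is illustrative, not the package's API):

    import contextvars
    from concurrent.futures import ThreadPoolExecutor

    request_id = contextvars.ContextVar("request_id", default=None)

    def submit_with_context(executor, fn, *args, **kwargs):
        # Snapshot the caller's ContextVars, then run the task inside that
        # copy on the worker thread.
        ctx = contextvars.copy_context()
        return executor.submit(ctx.run, fn, *args, **kwargs)

    request_id.set("abc123")
    with ThreadPoolExecutor(max_workers=1) as pool:
        print(submit_with_context(pool, request_id.get).result())  # -> abc123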
+    # Initialize function span profiler if enabled
+    if _ensure_funcspan_initialized():
+        if PRINT_CONFIGURATION_STATUSES:
+            print("Function span profiler initialized and started.", log=False)
+        if SF_DEBUG:
+            print(
+                f"[[DEBUG]] After funcspan init, sys.getprofile() = {sys.getprofile()}",
+                log=False,
+            )
+
+    app_config._interceptors_initialized = True
+
+    # CRITICAL: Mark interceptors as ready - this enables profiling
+    # The profiler skips all events until interceptors are fully initialized to prevent
+    # crashes from profiling code in an inconsistent state during initialization.
+    if _FUNCSPAN_OK and _sffuncspan:
+        _sffuncspan.set_interceptors_ready()
+        if SF_DEBUG:
+            print("[[DEBUG]] Profiling enabled (interceptors ready)", log=False)
+
+    if PRINT_CONFIGURATION_STATUSES:
+        print("Interceptors setup completed.", log=False)
+
+
+def reinitialize_after_fork():
+    """
+    Reinitialize only the C extensions after a fork (for multiprocessing frameworks like Robyn).
+    Does NOT re-apply patches - those are inherited from the parent process.
+    Only resets initialization flags and reinitializes C extension background threads/libcurl.
+    """
+    global _FAST_PRINT_READY, _FUNCSPAN_READY, _FUNCSPAN_PROFILER
+
+    if SF_DEBUG:
+        print(
+            f"[[DEBUG]] reinitialize_after_fork() called in PID {os.getpid()}",
+            log=False,
+        )
+
+    # Shutdown C extensions first (resets g_running flag and cleans up state)
+    # Note: We don't call _shutdown_all_c_extensions() here because we're reinitializing,
+    # not shutting down permanently, so we don't want to set the shutdown flag.
+
+    # Shutdown function span config C extension
+    try:
+        from . import _sffuncspan_config
+
+        if SF_DEBUG:
+            print("[[DEBUG]] Shutting down _sffuncspan_config before reinit", log=False)
+        _sffuncspan_config.shutdown()
+    except Exception as e:
+        if SF_DEBUG:
+            print(
+                f"[[DEBUG]] _sffuncspan_config.shutdown() failed (non-fatal): {e}",
+                log=False,
+            )
+
+    # Shutdown function span C extension
+    try:
+        if _FUNCSPAN_OK and _sffuncspan:
+            if SF_DEBUG:
+                print("[[DEBUG]] Shutting down _sffuncspan before reinit", log=False)
+            _sffuncspan.shutdown()
+    except Exception as e:
+        if SF_DEBUG:
+            print(f"[[DEBUG]] _sffuncspan.shutdown() failed: {e}", log=False)
+
+    # Shutdown network hop C extension
+    try:
+        from . import fast_network_hop
+
+        if fast_network_hop._NETWORKHOP_FAST_OK and fast_network_hop._sfnetworkhop:
+            if SF_DEBUG:
+                print("[[DEBUG]] Shutting down _sfnetworkhop before reinit", log=False)
+            fast_network_hop._sfnetworkhop.shutdown()
+    except Exception as e:
+        if SF_DEBUG:
+            print(f"[[DEBUG]] _sfnetworkhop.shutdown() failed: {e}", log=False)
+
+    # Shutdown network request C extension (http.client body/header capture)
+    try:
+        from .patches.network_libraries import utils as net_utils
+
+        if net_utils._FAST_NETWORKREQUEST_AVAILABLE and net_utils._sffastnetworkrequest:
+            if SF_DEBUG:
+                print(
+                    "[[DEBUG]] Shutting down _sffastnetworkrequest before reinit",
+                    log=False,
+                )
+            net_utils._sffastnetworkrequest.shutdown()
+    except Exception as e:
+        if SF_DEBUG:
+            print(f"[[DEBUG]] _sffastnetworkrequest.shutdown() failed: {e}", log=False)
+
+    # Shutdown fast log C extension (core - shutdown last)
+    try:
+        if _FAST_OK and _sffastlog:
+            if SF_DEBUG:
+                print("[[DEBUG]] Shutting down _sffastlog before reinit", log=False)
+            _sffastlog.shutdown()
+    except Exception as e:
+        if SF_DEBUG:
+            print(f"[[DEBUG]] _sffastlog.shutdown() failed: {e}", log=False)
+
+    # Reset initialization flags to force reinitialization
+    _FAST_PRINT_READY = False
+    _FUNCSPAN_READY = False
+    _FUNCSPAN_PROFILER = None
+
+    # Reset network hop flag
+    from . import fast_network_hop
+
+    fast_network_hop._FAST_NETWORKHOP_READY = False
+
+    # Reset network request flag
+    from .patches.network_libraries import utils as net_utils
+
+    net_utils._FAST_NETWORKREQUEST_INITIALIZED = False
+
+    # Reinitialize C extensions (but not patches)
+    _ensure_fast_print_initialized()
+    _ensure_funcspan_initialized()
+    fast_network_hop._ensure_fast_networkhop_initialized()
+    net_utils.init_fast_networkrequest_tracking()
+
+    if SF_DEBUG:
+        print(
+            f"[[DEBUG]] reinitialize_after_fork() completed in PID {os.getpid()}",
+            log=False,
+        )
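Reviewer note: how `reinitialize_after_fork()` gets invoked is not visible in this hunk. One plausible wiring, stated as an assumption rather than the package's confirmed behavior, is a standard fork hook so each worker child rebuilds the C-extension background threads:

    import os

    # after_in_child runs in the child process immediately after fork();
    # daemon threads and libcurl handles do not survive fork, so the child
    # must re-create them.
    os.register_at_fork(after_in_child=reinitialize_after_fork)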