sf_veritas-0.9.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of sf-veritas might be problematic.
- sf_veritas/.gitignore +2 -0
- sf_veritas/__init__.py +4 -0
- sf_veritas/app_config.py +49 -0
- sf_veritas/cli.py +336 -0
- sf_veritas/constants.py +3 -0
- sf_veritas/custom_excepthook.py +285 -0
- sf_veritas/custom_log_handler.py +53 -0
- sf_veritas/custom_output_wrapper.py +107 -0
- sf_veritas/custom_print.py +34 -0
- sf_veritas/django_app.py +5 -0
- sf_veritas/env_vars.py +83 -0
- sf_veritas/exception_handling_middleware.py +18 -0
- sf_veritas/exception_metaclass.py +69 -0
- sf_veritas/frame_tools.py +112 -0
- sf_veritas/import_hook.py +62 -0
- sf_veritas/infra_details/__init__.py +3 -0
- sf_veritas/infra_details/get_infra_details.py +24 -0
- sf_veritas/infra_details/kubernetes/__init__.py +3 -0
- sf_veritas/infra_details/kubernetes/get_cluster_name.py +147 -0
- sf_veritas/infra_details/kubernetes/get_details.py +7 -0
- sf_veritas/infra_details/running_on/__init__.py +17 -0
- sf_veritas/infra_details/running_on/kubernetes.py +11 -0
- sf_veritas/interceptors.py +252 -0
- sf_veritas/local_env_detect.py +118 -0
- sf_veritas/package_metadata.py +6 -0
- sf_veritas/patches/__init__.py +0 -0
- sf_veritas/patches/concurrent_futures.py +19 -0
- sf_veritas/patches/constants.py +1 -0
- sf_veritas/patches/exceptions.py +82 -0
- sf_veritas/patches/multiprocessing.py +32 -0
- sf_veritas/patches/network_libraries/__init__.py +51 -0
- sf_veritas/patches/network_libraries/aiohttp.py +100 -0
- sf_veritas/patches/network_libraries/curl_cffi.py +93 -0
- sf_veritas/patches/network_libraries/http_client.py +64 -0
- sf_veritas/patches/network_libraries/httpcore.py +152 -0
- sf_veritas/patches/network_libraries/httplib2.py +76 -0
- sf_veritas/patches/network_libraries/httpx.py +123 -0
- sf_veritas/patches/network_libraries/niquests.py +192 -0
- sf_veritas/patches/network_libraries/pycurl.py +71 -0
- sf_veritas/patches/network_libraries/requests.py +187 -0
- sf_veritas/patches/network_libraries/tornado.py +139 -0
- sf_veritas/patches/network_libraries/treq.py +122 -0
- sf_veritas/patches/network_libraries/urllib_request.py +129 -0
- sf_veritas/patches/network_libraries/utils.py +101 -0
- sf_veritas/patches/os.py +17 -0
- sf_veritas/patches/threading.py +32 -0
- sf_veritas/patches/web_frameworks/__init__.py +45 -0
- sf_veritas/patches/web_frameworks/aiohttp.py +133 -0
- sf_veritas/patches/web_frameworks/async_websocket_consumer.py +132 -0
- sf_veritas/patches/web_frameworks/blacksheep.py +107 -0
- sf_veritas/patches/web_frameworks/bottle.py +142 -0
- sf_veritas/patches/web_frameworks/cherrypy.py +246 -0
- sf_veritas/patches/web_frameworks/django.py +307 -0
- sf_veritas/patches/web_frameworks/eve.py +138 -0
- sf_veritas/patches/web_frameworks/falcon.py +229 -0
- sf_veritas/patches/web_frameworks/fastapi.py +145 -0
- sf_veritas/patches/web_frameworks/flask.py +186 -0
- sf_veritas/patches/web_frameworks/klein.py +40 -0
- sf_veritas/patches/web_frameworks/litestar.py +217 -0
- sf_veritas/patches/web_frameworks/pyramid.py +89 -0
- sf_veritas/patches/web_frameworks/quart.py +155 -0
- sf_veritas/patches/web_frameworks/robyn.py +114 -0
- sf_veritas/patches/web_frameworks/sanic.py +120 -0
- sf_veritas/patches/web_frameworks/starlette.py +144 -0
- sf_veritas/patches/web_frameworks/strawberry.py +269 -0
- sf_veritas/patches/web_frameworks/tornado.py +129 -0
- sf_veritas/patches/web_frameworks/utils.py +55 -0
- sf_veritas/print_override.py +13 -0
- sf_veritas/regular_data_transmitter.py +358 -0
- sf_veritas/request_interceptor.py +399 -0
- sf_veritas/request_utils.py +104 -0
- sf_veritas/server_status.py +1 -0
- sf_veritas/shutdown_flag.py +11 -0
- sf_veritas/subprocess_startup.py +3 -0
- sf_veritas/test_cli.py +145 -0
- sf_veritas/thread_local.py +436 -0
- sf_veritas/timeutil.py +114 -0
- sf_veritas/transmit_exception_to_sailfish.py +28 -0
- sf_veritas/transmitter.py +58 -0
- sf_veritas/types.py +44 -0
- sf_veritas/unified_interceptor.py +323 -0
- sf_veritas/utils.py +39 -0
- sf_veritas-0.9.7.dist-info/METADATA +83 -0
- sf_veritas-0.9.7.dist-info/RECORD +86 -0
- sf_veritas-0.9.7.dist-info/WHEEL +4 -0
- sf_veritas-0.9.7.dist-info/entry_points.txt +3 -0
sf_veritas/infra_details/kubernetes/get_cluster_name.py
@@ -0,0 +1,147 @@
import os
import socket

import requests

DEFAULT_CLUSTER_NAME = "UNKNOWN"

# ─── 1. ConfigMap (optional) ─────────────────────────────────


def get_from_config_map(path: str = "/etc/cluster-info/cluster-name"):
    """If you’ve mounted a ConfigMap at this path, read it."""
    try:
        with open(path, "r") as f:
            name = f.read().strip()
            if name:
                return name
    except IOError:
        pass


# ─── 2. Cloud Metadata ────────────────────────────────────────


def get_gke_cluster_name(timeout: float = 1.0):
    """GKE nodes automatically get a 'cluster-name' instance attribute."""
    try:
        resp = requests.get(
            "http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name",
            headers={"Metadata-Flavor": "Google"},
            timeout=timeout,
        )
        if resp.ok and resp.text:
            return resp.text
    except requests.RequestException:
        pass


def get_eks_cluster_name(timeout: float = 1.0):
    """EKS-backed EC2 instances are tagged 'eks:cluster-name'."""
    try:
        # 1) fetch IMDSv2 token
        token = requests.put(
            "http://169.254.169.254/latest/api/token",
            headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
            timeout=timeout,
        ).text
        # 2) read the eks:cluster-name tag
        resp = requests.get(
            "http://169.254.169.254/latest/meta-data/tags/instance/eks:cluster-name",
            headers={"X-aws-ec2-metadata-token": token},
            timeout=timeout,
        )
        if resp.ok and resp.text:
            return resp.text
    except requests.RequestException:
        pass


def get_aks_cluster_name(timeout: float = 1.0):
    """AKS nodes live in a VM RG named MC_<resourceGroup>_<clusterName>_<zone>."""
    try:
        resp = requests.get(
            "http://169.254.169.254/metadata/instance",
            params={"api-version": "2021-02-01", "format": "json"},
            headers={"Metadata": "true"},
            timeout=timeout,
        )
        if resp.ok:
            compute = resp.json().get("compute", {})
            rg = compute.get("resourceGroupName", "")
            parts = rg.split("_")
            if len(parts) >= 3:
                return parts[2]
    except requests.RequestException:
        pass


# ─── 3. Kubernetes API fallback ────────────────────────────────


def get_via_k8s_api(timeout: float = 1.0):
    """
    If you’re running in K8s and have in‑cluster RBAC, try:
      A) ClusterProperty CRD (KEP‑2149)
      B) Node labels on your own Pod’s node
    """
    try:
        from kubernetes import client, config
    except ImportError:
        return

    try:
        # load service account creds
        config.load_incluster_config()

        # A) ClusterProperty CRD
        co = client.CustomObjectsApi()
        props = co.list_cluster_custom_object(
            group="multicluster.k8s.io",
            version="v1alpha6",
            plural="clusterproperties",
        ).get("items", [])
        if props:
            name = props[0].get("spec", {}).get("clusterName")
            if name:
                return name

        # B) read this Pod → its Node → cluster label
        v1 = client.CoreV1Api()
        # Pod name = hostname in K8s
        pod_name = socket.gethostname()
        ns = (
            open("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
            .read()
            .strip()
        )
        pod = v1.read_namespaced_pod(
            name=pod_name, namespace=ns, _request_timeout=timeout
        )
        node = v1.read_node(pod.spec.node_name, _request_timeout=timeout)
        labels = node.metadata.labels or {}
        for key in ("cluster.x-k8s.io/cluster-name", "topology.kubernetes.io/cluster"):
            if labels.get(key):
                return labels[key]
    except Exception:
        pass


# ─── 4. Aggregator ────────────────────────────────────────────


def get_cluster_name():
    for fn in (
        get_from_config_map,
        get_gke_cluster_name,
        get_eks_cluster_name,
        get_aks_cluster_name,
        get_via_k8s_api,
    ):
        try:
            name = fn()
            if name:
                return name
        except Exception:
            continue
    return DEFAULT_CLUSTER_NAME
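For context, a minimal usage sketch (not part of the package) showing how the aggregator's fallback chain behaves, assuming the module path shown in the file list above: each detector returns None or raises on platforms it does not recognize, so the first non-empty name wins and DEFAULT_CLUSTER_NAME is returned only when every probe fails.

# Hypothetical caller, assuming the module layout above.
from sf_veritas.infra_details.kubernetes.get_cluster_name import (
    DEFAULT_CLUSTER_NAME,
    get_cluster_name,
)

name = get_cluster_name()  # tries ConfigMap → GKE → EKS → AKS → K8s API in order
if name == DEFAULT_CLUSTER_NAME:
    print("cluster name could not be determined; using fallback label")
else:
    print(f"running in cluster: {name}")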
sf_veritas/infra_details/running_on/__init__.py
@@ -0,0 +1,17 @@
from enum import Enum

from .kubernetes import kubernetes


class System(Enum):
    KUBERNETES = "Kubernetes"
    UNKNOWN = "Unknown"


def running_on() -> System:
    if kubernetes():
        return System.KUBERNETES
    return System.UNKNOWN


__all__ = ["running_on"]
sf_veritas/infra_details/running_on/kubernetes.py
@@ -0,0 +1,11 @@
import os


def kubernetes() -> bool:
    # 1) service‐account token (default mount in every Pod)
    if os.path.exists("/var/run/secrets/kubernetes.io/serviceaccount/token"):
        return True
    # 2) built‐in K8s env var
    if os.getenv("KUBERNETES_SERVICE_HOST"):
        return True
    return False
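A short, hedged example (not from the package) of how these two helpers compose: running_on() reduces the boolean kubernetes() probe to the System enum, which callers can branch on without touching the filesystem or environment themselves.

# Hypothetical caller, assuming the package is importable as sf_veritas.
from sf_veritas.infra_details.running_on import System, running_on

system = running_on()
if system is System.KUBERNETES:
    print("running inside a Kubernetes Pod")
else:
    print("environment not recognized; treating as", system.value)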
sf_veritas/interceptors.py
@@ -0,0 +1,252 @@
import json
import logging
import re
import threading
import time
import uuid
from typing import Any, Dict, List, Optional

from . import app_config
from .env_vars import SF_DEBUG
from .package_metadata import PACKAGE_LIBRARY_TYPE, __version__
from .regular_data_transmitter import ServiceIdentifier
from .request_utils import non_blocking_post
from .thread_local import (  # reentrancy_guard, activate_reentrancy_guards_logging_preactive,
    activate_reentrancy_guards_logging,
    get_or_set_sf_trace_id,
)
from .timeutil import TimeSync
from .types import CustomJSONEncoderForFrameInfo, FrameInfo
from .utils import serialize_json_with_exclusions, strtobool

logger = logging.getLogger(__name__)


class OutputInterceptor(object):
    def __init__(self, api_key: str = None):
        self.api_key = api_key or app_config._sailfish_api_key
        self.endpoint = app_config._sailfish_graphql_endpoint
        self.operation_name: Optional[str] = ""
        self.query_type = "mutation"
        self.service_identifier = ServiceIdentifier()

    @property
    def query_name(self) -> str:
        return (
            self.operation_name[0].lower() + self.operation_name[1:]
            if self.operation_name
            else ""
        )

    def get_default_variables(self, session_id: Optional[str] = None):
        trace_id = session_id
        if not session_id:
            _, trace_id = get_or_set_sf_trace_id(session_id)
        timestamp_ms = TimeSync.get_instance().get_utc_time_in_ms()
        return {
            "apiKey": self.api_key,
            "serviceUuid": app_config._service_uuid,
            "library": PACKAGE_LIBRARY_TYPE,
            "sessionId": trace_id,
            "timestampMs": str(timestamp_ms),
            "version": __version__,
        }

    def get_variables(
        self,
        additional_variables: Optional[Dict[str, Any]] = None,
        session_id: Optional[str] = None,
    ) -> Dict[str, Any]:
        additional_variables = (
            additional_variables if additional_variables is not None else {}
        )
        return {
            **additional_variables,
            **self.get_default_variables(session_id),
        }

    def check_if_contents_should_be_ignored(
        self, contents
    ):  # pylint: disable=unused-argument
        return False

    def _send_app_identifier(self, session_id: str) -> None:
        if SF_DEBUG:
            print(
                "_send_app_identifier...SENDING DATA...args=",
                set(),
                log=False,
            )
        self.service_identifier.do_send(set())

    def do_send(self, args, session_id: str) -> None:
        self._send_app_identifier(session_id)
        if SF_DEBUG:
            print(f"[[OutputInterceptor.do_send]] session_id={session_id}", log=False)
        try:
            threading.Thread(target=self.send, args=args).start()
        except RuntimeError:
            return


class LogInterceptor(OutputInterceptor):
    def __init__(self, api_key: str = app_config._sailfish_api_key):
        super().__init__(api_key)
        self.operation_name = "CollectLogs"

    def check_if_contents_should_be_ignored(self, contents):
        if SF_DEBUG:
            print(
                "LogInterceptor...check_if_contents_should_be_ignored(self, contents)",
                "||||",
                contents,
                "||||",
                log=False,
            )
        pattern = r"HTTP\s(POST|GET)\s(\/healthz|\/graphql\/)\s.*"
        result = re.match(pattern, contents)
        return result is not None

    def send(
        self, level, contents, session_id: str
    ):  # pylint: disable=arguments-differ
        if SF_DEBUG:
            print(f"LogInterceptor: Running send, session_id={session_id}", log=False)
        if self.check_if_contents_should_be_ignored(contents):
            if SF_DEBUG:
                print("LogInterceptor: EARLY EXIT - contents:", contents, log=False)
            return
        query = f"""
        {self.query_type} {self.operation_name}($apiKey: String!, $serviceUuid: String!, $sessionId: String!, $level: String!, $contents: String!, $reentrancyGuardPreactive: Boolean!, $library: String!, $timestampMs: String!, $version: String!) {{
            {self.query_name}(apiKey: $apiKey, serviceUuid: $serviceUuid, sessionId: $sessionId, level: $level, contents: $contents, reentrancyGuardPreactive: $reentrancyGuardPreactive, library: $library, timestampMs: $timestampMs, version: $version)
        }}
        """
        if SF_DEBUG:
            print(
                "LogInterceptor: non_blocking_post is next",
                "level",
                level,
                "contents:",
                contents,
                log=False,
            )
        non_blocking_post(
            self.endpoint,
            self.operation_name,
            query,
            self.get_variables(
                {
                    "level": level if level else "UNKNOWN",
                    "contents": contents,
                    "reentrancyGuardPreactive": False,
                },
                session_id,
            ),
        )


class PrintInterceptor(OutputInterceptor):
    def __init__(self, api_key: str = app_config._sailfish_api_key):
        super().__init__(api_key)
        self.operation_name = "CollectPrintStatements"

    def send(self, contents, session_id: str):
        if self.check_if_contents_should_be_ignored(contents):
            return
        query = f"""
        {self.query_type} {self.operation_name}($apiKey: String!, $serviceUuid: String!, $sessionId: String!, $contents: String!, $reentrancyGuardPreactive: Boolean!, $library: String!, $timestampMs: String!, $version: String!) {{
            {self.query_name}(apiKey: $apiKey, serviceUuid: $serviceUuid, sessionId: $sessionId, contents: $contents, reentrancyGuardPreactive: $reentrancyGuardPreactive, library: $library, timestampMs: $timestampMs, version: $version)
        }}
        """

        non_blocking_post(
            self.endpoint,
            self.operation_name,
            query,
            self.get_variables(
                {
                    "contents": contents,
                    "reentrancyGuardPreactive": False,
                },
                session_id,
            ),
        )


class ExceptionInterceptor(OutputInterceptor):
    def __init__(self, api_key: str = app_config._sailfish_api_key):
        super().__init__(api_key)
        self.operation_name = "CollectExceptions"

    def send(
        self,
        exception_message: str,
        trace: List[FrameInfo],
        session_id: str,
        was_caught: bool = True,
        is_from_local_service: bool = False
    ):
        query = f"""
        {self.query_type} {self.operation_name}($apiKey: String!, $serviceUuid: String!, $sessionId: String!, $exceptionMessage: String!, $wasCaught: Boolean!, $traceJson: String!, $reentrancyGuardPreactive: Boolean!, $library: String!, $timestampMs: String!, $version: String!, $isFromLocalService: Boolean!) {{
            {self.query_name}(apiKey: $apiKey, serviceUuid: $serviceUuid, sessionId: $sessionId, exceptionMessage: $exceptionMessage, wasCaught: $wasCaught, traceJson: $traceJson, reentrancyGuardPreactive: $reentrancyGuardPreactive, library: $library, timestampMs: $timestampMs, version: $version, isFromLocalService: $isFromLocalService)
        }}
        """

        if SF_DEBUG:
            print("SENDING EXCEPTION...", log=False)
        non_blocking_post(
            self.endpoint,
            self.operation_name,
            query,
            self.get_variables(
                {
                    "apiKey": self.api_key,
                    "exceptionMessage": exception_message,
                    "traceJson": json.dumps(trace, cls=CustomJSONEncoderForFrameInfo),
                    "reentrancyGuardPreactive": False,
                    "wasCaught": was_caught,
                    "isFromLocalService": is_from_local_service
                },
                session_id,
            ),
        )


class CollectMetadataTransmitter(OutputInterceptor):
    def __init__(self, api_key: str = app_config._sailfish_api_key):
        super().__init__(api_key)
        self.operation_name = "CollectMetadata"

    def send(
        self,
        user_id: str,
        traits: Optional[Dict[str, Any]],
        traits_json: Optional[str],
        override: bool,
        session_id: str,
    ):
        if traits is None and traits_json is None:
            raise Exception(
                'Must pass in either traits or traits_json to "add_or_update_traits"'
            )
        query = f"""
        {self.query_type} {self.operation_name}($apiKey: String!, $serviceUuid: String!, $sessionId: String!, $userId: String!, $traitsJson: String!, $excludedFields: [String!]!, $library: String!, $timestampMs: String!, $version: String!, $override: Boolean!) {{
            {self.query_name}(apiKey: $apiKey, serviceUuid: $serviceUuid, sessionId: $sessionId, userId: $userId, traitsJson: $traitsJson, excludedFields: $excludedFields, library: $library, timestampMs: $timestampMs, version: $version, override: $override)
        }}
        """

        excluded_fields = []
        if traits_json is None:
            traits_json, excluded_fields = serialize_json_with_exclusions(traits)

        variables = self.get_variables(
            {
                "userId": user_id,
                "traitsJson": traits_json,
                "excludedFields": excluded_fields,
                "override": override,
            },
            session_id,
        )

        non_blocking_post(self.endpoint, self.operation_name, query, variables)
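One behavioral detail of get_variables worth calling out: because get_default_variables() is unpacked after additional_variables, the default fields win on any key collision (for example, the "apiKey" passed by ExceptionInterceptor is simply overwritten with the same configured value). A standalone sketch of that merge order, using hypothetical values rather than the package's configuration:

# Standalone sketch of the merge order in OutputInterceptor.get_variables:
# the dictionary unpacked last wins on key collisions.
additional = {"contents": "hello", "apiKey": "caller-supplied"}
defaults = {"apiKey": "configured-key", "sessionId": "abc123"}

merged = {**additional, **defaults}
assert merged["apiKey"] == "configured-key"   # default overrides the caller's value
assert merged["contents"] == "hello"          # caller-only keys pass through
print(merged)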
sf_veritas/local_env_detect.py
@@ -0,0 +1,118 @@
from __future__ import annotations
import os, sys, socket, urllib.request, urllib.error

DEFAULT_TIMEOUT_S = 0.15

def _quick_http(url: str, headers: dict[str, str] | None = None, timeout: float = DEFAULT_TIMEOUT_S) -> tuple[int | None, str]:
    req = urllib.request.Request(url, headers=headers or {}, method="GET")
    try:
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            return resp.getcode(), "ok"
    except urllib.error.HTTPError as e:
        return e.code, "http_error"
    except Exception as e:
        return None, str(e)

def _is_cloud_instance() -> tuple[bool, str]:
    try:
        import urllib.request as _u
        tok_req = _u.Request(
            "http://169.254.169.254/latest/api/token",
            headers={"X-aws-ec2-metadata-token-ttl-seconds": "60"},
            method="PUT",
        )
        with _u.urlopen(tok_req, timeout=DEFAULT_TIMEOUT_S) as r:
            if r.getcode() == 200:
                return True, "aws-imdsv2"
    except urllib.error.HTTPError as e:
        if e.code in (401, 403, 404, 405):
            return True, f"aws-imds({e.code})"
    except Exception:
        pass

    code, _ = _quick_http("http://169.254.169.254/latest/meta-data/")
    if code == 200:
        return True, "aws-imdsv1"

    code, _ = _quick_http(
        "http://169.254.169.254/computeMetadata/v1/instance/id",
        headers={"Metadata-Flavor": "Google"},
    )
    if code == 200:
        return True, "gcp-metadata"

    code, _ = _quick_http(
        "http://169.254.169.254/metadata/instance?api-version=2021-02-01",
        headers={"Metadata": "true"},
    )
    if code == 200:
        return True, "azure-imds"

    return False, "no-cloud-metadata"

def _resolves_host_docker_internal() -> bool:
    try:
        socket.gethostbyname("host.docker.internal")
        return True
    except Exception:
        return False

# ---- globals to hold state ----
SF_IS_LOCAL_ENV: bool | None = None
SF_LOCAL_ENV_REASON: str | None = None


def _detect() -> tuple[bool, str]:
    """Detect environment once. Raise nothing; always return a tuple."""
    try:
        if any(os.getenv(k) for k in (
            "CI", "GITHUB_ACTIONS", "GITLAB_CI", "CIRCLECI",
            "BUILDkite", "TEAMCITY_VERSION", "JENKINS_URL", "DRONE"
        )):
            return (False, "ci-env-detected")

        on_cloud, cloud_reason = _is_cloud_instance()
        if on_cloud:
            return (False, cloud_reason)

        if sys.platform in ("darwin", "win32"):
            return (True, f"desktop-os:{sys.platform}")
        try:
            if "microsoft" in os.uname().release.lower() \
               or "microsoft" in open("/proc/version", "rt", errors="ignore").read().lower():
                return (True, "wsl-kernel")
        except OSError:
            pass

        if _resolves_host_docker_internal():
            return (True, "docker-desktop-dns")

        return (True, "no-cloud-metadata-and-no-ci")

    except Exception as e:
        # fallback: treat as local if detection fails
        return (True, f"detect-error:{type(e).__name__}")


def set_sf_is_local_flag() -> None:
    """
    Run detection once and store results in global variables.
    Call this at app startup. Never raises.
    """
    global SF_IS_LOCAL_ENV, SF_LOCAL_ENV_REASON
    try:
        SF_IS_LOCAL_ENV, SF_LOCAL_ENV_REASON = _detect()
    except Exception as e:
        # absolute fallback, so setup never fails
        SF_IS_LOCAL_ENV, SF_LOCAL_ENV_REASON = True, f"setup-error:{type(e).__name__}"


def sf_is_local_dev_environment() -> tuple[bool, str]:
    """
    Return cached values if sf_set_is_local_flag() has been called,
    otherwise run detection on the fly. Never raises.
    """
    global SF_IS_LOCAL_ENV, SF_LOCAL_ENV_REASON
    if SF_IS_LOCAL_ENV is None or SF_LOCAL_ENV_REASON is None:
        set_sf_is_local_flag()
    return SF_IS_LOCAL_ENV, SF_LOCAL_ENV_REASON
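A hedged usage sketch (not part of the package) of the caching behavior above: detection runs once, and later calls return the cached (flag, reason) pair even if the environment changes afterwards.

# Hypothetical startup code, assuming sf_veritas/local_env_detect.py is importable.
from sf_veritas import local_env_detect

local_env_detect.set_sf_is_local_flag()                    # run detection once at startup
is_local, reason = local_env_detect.sf_is_local_dev_environment()  # returns cached values
print(f"local dev environment: {is_local} ({reason})")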
sf_veritas/patches/__init__.py (file without changes)
sf_veritas/patches/concurrent_futures.py
@@ -0,0 +1,19 @@
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from ..thread_local import get_context, set_context

_original_submit = ThreadPoolExecutor.submit


def patched_submit(self, fn, *args, **kwargs):
    current_context = get_context()

    def wrapped_fn(*fn_args, **fn_kwargs):
        set_context(current_context)
        fn(*fn_args, **fn_kwargs)

    return _original_submit(self, wrapped_fn, *args, **kwargs)


def patch_concurrent_futures():
    ThreadPoolExecutor.submit = patched_submit
    ProcessPoolExecutor.submit = patched_submit
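patched_submit captures the caller's context via the package's get_context()/set_context() helpers and re-applies it inside the worker thread; note that wrapped_fn does not return fn's result, so a Future obtained through the patched submit resolves to None. A self-contained sketch of the same propagation idea using the standard library's contextvars (hypothetical names, not the package's API), which also preserves the return value:

import contextvars
from concurrent.futures import ThreadPoolExecutor

# Hypothetical stand-in for the package's thread-local request context.
trace_id = contextvars.ContextVar("trace_id", default=None)


def submit_with_context(executor, fn, *args, **kwargs):
    # Capture the caller's context and run fn inside a copy of it in the worker
    # thread, mirroring what patched_submit does with get_context()/set_context().
    snapshot = contextvars.copy_context()
    return executor.submit(snapshot.run, fn, *args, **kwargs)


def work():
    return trace_id.get()


trace_id.set("req-42")
with ThreadPoolExecutor(max_workers=1) as pool:
    future = submit_with_context(pool, work)
    print(future.result())  # -> req-42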
sf_veritas/patches/constants.py
@@ -0,0 +1 @@
supported_network_verbs = ("get", "post", "put", "patch", "delete", "head", "options")
sf_veritas/patches/exceptions.py
@@ -0,0 +1,82 @@
import importlib.util
import sys
import threading
import time
from importlib import abc

from ..env_vars import PRINT_CONFIGURATION_STATUSES

# Thread-local storage to avoid re-entry problems
patch_lock = threading.local()


def patch_exceptions(module):
    if hasattr(patch_lock, "active"):
        return
    patch_lock.active = True


class ExceptionPatchingFinder(abc.MetaPathFinder):
    def find_spec(self, fullname, path, target=None):
        if hasattr(patch_lock, "loading") and patch_lock.loading:
            return None
        try:
            patch_lock.loading = True
            original_spec = importlib.util.find_spec(fullname, path)
            if original_spec:
                return importlib.util.spec_from_loader(
                    fullname,
                    ExceptionPatchingLoader(original_spec.loader),
                    origin=original_spec.origin,
                )
            return None
        finally:
            patch_lock.loading = False


class ExceptionPatchingLoader(abc.Loader):
    def __init__(self, loader):
        self._original_loader = loader

    def create_module(self, spec):
        return self._original_loader.create_module(spec)

    def exec_module(self, module):
        self._original_loader.exec_module(module)
        patch_exceptions(module)


def install_import_hook():
    if PRINT_CONFIGURATION_STATUSES:
        print("EXCEPTIONS - install_import_hook", log=False)
    sys.meta_path.insert(0, ExceptionPatchingFinder())
    if PRINT_CONFIGURATION_STATUSES:
        print("EXCEPTIONS - install_import_hook...DONE", log=False)


# Initially store the current state of sys.excepthook
original_excepthook = sys.excepthook


def monitor_excepthook(interval=1):
    global original_excepthook

    while True:
        current_hook = sys.excepthook
        if current_hook != original_excepthook and PRINT_CONFIGURATION_STATUSES:
            if PRINT_CONFIGURATION_STATUSES:
                print("sys.excepthook has been modified!")
            original_excepthook = current_hook
            continue
        if PRINT_CONFIGURATION_STATUSES:
            print("No change detected in sys.excepthook.")

        # Pause for the specified interval before the next check
        time.sleep(interval)


# Function to start monitoring in a separate thread
def start_monitoring(interval=2):
    thread = threading.Thread(target=monitor_excepthook, args=(interval,))
    # thread.daemon = True  # This makes the thread exit when the main program exits
    thread.start()