datadog_lambda-5.91.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- datadog_lambda/__init__.py +23 -0
- datadog_lambda/api.py +93 -0
- datadog_lambda/cold_start.py +257 -0
- datadog_lambda/constants.py +53 -0
- datadog_lambda/dogstatsd.py +143 -0
- datadog_lambda/extension.py +29 -0
- datadog_lambda/handler.py +31 -0
- datadog_lambda/logger.py +27 -0
- datadog_lambda/metric.py +136 -0
- datadog_lambda/module_name.py +3 -0
- datadog_lambda/patch.py +159 -0
- datadog_lambda/stats_writer.py +9 -0
- datadog_lambda/statsd_writer.py +17 -0
- datadog_lambda/tag_object.py +68 -0
- datadog_lambda/tags.py +104 -0
- datadog_lambda/thread_stats_writer.py +65 -0
- datadog_lambda/tracing.py +1309 -0
- datadog_lambda/trigger.py +352 -0
- datadog_lambda/wrapper.py +395 -0
- datadog_lambda/xray.py +118 -0
- datadog_lambda-5.91.0.dist-info/LICENSE +203 -0
- datadog_lambda-5.91.0.dist-info/LICENSE-3rdparty.csv +3 -0
- datadog_lambda-5.91.0.dist-info/METADATA +106 -0
- datadog_lambda-5.91.0.dist-info/NOTICE +4 -0
- datadog_lambda-5.91.0.dist-info/RECORD +26 -0
- datadog_lambda-5.91.0.dist-info/WHEEL +4 -0
datadog_lambda/__init__.py
ADDED

@@ -0,0 +1,23 @@
+from datadog_lambda.cold_start import initialize_cold_start_tracing
+from datadog_lambda.logger import initialize_logging
+import os
+
+
+if os.environ.get("DD_INSTRUMENTATION_TELEMETRY_ENABLED") is None:
+    os.environ["DD_INSTRUMENTATION_TELEMETRY_ENABLED"] = "false"
+
+if os.environ.get("DD_API_SECURITY_ENABLED") is None:
+    os.environ["DD_API_SECURITY_ENABLED"] = "False"
+
+initialize_cold_start_tracing()
+
+# The minor version corresponds to the Lambda layer version.
+# E.g., version 0.5.0 gets packaged into layer version 5.
+try:
+    import importlib.metadata as importlib_metadata
+except ModuleNotFoundError:
+    import importlib_metadata
+
+__version__ = importlib_metadata.version(__name__)
+
+initialize_logging(__name__)
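For context, the two environment defaults above only take effect when the variable is unset at import time; a value the user has already set survives. A small sketch of the observable import-time behavior (values are illustrative):

    import os

    # Simulate a user explicitly enabling telemetry before the package loads;
    # the package's default then leaves it untouched.
    os.environ["DD_INSTRUMENTATION_TELEMETRY_ENABLED"] = "true"

    import datadog_lambda  # runs the defaults, cold start tracing, logging setup

    print(datadog_lambda.__version__)                          # e.g. "5.91.0"
    print(os.environ["DD_INSTRUMENTATION_TELEMETRY_ENABLED"])  # still "true"
    print(os.environ["DD_API_SECURITY_ENABLED"])               # defaulted to "False"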
datadog_lambda/api.py
ADDED
@@ -0,0 +1,93 @@
+import os
+import logging
+import base64
+from datadog_lambda.extension import should_use_extension
+
+logger = logging.getLogger(__name__)
+KMS_ENCRYPTION_CONTEXT_KEY = "LambdaFunctionName"
+
+
+def decrypt_kms_api_key(kms_client, ciphertext):
+    from botocore.exceptions import ClientError
+
+    """
+    Decodes and deciphers the base64-encoded ciphertext given as a parameter using KMS.
+    For this to work properly, the Lambda function must have the appropriate IAM permissions.
+
+    Args:
+        kms_client: The KMS client to use for decryption
+        ciphertext (string): The base64-encoded ciphertext to decrypt
+    """
+    decoded_bytes = base64.b64decode(ciphertext)
+
+    """
+    When the API key is encrypted using the AWS console, the function name is added as an
+    encryption context. When the API key is encrypted using the AWS CLI, no encryption context
+    is added. We need to try decrypting the API key both with and without the encryption context.
+    """
+    # Try without encryption context, in case API key was encrypted using the AWS CLI
+    function_name = os.environ.get("AWS_LAMBDA_FUNCTION_NAME")
+    try:
+        plaintext = kms_client.decrypt(CiphertextBlob=decoded_bytes)[
+            "Plaintext"
+        ].decode("utf-8")
+    except ClientError:
+        logger.debug(
+            "Failed to decrypt ciphertext without encryption context, \
+            retrying with encryption context"
+        )
+        # Try with encryption context, in case API key was encrypted using the AWS Console
+        plaintext = kms_client.decrypt(
+            CiphertextBlob=decoded_bytes,
+            EncryptionContext={
+                KMS_ENCRYPTION_CONTEXT_KEY: function_name,
+            },
+        )["Plaintext"].decode("utf-8")
+
+    return plaintext
+
+
+def init_api():
+    if (
+        not should_use_extension
+        and not os.environ.get("DD_FLUSH_TO_LOG", "").lower() == "true"
+    ):
+        # Make sure that this package would always be lazy-loaded/outside from the critical path
+        # since underlying packages are quite heavy to load
+        # and useless when the extension is present
+        from datadog import api
+
+        if not api._api_key:
+            import boto3
+
+            DD_API_KEY_SECRET_ARN = os.environ.get("DD_API_KEY_SECRET_ARN", "")
+            DD_API_KEY_SSM_NAME = os.environ.get("DD_API_KEY_SSM_NAME", "")
+            DD_KMS_API_KEY = os.environ.get("DD_KMS_API_KEY", "")
+            DD_API_KEY = os.environ.get(
+                "DD_API_KEY", os.environ.get("DATADOG_API_KEY", "")
+            )
+
+            if DD_API_KEY_SECRET_ARN:
+                api._api_key = boto3.client("secretsmanager").get_secret_value(
+                    SecretId=DD_API_KEY_SECRET_ARN
+                )["SecretString"]
+            elif DD_API_KEY_SSM_NAME:
+                api._api_key = boto3.client("ssm").get_parameter(
+                    Name=DD_API_KEY_SSM_NAME, WithDecryption=True
+                )["Parameter"]["Value"]
+            elif DD_KMS_API_KEY:
+                kms_client = boto3.client("kms")
+                api._api_key = decrypt_kms_api_key(kms_client, DD_KMS_API_KEY)
+            else:
+                api._api_key = DD_API_KEY
+
+        logger.debug("Setting DATADOG_API_KEY of length %d", len(api._api_key))
+
+        # Set DATADOG_HOST, to send data to a non-default Datadog datacenter
+        api._api_host = os.environ.get(
+            "DATADOG_HOST", "https://api." + os.environ.get("DD_SITE", "datadoghq.com")
+        )
+        logger.debug("Setting DATADOG_HOST to %s", api._api_host)
+
+        # Unmute exceptions from datadog api client, so we can catch and handle them
+        api._mute = False
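To make the key-resolution order above concrete: Secrets Manager wins over SSM, which wins over KMS, which wins over a plaintext DD_API_KEY. A hedged sketch of calling decrypt_kms_api_key directly (the ciphertext below is a placeholder, not a real key):

    import boto3
    from datadog_lambda.api import decrypt_kms_api_key

    kms_client = boto3.client("kms")
    ciphertext = "AQICAHg..."  # hypothetical base64-encoded KMS ciphertext
    api_key = decrypt_kms_api_key(kms_client, ciphertext)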
datadog_lambda/cold_start.py
ADDED

@@ -0,0 +1,257 @@
+import time
+import os
+from typing import List, Hashable
+import logging
+
+logger = logging.getLogger(__name__)
+
+_cold_start = True
+_proactive_initialization = False
+_lambda_container_initialized = False
+_tracer = None
+
+
+def set_cold_start(init_timestamp_ns):
+    """Set the value of the cold start global
+
+    This should be executed once per Lambda execution before the execution
+    """
+    global _cold_start
+    global _lambda_container_initialized
+    global _proactive_initialization
+    global _tracer
+    if not _lambda_container_initialized:
+        now = time.time_ns()
+        if (now - init_timestamp_ns) // 1_000_000_000 > 10:
+            _cold_start = False
+            _proactive_initialization = True
+        else:
+            _cold_start = not _lambda_container_initialized
+    else:
+        _cold_start = False
+        _proactive_initialization = False
+    _lambda_container_initialized = True
+    from ddtrace import tracer as _tracer
+
+
+def is_cold_start():
+    """Returns the value of the global cold_start"""
+    return _cold_start
+
+
+def is_proactive_init():
+    """Returns the value of the global proactive_initialization"""
+    return _proactive_initialization
+
+
+def is_new_sandbox():
+    return is_cold_start() or is_proactive_init()
+
+
+def get_cold_start_tag():
+    """Returns the cold start tag to be used in metrics"""
+    return "cold_start:{}".format(str(is_cold_start()).lower())
+
+
+def get_proactive_init_tag():
+    """Returns the proactive init tag to be used in metrics"""
+    return "proactive_initialization:{}".format(str(is_proactive_init()).lower())
+
+
+class ImportNode(object):
+    def __init__(self, module_name, full_file_path, start_time_ns, end_time_ns=None):
+        self.module_name = module_name
+        self.full_file_path = full_file_path
+        self.start_time_ns = start_time_ns
+        self.end_time_ns = end_time_ns
+        self.children = []
+        self.context = None
+        if _lambda_container_initialized:
+            self.context = _tracer.context_provider.active()
+
+
+root_nodes: List[ImportNode] = []
+import_stack: List[ImportNode] = []
+already_wrapped_loaders = set()
+
+
+def reset_node_stacks():
+    root_nodes.clear()
+    import_stack.clear()
+
+
+def push_node(module_name, file_path):
+    node = ImportNode(module_name, file_path, time.time_ns())
+    global import_stack
+    if import_stack:
+        import_stack[-1].children.append(node)
+    import_stack.append(node)
+
+
+def pop_node(module_name):
+    global import_stack
+    if not import_stack:
+        return
+    node = import_stack.pop()
+    if node.module_name != module_name:
+        return
+    end_time_ns = time.time_ns()
+    node.end_time_ns = end_time_ns
+    if not import_stack:  # import_stack empty, a root node has been found
+        global root_nodes
+        root_nodes.append(node)
+
+
+def wrap_exec_module(original_exec_module):
+    def wrapped_method(module):
+        should_pop = False
+        try:
+            spec = module.__spec__
+            push_node(spec.name, spec.origin)
+            should_pop = True
+        except Exception:
+            pass
+        try:
+            return original_exec_module(module)
+        finally:
+            if should_pop:
+                pop_node(spec.name)
+
+    return wrapped_method
+
+
+def wrap_find_spec(original_find_spec):
+    def wrapped_find_spec(*args, **kwargs):
+        spec = original_find_spec(*args, **kwargs)
+        if spec is None:
+            return None
+        loader = getattr(spec, "loader", None)
+        if (
+            loader is not None
+            and isinstance(loader, Hashable)
+            and loader not in already_wrapped_loaders
+        ):
+            if hasattr(loader, "exec_module"):
+                try:
+                    loader.exec_module = wrap_exec_module(loader.exec_module)
+                    already_wrapped_loaders.add(loader)
+                except Exception as e:
+                    logger.debug("Failed to wrap the loader. %s", e)
+        return spec
+
+    return wrapped_find_spec
+
+
+def initialize_cold_start_tracing():
+    if (
+        is_new_sandbox()
+        and os.environ.get("DD_TRACE_ENABLED", "true").lower() == "true"
+        and os.environ.get("DD_COLD_START_TRACING", "true").lower() == "true"
+    ):
+        from sys import meta_path
+
+        for importer in meta_path:
+            try:
+                importer.find_spec = wrap_find_spec(importer.find_spec)
+            except Exception:
+                pass
+
+
+class ColdStartTracer(object):
+    def __init__(
+        self,
+        tracer,
+        function_name,
+        current_span_start_time_ns,
+        trace_ctx,
+        min_duration_ms: int,
+        ignored_libs: List[str] = None,
+    ):
+        if ignored_libs is None:
+            ignored_libs = []
+        self._tracer = tracer
+        self.function_name = function_name
+        self.current_span_start_time_ns = current_span_start_time_ns
+        self.min_duration_ms = min_duration_ms
+        self.trace_ctx = trace_ctx
+        self.ignored_libs = ignored_libs
+        self.need_to_reactivate_context = True
+
+    def trace(self, root_nodes: List[ImportNode] = root_nodes):
+        if not root_nodes:
+            return
+        cold_start_span_start_time_ns = root_nodes[0].start_time_ns
+        cold_start_span_end_time_ns = min(
+            root_nodes[-1].end_time_ns, self.current_span_start_time_ns
+        )
+        cold_start_span = self.create_cold_start_span(cold_start_span_start_time_ns)
+        while root_nodes:
+            root_node = root_nodes.pop()
+            parent = root_node.context or cold_start_span
+            self.trace_tree(root_node, parent)
+        self.finish_span(cold_start_span, cold_start_span_end_time_ns)
+
+    def trace_tree(self, import_node: ImportNode, parent_span):
+        if (
+            import_node.end_time_ns - import_node.start_time_ns
+            < self.min_duration_ms * 1e6
+            or import_node.module_name in self.ignored_libs
+        ):
+            return
+
+        span = self.start_span(
+            "aws.lambda.import", import_node.module_name, import_node.start_time_ns
+        )
+        tags = {
+            "resource_names": import_node.module_name,
+            "resource.name": import_node.module_name,
+            "filename": import_node.full_file_path,
+            "operation_name": self.get_operation_name(import_node.full_file_path),
+        }
+        span.set_tags(tags)
+        if parent_span:
+            span.parent_id = parent_span.span_id
+        for child_node in import_node.children:
+            self.trace_tree(child_node, span)
+        self.finish_span(span, import_node.end_time_ns)
+
+    def create_cold_start_span(self, start_time_ns):
+        span = self.start_span("aws.lambda.load", self.function_name, start_time_ns)
+        tags = {
+            "resource_names": self.function_name,
+            "resource.name": self.function_name,
+            "operation_name": "aws.lambda.load",
+        }
+        span.set_tags(tags)
+        return span
+
+    def start_span(self, span_type, resource, start_time_ns):
+        if self.need_to_reactivate_context:
+            self._tracer.context_provider.activate(
+                self.trace_ctx
+            )  # reactivate required after each finish() call
+            self.need_to_reactivate_context = False
+        span_kwargs = {
+            "service": "aws.lambda",
+            "resource": resource,
+            "span_type": span_type,
+        }
+        span = self._tracer.trace(span_type, **span_kwargs)
+        span.start_ns = start_time_ns
+        return span
+
+    def finish_span(self, span, finish_time_ns):
+        span.finish(finish_time_ns / 1e9)
+        self.need_to_reactivate_context = True
+
+    def get_operation_name(self, filename: str):
+        if filename is None:
+            return "aws.lambda.import_core_module"
+        if not isinstance(filename, str):
+            return "aws.lambda.import"
+        if filename.startswith("/opt/"):
+            return "aws.lambda.import_layer"
+        elif filename.startswith("/var/lang/"):
+            return "aws.lambda.import_runtime"
+        else:
+            return "aws.lambda.import"
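A minimal sketch of how these pieces fit together; in the real package the driver lives in wrapper.py (not shown here), so the values below are illustrative:

    import time
    from ddtrace import tracer
    from datadog_lambda import cold_start

    # Record whether this sandbox is a cold start or a proactive init.
    cold_start.set_cold_start(init_timestamp_ns=time.time_ns())

    ct = cold_start.ColdStartTracer(
        tracer,
        function_name="my-function",  # hypothetical function name
        current_span_start_time_ns=time.time_ns(),
        trace_ctx=tracer.context_provider.active(),
        min_duration_ms=3,            # skip imports shorter than 3 ms
        ignored_libs=["boto3"],
    )
    ct.trace(cold_start.root_nodes)   # emits aws.lambda.load and per-import spans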
datadog_lambda/constants.py
ADDED

@@ -0,0 +1,53 @@
+# Unless explicitly stated otherwise all files in this repository are licensed
+# under the Apache License Version 2.0.
+# This product includes software developed at Datadog (https://www.datadoghq.com/).
+# Copyright 2019 Datadog, Inc.
+
+# Datadog trace sampling priority
+
+
+class SamplingPriority(object):
+    USER_REJECT = -1
+    AUTO_REJECT = 0
+    AUTO_KEEP = 1
+    USER_KEEP = 2
+
+
+# Datadog trace headers
+class TraceHeader(object):
+    TRACE_ID = "x-datadog-trace-id"
+    PARENT_ID = "x-datadog-parent-id"
+    SAMPLING_PRIORITY = "x-datadog-sampling-priority"
+
+
+# X-Ray subsegment to save Datadog trace metadata
+class XraySubsegment(object):
+    NAME = "datadog-metadata"
+    TRACE_KEY = "trace"
+    LAMBDA_FUNCTION_TAGS_KEY = "lambda_function_tags"
+    NAMESPACE = "datadog"
+
+
+# TraceContextSource of datadog context. The DD_MERGE_XRAY_TRACES
+# feature uses this to determine when to use X-Ray as the parent
+# trace.
+class TraceContextSource(object):
+    XRAY = "xray"
+    EVENT = "event"
+    DDTRACE = "ddtrace"
+
+
+# X-Ray daemon
+class XrayDaemon(object):
+    XRAY_TRACE_ID_HEADER_NAME = "_X_AMZN_TRACE_ID"
+    XRAY_DAEMON_ADDRESS = "AWS_XRAY_DAEMON_ADDRESS"
+    FUNCTION_NAME_HEADER_NAME = "AWS_LAMBDA_FUNCTION_NAME"
+
+
+class Headers(object):
+    Parent_Span_Finish_Time = "x-datadog-parent-span-finish-time"
+
+    # For one request from the client, the event.requestContext.requestIds in the authorizer lambda
+    # invocation and the main function invocation are IDENTICAL. Therefore we can use it to tell
+    # whether current invocation is the actual original authorizing request or a cached request.
+    Authorizing_Request_Id = "x-datadog-authorizing-requestid"
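For reference, these constants are the header names used for Datadog trace propagation; a hypothetical outbound header set might look like:

    from datadog_lambda.constants import SamplingPriority, TraceHeader

    headers = {
        TraceHeader.TRACE_ID: "1234567890",  # placeholder IDs
        TraceHeader.PARENT_ID: "987654321",
        TraceHeader.SAMPLING_PRIORITY: str(SamplingPriority.AUTO_KEEP),  # "1"
    }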
datadog_lambda/dogstatsd.py
ADDED

@@ -0,0 +1,143 @@
+import logging
+import os
+import socket
+import errno
+import re
+from threading import Lock
+
+
+MIN_SEND_BUFFER_SIZE = 32 * 1024
+log = logging.getLogger("datadog_lambda.dogstatsd")
+
+
+class DogStatsd(object):
+    def __init__(self):
+        self._socket_lock = Lock()
+        self.socket_path = None
+        self.host = "localhost"
+        self.port = 8125
+        self.socket = None
+        self.encoding = "utf-8"
+
+    def get_socket(self, telemetry=False):
+        """
+        Return a connected socket.
+
+        Note: connect the socket before assigning it to the class instance to
+        avoid bad thread race conditions.
+        """
+        with self._socket_lock:
+            self.socket = self._get_udp_socket(
+                self.host,
+                self.port,
+            )
+            return self.socket
+
+    @classmethod
+    def _ensure_min_send_buffer_size(cls, sock, min_size=MIN_SEND_BUFFER_SIZE):
+        # Increase the receiving buffer size where needed (e.g. MacOS has 4k RX
+        # buffers, which is half of the max packet size that the client will send).
+        if os.name == "posix":
+            try:
+                recv_buff_size = sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
+                if recv_buff_size <= min_size:
+                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, min_size)
+                    log.debug("Socket send buffer increased to %dkb", min_size / 1024)
+            finally:
+                pass
+
+    @classmethod
+    def _get_udp_socket(cls, host, port):
+        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        sock.setblocking(0)
+        cls._ensure_min_send_buffer_size(sock)
+        sock.connect((host, port))
+
+        return sock
+
+    def distribution(self, metric, value, tags=None):
+        """
+        Send a global distribution value, optionally setting tags.
+
+        >>> statsd.distribution("uploaded.file.size", 1445)
+        >>> statsd.distribution("album.photo.count", 26, tags=["gender:female"])
+        """
+        self._report(metric, "d", value, tags)
+
+    def close_socket(self):
+        """
+        Closes connected socket if connected.
+        """
+        with self._socket_lock:
+            if self.socket:
+                try:
+                    self.socket.close()
+                except OSError as e:
+                    log.error("Unexpected error: %s", str(e))
+                self.socket = None
+
+    def normalize_tags(self, tag_list):
+        TAG_INVALID_CHARS_RE = re.compile(r"[^\w\d_\-:/\.]", re.UNICODE)
+        TAG_INVALID_CHARS_SUBS = "_"
+        return [
+            re.sub(TAG_INVALID_CHARS_RE, TAG_INVALID_CHARS_SUBS, tag)
+            for tag in tag_list
+        ]
+
+    def _serialize_metric(self, metric, metric_type, value, tags):
+        # Create/format the metric packet
+        return "%s:%s|%s%s" % (
+            metric,
+            value,
+            metric_type,
+            ("|#" + ",".join(self.normalize_tags(tags))) if tags else "",
+        )
+
+    def _report(self, metric, metric_type, value, tags):
+        if value is None:
+            return
+
+        payload = self._serialize_metric(metric, metric_type, value, tags)
+
+        # Send it
+        self._send_to_server(payload)
+
+    def _send_to_server(self, packet):
+        try:
+            mysocket = self.socket or self.get_socket()
+            mysocket.send(packet.encode(self.encoding))
+            return True
+        except socket.timeout:
+            # dogstatsd is overflowing, drop the packets (mimics the UDP behaviour)
+            pass
+        except (socket.herror, socket.gaierror) as socket_err:
+            log.warning(
+                "Error submitting packet: %s, dropping the packet and closing the socket",
+                socket_err,
+            )
+            self.close_socket()
+        except socket.error as socket_err:
+            if socket_err.errno == errno.EAGAIN:
+                log.debug(
+                    "Socket send would block: %s, dropping the packet", socket_err
+                )
+            elif socket_err.errno == errno.ENOBUFS:
+                log.debug("Socket buffer full: %s, dropping the packet", socket_err)
+            elif socket_err.errno == errno.EMSGSIZE:
+                log.debug(
+                    "Packet size too big (size: %d): %s, dropping the packet",
+                    len(packet.encode(self.encoding)),
+                    socket_err,
+                )
+            else:
+                log.warning(
+                    "Error submitting packet: %s, dropping the packet and closing the socket",
+                    socket_err,
+                )
+                self.close_socket()
+        except Exception as e:
+            log.error("Unexpected error: %s", str(e))
+        return False
+
+
+statsd = DogStatsd()
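The wire format produced by _serialize_metric is the plain dogstatsd datagram, name:value|type with an optional |#tag list; a quick sketch:

    from datadog_lambda.dogstatsd import statsd

    # Serializes to "hello.dog:1|d|#env:dev" and sends it over UDP
    # to localhost:8125 (where a dogstatsd listener is assumed to run).
    statsd.distribution("hello.dog", 1, tags=["env:dev"])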
datadog_lambda/extension.py
ADDED

@@ -0,0 +1,29 @@
+import logging
+from os import path
+
+AGENT_URL = "http://127.0.0.1:8124"
+FLUSH_PATH = "/lambda/flush"
+EXTENSION_PATH = "/opt/extensions/datadog-agent"
+
+logger = logging.getLogger(__name__)
+
+
+def is_extension_present():
+    if path.exists(EXTENSION_PATH):
+        return True
+    return False
+
+
+def flush_extension():
+    try:
+        import urllib.request
+
+        req = urllib.request.Request(AGENT_URL + FLUSH_PATH, "".encode("ascii"))
+        urllib.request.urlopen(req)
+    except Exception as e:
+        logger.debug("Failed to flush extension, returned with error %s", e)
+        return False
+    return True
+
+
+should_use_extension = is_extension_present()
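A short sketch of how callers typically gate on the extension (the snippet itself is illustrative, not taken from the package):

    from datadog_lambda.extension import flush_extension, should_use_extension

    if should_use_extension:
        # The extension binary exists at /opt/extensions/datadog-agent, so
        # telemetry is flushed via POST http://127.0.0.1:8124/lambda/flush.
        flushed = flush_extension()  # True on success, False on error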
datadog_lambda/handler.py
ADDED

@@ -0,0 +1,31 @@
+# Unless explicitly stated otherwise all files in this repository are licensed
+# under the Apache License Version 2.0.
+# This product includes software developed at Datadog (https://www.datadoghq.com/).
+# Copyright 2020 Datadog, Inc.
+
+from __future__ import absolute_import
+from importlib import import_module
+
+import os
+from datadog_lambda.wrapper import datadog_lambda_wrapper
+from datadog_lambda.module_name import modify_module_name
+
+
+class HandlerError(Exception):
+    pass
+
+
+path = os.environ.get("DD_LAMBDA_HANDLER", None)
+if path is None:
+    raise HandlerError(
+        "DD_LAMBDA_HANDLER is not defined. Can't use prebuilt datadog handler"
+    )
+parts = path.rsplit(".", 1)
+if len(parts) != 2:
+    raise HandlerError("Value %s for DD_LAMBDA_HANDLER has invalid format." % path)
+
+
+(mod_name, handler_name) = parts
+modified_mod_name = modify_module_name(mod_name)
+handler_module = import_module(modified_mod_name)
+handler = datadog_lambda_wrapper(getattr(handler_module, handler_name))
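Deployment-wise, this module is used by pointing the Lambda handler setting at datadog_lambda.handler.handler and naming the real handler in DD_LAMBDA_HANDLER; a hedged sketch (the module and function names are hypothetical):

    import os

    # Normally set in the function configuration, not in code.
    os.environ["DD_LAMBDA_HANDLER"] = "myapp.main.handler"

    # Importing the module resolves DD_LAMBDA_HANDLER, imports myapp.main,
    # and exposes the wrapped callable; raises HandlerError if unset or malformed.
    from datadog_lambda.handler import handler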
datadog_lambda/logger.py
ADDED
@@ -0,0 +1,27 @@
+import logging
+import os
+
+try:
+    _level_mappping = logging.getLevelNamesMapping()
+except AttributeError:
+    # python 3.8
+    _level_mappping = {name: num for num, name in logging._levelToName.items()}
+# https://docs.datadoghq.com/agent/troubleshooting/debug_mode/?tab=agentv6v7#agent-log-level
+_level_mappping.update(
+    {
+        "TRACE": 5,
+        "WARN": logging.WARNING,
+        "OFF": 100,
+    }
+)
+
+
+def initialize_logging(name):
+    logger = logging.getLogger(name)
+    str_level = (os.environ.get("DD_LOG_LEVEL") or "INFO").upper()
+    level = _level_mappping.get(str_level)
+    if level is None:
+        logger.setLevel(logging.INFO)
+        logger.warning("Invalid log level: %s. Defaulting to INFO", str_level)
+    else:
+        logger.setLevel(level)