datadog_lambda-5.91.0-py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
datadog_lambda/metric.py ADDED
@@ -0,0 +1,136 @@
+ # Unless explicitly stated otherwise all files in this repository are licensed
+ # under the Apache License Version 2.0.
+ # This product includes software developed at Datadog (https://www.datadoghq.com/).
+ # Copyright 2019 Datadog, Inc.
+
+ import os
+ import json
+ import time
+ import logging
+
+ from datadog_lambda.extension import should_use_extension
+ from datadog_lambda.tags import get_enhanced_metrics_tags, tag_dd_lambda_layer
+ from datadog_lambda.api import init_api
+
+ logger = logging.getLogger(__name__)
+
+ lambda_stats = None
+
+ init_api()
+
+ if should_use_extension:
+     from datadog_lambda.statsd_writer import StatsDWriter
+
+     lambda_stats = StatsDWriter()
+ else:
+     # Periodic flushing in a background thread is NOT guaranteed to succeed
+     # and can lead to data loss. When disabled, metrics are only flushed at the
+     # end of the invocation. To make metrics submitted from a long-running Lambda
+     # function available sooner, consider using the Datadog Lambda extension.
+     from datadog_lambda.thread_stats_writer import ThreadStatsWriter
+
+     flush_in_thread = os.environ.get("DD_FLUSH_IN_THREAD", "").lower() == "true"
+     lambda_stats = ThreadStatsWriter(flush_in_thread)
+
+
+ def lambda_metric(metric_name, value, timestamp=None, tags=None, force_async=False):
+     """
+     Submit a data point to Datadog distribution metrics.
+     https://docs.datadoghq.com/graphing/metrics/distributions/
+
+     When DD_FLUSH_TO_LOG is True, the metric is written to the log, and
+     the Datadog Log Forwarder Lambda function submits it to Datadog
+     asynchronously.
+
+     Otherwise, the metric is submitted to the Datadog API periodically
+     and at the end of the function execution, from a background thread.
+
+     Note that if the extension is present, it takes precedence over
+     DD_FLUSH_TO_LOG, and metrics are always sent to the extension.
+     """
+     flush_to_logs = os.environ.get("DD_FLUSH_TO_LOG", "").lower() == "true"
+     tags = tag_dd_lambda_layer(tags)
+
+     if should_use_extension:
+         logger.debug(
+             "Sending metric %s value %s to Datadog via extension", metric_name, value
+         )
+         lambda_stats.distribution(metric_name, value, tags=tags, timestamp=timestamp)
+     else:
+         if flush_to_logs or force_async:
+             write_metric_point_to_stdout(
+                 metric_name, value, timestamp=timestamp, tags=tags
+             )
+         else:
+             lambda_stats.distribution(
+                 metric_name, value, tags=tags, timestamp=timestamp
+             )
+
+
+ def write_metric_point_to_stdout(metric_name, value, timestamp=None, tags=None):
+     """Writes the specified metric point to standard output"""
+     logger.debug(
+         "Sending metric %s value %s to Datadog via log forwarder", metric_name, value
+     )
+     print(
+         json.dumps(
+             {
+                 "m": metric_name,
+                 "v": value,
+                 "e": timestamp or int(time.time()),
+                 "t": tags or [],
+             }
+         )
+     )
+
+
+ def flush_stats():
+     lambda_stats.flush()
+
+
+ def are_enhanced_metrics_enabled():
+     """Check the env var to see whether enhanced metrics should be submitted
+
+     Returns:
+         boolean for whether enhanced metrics are enabled
+     """
+     # DD_ENHANCED_METRICS defaults to true
+     return os.environ.get("DD_ENHANCED_METRICS", "true").lower() == "true"
+
+
+ def submit_enhanced_metric(metric_name, lambda_context):
+     """Submits the enhanced metric with the given name
+
+     Args:
+         metric_name (str): metric name without the enhanced prefix, e.g. "invocations" or "errors"
+         lambda_context (object): Lambda context object passed to the function by AWS
+     """
+     if not are_enhanced_metrics_enabled():
+         logger.debug(
+             "Not submitting enhanced metric %s because enhanced metrics are disabled",
+             metric_name,
+         )
+         return
+     tags = get_enhanced_metrics_tags(lambda_context)
+     metric_name = "aws.lambda.enhanced." + metric_name
+     # Enhanced metrics always use an async submission method (e.g., logs or extension).
+     lambda_metric(metric_name, 1, timestamp=None, tags=tags, force_async=True)
+
+
+ def submit_invocations_metric(lambda_context):
+     """Increment aws.lambda.enhanced.invocations by 1, applying runtime, layer, and cold_start tags
+
+     Args:
+         lambda_context (object): Lambda context object passed to the function by AWS
+     """
+     submit_enhanced_metric("invocations", lambda_context)
+
+
+ def submit_errors_metric(lambda_context):
+     """Increment aws.lambda.enhanced.errors by 1, applying runtime, layer, and cold_start tags
+
+     Args:
+         lambda_context (object): Lambda context object passed to the function by AWS
+     """
+     submit_enhanced_metric("errors", lambda_context)
datadog_lambda/module_name.py ADDED
@@ -0,0 +1,3 @@
+ def modify_module_name(module_name):
+     """Return a module path with "/" separators converted to "." so it can be imported"""
+     return ".".join(module_name.split("/"))
datadog_lambda/patch.py ADDED
@@ -0,0 +1,159 @@
+ # Unless explicitly stated otherwise all files in this repository are licensed
+ # under the Apache License Version 2.0.
+ # This product includes software developed at Datadog (https://www.datadoghq.com/).
+ # Copyright 2019 Datadog, Inc.
+
+ import json
+ import os
+ import sys
+ import logging
+ import zlib
+
+ from wrapt import wrap_function_wrapper as wrap
+ from wrapt.importer import when_imported
+ from ddtrace import patch_all as patch_all_dd
+
+ from datadog_lambda.tracing import (
+     get_dd_trace_context,
+     dd_tracing_enabled,
+ )
+ from collections.abc import MutableMapping
+
+ logger = logging.getLogger(__name__)
+
+ _http_patched = False
+ _requests_patched = False
+ _integration_tests_patched = False
+
+
+ def patch_all():
+     """
+     Patch third-party libraries for tracing.
+     """
+     _patch_for_integration_tests()
+
+     if dd_tracing_enabled:
+         patch_all_dd()
+     else:
+         _patch_http()
+         _ensure_patch_requests()
+
+
+ def _patch_for_integration_tests():
+     """
+     Patch `requests` to log outgoing requests for integration tests.
+     """
+     global _integration_tests_patched
+     is_in_tests = os.environ.get("DD_INTEGRATION_TEST", "false").lower() == "true"
+     if not _integration_tests_patched and is_in_tests:
+         wrap("requests", "Session.send", _log_request)
+         _integration_tests_patched = True
+
+
+ def _patch_http():
+     """
+     Patch the `http.client` (Python 3) module.
+     """
+     global _http_patched
+     http_module = "http.client"
+     if not _http_patched:
+         _http_patched = True
+         wrap(http_module, "HTTPConnection.request", _wrap_http_request)
+
+         logger.debug("Patched %s", http_module)
+
+
+ def _ensure_patch_requests():
+     """
+     `requests` is third-party and may not be installed or used,
+     but make sure it gets patched if it is installed and used.
+     """
+     if "requests" in sys.modules:
+         # already imported, patch now
+         _patch_requests(sys.modules["requests"])
+     else:
+         # patch when imported
+         when_imported("requests")(_patch_requests)
+
+
+ def _patch_requests(module):
+     """
+     Patch the high-level HTTP client module `requests`
+     if it's installed.
+     """
+     global _requests_patched
+     if not _requests_patched:
+         _requests_patched = True
+         try:
+             wrap("requests", "Session.request", _wrap_requests_request)
+             logger.debug("Patched requests")
+         except Exception:
+             logger.debug("Failed to patch requests", exc_info=True)
+
+
+ def _wrap_requests_request(func, instance, args, kwargs):
+     """
+     Wrap `requests.Session.request` to inject the Datadog trace headers
+     into outgoing requests.
+     """
+     context = get_dd_trace_context()
+     if "headers" in kwargs and isinstance(kwargs["headers"], MutableMapping):
+         kwargs["headers"].update(context)
+     elif len(args) >= 5 and isinstance(args[4], MutableMapping):
+         args[4].update(context)
+     else:
+         kwargs["headers"] = context
+
+     return func(*args, **kwargs)
+
+
+ def _wrap_http_request(func, instance, args, kwargs):
+     """
+     Wrap `http.client` (Python 3) to inject
+     the Datadog trace headers into outgoing requests.
+     """
+     context = get_dd_trace_context()
+     if "headers" in kwargs and isinstance(kwargs["headers"], MutableMapping):
+         kwargs["headers"].update(context)
+     elif len(args) >= 4 and isinstance(args[3], MutableMapping):
+         args[3].update(context)
+     else:
+         kwargs["headers"] = context
+
+     return func(*args, **kwargs)
+
+
+ def _log_request(func, instance, args, kwargs):
+     request = kwargs.get("request") or args[0]
+     _print_request_string(request)
+     return func(*args, **kwargs)
+
+
+ def _print_request_string(request):
+     """Print the request so that it can be checked in integration tests
+
+     Only used by integration tests.
+     """
+     method = request.method
+     url = request.url
+
+     # Sort the datapoints POSTed by their name so that snapshots always align
+     data = request.body or "{}"
+     # If the payload is compressed, decompress it so we can parse it
+     if request.headers.get("Content-Encoding") == "deflate":
+         data = zlib.decompress(data)
+     data_dict = json.loads(data)
+     data_dict.get("series", []).sort(key=lambda series: series.get("metric"))
+     sorted_data = json.dumps(data_dict)
+
+     # Sort headers to prevent any differences in ordering
+     headers = request.headers or {}
+     sorted_headers = sorted(
+         "{}:{}".format(key, value) for key, value in headers.items()
+     )
+     sorted_header_str = json.dumps(sorted_headers)
+     print(
+         "HTTP {} {} Headers: {} Data: {}".format(
+             method, url, sorted_header_str, sorted_data
+         )
+     )
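
Both wrappers apply the same three-way merge rule: prefer keyword headers, then positional headers, then supply the trace context as the only headers. A standalone sketch of that rule with get_dd_trace_context stubbed out (the real one lives in datadog_lambda.tracing); inject_headers is a hypothetical helper for illustration:

from collections.abc import MutableMapping

def get_dd_trace_context():
    # Stub: the real function returns the current Datadog trace headers.
    return {"x-datadog-trace-id": "123", "x-datadog-parent-id": "456"}

def inject_headers(args, kwargs, header_pos):
    """Merge trace headers the way _wrap_requests_request (header_pos=4)
    and _wrap_http_request (header_pos=3) do."""
    context = get_dd_trace_context()
    if "headers" in kwargs and isinstance(kwargs["headers"], MutableMapping):
        kwargs["headers"].update(context)      # keyword headers: merge in place
    elif len(args) > header_pos and isinstance(args[header_pos], MutableMapping):
        args[header_pos].update(context)       # positional headers: merge in place
    else:
        kwargs["headers"] = context            # no headers passed: supply ours
    return args, kwargs

args, kwargs = inject_headers((), {"headers": {"accept": "application/json"}}, 4)
assert kwargs["headers"]["x-datadog-trace-id"] == "123"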
datadog_lambda/stats_writer.py ADDED
@@ -0,0 +1,9 @@
+ class StatsWriter:
+     def distribution(self, metric_name, value, tags=None, timestamp=None):
+         raise NotImplementedError()
+
+     def flush(self):
+         raise NotImplementedError()
+
+     def stop(self):
+         raise NotImplementedError()
datadog_lambda/statsd_writer.py ADDED
@@ -0,0 +1,17 @@
+ from datadog_lambda.stats_writer import StatsWriter
+ from datadog_lambda.dogstatsd import statsd
+
+
+ class StatsDWriter(StatsWriter):
+     """
+     Writes distribution metrics using the StatsD protocol
+     """
+
+     def distribution(self, metric_name, value, tags=None, timestamp=None):
+         # StatsD datagrams carry no timestamp; the parameter exists only to
+         # satisfy the StatsWriter interface and is ignored here.
+         statsd.distribution(metric_name, value, tags=tags)
+
+     def flush(self):
+         pass
+
+     def stop(self):
+         pass
datadog_lambda/tag_object.py ADDED
@@ -0,0 +1,68 @@
+ # Unless explicitly stated otherwise all files in this repository are licensed
+ # under the Apache License Version 2.0.
+ # This product includes software developed at Datadog (https://www.datadoghq.com/).
+ # Copyright 2021 Datadog, Inc.
+
+ from decimal import Decimal
+ import json
+ import logging
+
+ redactable_keys = ["authorization", "x-authorization", "password", "token"]
+ max_depth = 10
+ logger = logging.getLogger(__name__)
+
+
+ def tag_object(span, key, obj, depth=0):
+     if obj is None:
+         return span.set_tag(key, obj)
+     if depth >= max_depth:
+         return span.set_tag(key, _redact_val(key, str(obj)[0:5000]))
+     depth += 1
+     if _should_try_string(obj):
+         parsed = None
+         try:
+             parsed = json.loads(obj)
+             return tag_object(span, key, parsed, depth)
+         except ValueError:
+             redacted = _redact_val(key, obj[0:5000])
+             return span.set_tag(key, redacted)
+     if isinstance(obj, (int, float, Decimal)):
+         return span.set_tag(key, str(obj))
+     if isinstance(obj, list):
+         for k, v in enumerate(obj):
+             formatted_key = "{}.{}".format(key, k)
+             tag_object(span, formatted_key, v, depth)
+         return
+     if hasattr(obj, "items"):
+         for k, v in obj.items():
+             formatted_key = "{}.{}".format(key, k)
+             tag_object(span, formatted_key, v, depth)
+         return
+     if hasattr(obj, "to_dict"):
+         for k, v in obj.to_dict().items():
+             formatted_key = "{}.{}".format(key, k)
+             tag_object(span, formatted_key, v, depth)
+         return
+     try:
+         value_as_str = str(obj)
+     except Exception:
+         value_as_str = "UNKNOWN"
+     return span.set_tag(key, value_as_str)
+
+
+ def _should_try_string(obj):
+     # `unicode` only exists on Python 2; the NameError fallback keeps this
+     # check working on Python 3, where `bytes` is handled instead.
+     try:
+         if isinstance(obj, str) or isinstance(obj, unicode):
+             return True
+     except NameError:
+         if isinstance(obj, bytes):
+             return True
+
+     return False
+
+
+ def _redact_val(k, v):
+     # Redact based on the last segment of the dotted key path
+     split_key = k.split(".").pop() or k
+     if split_key in redactable_keys:
+         return "redacted"
+     return v
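
tag_object flattens nested, JSON-like payloads into dotted span tags: it parses embedded JSON strings, stringifies numbers, redacts sensitive keys, and caps recursion at max_depth. A self-contained sketch with a stand-in span, assuming the module path datadog_lambda.tag_object (the real span comes from ddtrace):

from datadog_lambda.tag_object import tag_object

class FakeSpan:
    """Stand-in that records tags; a real ddtrace span provides set_tag."""
    def __init__(self):
        self.tags = {}
    def set_tag(self, key, value):
        self.tags[key] = value

span = FakeSpan()
tag_object(span, "event", {"user": {"id": 42, "password": "hunter2"}, "items": ["a"]})
assert span.tags["event.user.id"] == "42"               # numbers are stringified
assert span.tags["event.user.password"] == "redacted"   # redactable key
assert span.tags["event.items.0"] == "a"                # list indices become segments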
datadog_lambda/tags.py ADDED
@@ -0,0 +1,104 @@
+ import sys
+
+ from platform import python_version_tuple
+
+ from datadog_lambda import __version__
+ from datadog_lambda.cold_start import get_cold_start_tag
+
+
+ def _format_dd_lambda_layer_tag():
+     """
+     Formats the dd_lambda_layer tag, e.g., 'dd_lambda_layer:datadog-python39_5.91.0'
+     """
+     runtime = "python{}{}".format(sys.version_info[0], sys.version_info[1])
+     return "dd_lambda_layer:datadog-{}_{}".format(runtime, __version__)
+
+
+ def tag_dd_lambda_layer(tags):
+     """
+     Used by lambda_metric to insert the dd_lambda_layer tag
+     """
+     dd_lambda_layer_tag = _format_dd_lambda_layer_tag()
+     if tags:
+         return tags + [dd_lambda_layer_tag]
+     else:
+         return [dd_lambda_layer_tag]
+
+
+ def parse_lambda_tags_from_arn(lambda_context):
+     """Generate the list of lambda tags based on the data in the ARN
+     Args:
+         lambda_context: AWS Lambda context object
+         ex: lambda_context.invoked_function_arn = arn:aws:lambda:us-east-1:123597598159:function:my-lambda:1
+     """
+     # Set up a flag to distinguish between a version and an alias
+     has_alias = False
+     # Split the ARN into its colon-separated components
+     split_arn = lambda_context.invoked_function_arn.split(":")
+
+     if len(split_arn) > 7:
+         has_alias = True
+         _, _, _, region, account_id, _, function_name, alias = split_arn
+     else:
+         _, _, _, region, account_id, _, function_name = split_arn
+
+     # Add the standard tags to a list
+     tags = [
+         "region:{}".format(region),
+         "account_id:{}".format(account_id),
+         "functionname:{}".format(function_name),
+     ]
+
+     # Check if we have a version or alias
+     if has_alias:
+         # If $Latest, drop the $ for the Datadog tag convention; a Lambda alias can't start with $
+         if alias.startswith("$"):
+             alias = alias[1:]
+         # Versions are numeric; aliases also need the executed version tag
+         elif not check_if_number(alias):
+             tags.append("executedversion:{}".format(lambda_context.function_version))
+         # Create the resource tag with the function name and alias/version
+         resource = "resource:{}:{}".format(function_name, alias)
+     else:
+         # Resource is only the function name otherwise
+         resource = "resource:{}".format(function_name)
+
+     tags.append(resource)
+
+     return tags
+
+
+ def get_runtime_tag():
+     """Get the runtime tag from the current Python version"""
+     major_version, minor_version, _ = python_version_tuple()
+
+     return "runtime:python{major}.{minor}".format(
+         major=major_version, minor=minor_version
+     )
+
+
+ def get_library_version_tag():
+     """Get the Datadog Lambda library version tag"""
+     return "datadog_lambda:v{}".format(__version__)
+
+
+ def get_enhanced_metrics_tags(lambda_context):
+     """Get the list of tags to apply to enhanced metrics"""
+     return parse_lambda_tags_from_arn(lambda_context) + [
+         get_cold_start_tag(),
+         "memorysize:{}".format(lambda_context.memory_limit_in_mb),
+         get_runtime_tag(),
+         get_library_version_tag(),
+     ]
+
+
+ def check_if_number(alias):
+     """
+     Check whether the alias is a numeric version rather than a name.
+     float() is used because Python 2 has no easy built-in check
+     comparable to Python 3's str.isnumeric().
+     """
+     try:
+         float(alias)
+         return True
+     except ValueError:
+         return False
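
For an aliased invocation the ARN has eight colon-separated components, so the alias branch adds both the executedversion tag and the combined resource tag. A sketch with a minimal stand-in for the AWS context object (the ARN and version values are made up):

from datadog_lambda.tags import parse_lambda_tags_from_arn

class FakeContext:
    invoked_function_arn = (
        "arn:aws:lambda:us-east-1:123456789012:function:my-lambda:prod"
    )
    function_version = "7"

print(parse_lambda_tags_from_arn(FakeContext()))
# ['region:us-east-1', 'account_id:123456789012', 'functionname:my-lambda',
#  'executedversion:7', 'resource:my-lambda:prod']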
datadog_lambda/thread_stats_writer.py ADDED
@@ -0,0 +1,65 @@
+ import logging
+
+ # Make sure that this module is always lazy-loaded, off the critical path,
+ # since the underlying packages are heavy to import and unnecessary
+ # when the extension is present.
+ from datadog.threadstats import ThreadStats
+ from datadog_lambda.stats_writer import StatsWriter
+
+ logger = logging.getLogger(__name__)
+
+
+ class ThreadStatsWriter(StatsWriter):
+     """
+     Writes distribution metrics using the ThreadStats class
+     """
+
+     def __init__(self, flush_in_thread):
+         self.thread_stats = ThreadStats(compress_payload=True)
+         self.thread_stats.start(flush_in_thread=flush_in_thread)
+
+     def distribution(self, metric_name, value, tags=None, timestamp=None):
+         self.thread_stats.distribution(
+             metric_name, value, tags=tags, timestamp=timestamp
+         )
+
+     def flush(self):
+         """Flush distributions from ThreadStats to Datadog.
+         Modified based on `datadog.threadstats.base.ThreadStats.flush()`
+         to gain better control over exception handling.
+         """
+         _, dists = self.thread_stats._get_aggregate_metrics_and_dists(float("inf"))
+         count_dists = len(dists)
+         if not count_dists:
+             logger.debug("No distributions to flush. Continuing.")
+
+         self.thread_stats.flush_count += 1
+         logger.debug(
+             "Flush #%s sending %s distributions",
+             self.thread_stats.flush_count,
+             count_dists,
+         )
+         try:
+             self.thread_stats.reporter.flush_distributions(dists)
+         except Exception as e:
+             # The nature of the root issue https://bugs.python.org/issue41345 is complex,
+             # but comprehensive tests suggest that it is safe to retry on this specific error.
+             if type(e).__name__ == "ClientError" and "RemoteDisconnected" in str(e):
+                 logger.debug(
+                     "Retry flush #%s due to RemoteDisconnected",
+                     self.thread_stats.flush_count,
+                 )
+                 try:
+                     self.thread_stats.reporter.flush_distributions(dists)
+                 except Exception:
+                     logger.debug(
+                         "Flush #%s failed after retry",
+                         self.thread_stats.flush_count,
+                         exc_info=True,
+                     )
+             else:
+                 logger.debug(
+                     "Flush #%s failed", self.thread_stats.flush_count, exc_info=True
+                 )
+
+     def stop(self):
+         self.thread_stats.stop()
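
Without the extension, the metric module above constructs this writer once at import time and flush_stats() drains it at the end of each invocation. A standalone sketch of the same lifecycle, assuming Datadog API credentials are already configured (e.g. via init_api); the metric name and tags are hypothetical:

from datadog_lambda.thread_stats_writer import ThreadStatsWriter

writer = ThreadStatsWriter(flush_in_thread=False)   # flush only when asked
writer.distribution("my_app.request.duration", 12.3, tags=["env:dev"])
writer.flush()   # synchronously sends buffered distributions to the Datadog API
writer.stop()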