datadog-checks-base 37.15.0__py2.py3-none-any.whl → 37.17.0__py2.py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registries.
Files changed (53)
  1. datadog_checks/base/__about__.py +1 -1
  2. datadog_checks/base/checks/base.py +30 -39
  3. datadog_checks/base/checks/kube_leader/base_check.py +2 -1
  4. datadog_checks/base/checks/kube_leader/mixins.py +1 -1
  5. datadog_checks/base/checks/kubelet_base/base.py +2 -2
  6. datadog_checks/base/checks/network.py +1 -1
  7. datadog_checks/base/checks/openmetrics/base_check.py +4 -3
  8. datadog_checks/base/checks/openmetrics/mixins.py +6 -7
  9. datadog_checks/base/checks/openmetrics/v2/base.py +4 -3
  10. datadog_checks/base/checks/openmetrics/v2/labels.py +1 -1
  11. datadog_checks/base/checks/openmetrics/v2/scraper/__init__.py +8 -0
  12. datadog_checks/base/checks/openmetrics/v2/{scraper.py → scraper/base_scraper.py} +19 -16
  13. datadog_checks/base/checks/openmetrics/v2/scraper/decorators.py +48 -0
  14. datadog_checks/base/checks/openmetrics/v2/transform.py +2 -1
  15. datadog_checks/base/checks/openmetrics/v2/transformers/histogram.py +2 -2
  16. datadog_checks/base/checks/openmetrics/v2/transformers/service_check.py +1 -1
  17. datadog_checks/base/checks/openmetrics/v2/transformers/temporal_percent.py +2 -2
  18. datadog_checks/base/checks/openmetrics/v2/transformers/time_elapsed.py +1 -1
  19. datadog_checks/base/checks/prometheus/base_check.py +4 -3
  20. datadog_checks/base/checks/prometheus/mixins.py +7 -7
  21. datadog_checks/base/checks/prometheus/prometheus_base.py +3 -2
  22. datadog_checks/base/checks/win/winpdh_base.py +2 -2
  23. datadog_checks/base/checks/win/wmi/base.py +13 -12
  24. datadog_checks/base/checks/win/wmi/sampler.py +10 -10
  25. datadog_checks/base/checks/windows/perf_counters/base.py +5 -4
  26. datadog_checks/base/checks/windows/perf_counters/connection.py +2 -2
  27. datadog_checks/base/checks/windows/perf_counters/counter.py +2 -1
  28. datadog_checks/base/checks/windows/perf_counters/transformers/service_check.py +2 -2
  29. datadog_checks/base/checks/windows/perf_counters/transformers/temporal_percent.py +3 -3
  30. datadog_checks/base/checks/windows/perf_counters/transformers/time_elapsed.py +1 -1
  31. datadog_checks/base/stubs/aggregator.py +21 -4
  32. datadog_checks/base/stubs/datadog_agent.py +5 -5
  33. datadog_checks/base/stubs/log.py +1 -1
  34. datadog_checks/base/utils/db/core.py +2 -2
  35. datadog_checks/base/utils/db/query.py +1 -3
  36. datadog_checks/base/utils/db/transform.py +6 -8
  37. datadog_checks/base/utils/db/utils.py +4 -5
  38. datadog_checks/base/utils/http.py +170 -71
  39. datadog_checks/base/utils/metadata/core.py +1 -1
  40. datadog_checks/base/utils/metadata/version.py +1 -1
  41. datadog_checks/base/utils/prometheus/metrics_pb2.py +2 -1
  42. datadog_checks/base/utils/replay/execute.py +2 -2
  43. datadog_checks/base/utils/replay/redirect.py +5 -6
  44. datadog_checks/base/utils/subprocess_output.py +2 -2
  45. datadog_checks/base/utils/tagging.py +1 -1
  46. datadog_checks/base/utils/tailfile.py +0 -2
  47. datadog_checks/base/utils/tls.py +96 -54
  48. datadog_checks/base/utils/tracing.py +5 -6
  49. datadog_checks/checks/libs/wmi/sampler.py +1 -0
  50. {datadog_checks_base-37.15.0.dist-info → datadog_checks_base-37.17.0.dist-info}/METADATA +11 -11
  51. {datadog_checks_base-37.15.0.dist-info → datadog_checks_base-37.17.0.dist-info}/RECORD +52 -51
  52. datadog_checks/base/utils/network.py +0 -49
  53. {datadog_checks_base-37.15.0.dist-info → datadog_checks_base-37.17.0.dist-info}/WHEEL +0 -0
datadog_checks/base/checks/win/wmi/sampler.py

@@ -23,6 +23,7 @@ Please refer to `checks.lib.wmi.counter_type` for more information*
 Original discussion thread: https://github.com/DataDog/dd-agent/issues/1952
 Credits to @TheCloudlessSky (https://github.com/TheCloudlessSky)
 """
+
 from copy import deepcopy
 from threading import Event, Thread
 
@@ -227,7 +228,7 @@ class WMISampler(object):
         result = parsed_value
 
         if result is None:
-            self.logger.error(u"Invalid '%s' WMI Provider Architecture. The parameter is ignored.", value)
+            self.logger.error("Invalid '%s' WMI Provider Architecture. The parameter is ignored.", value)
 
         self._provider = result or ProviderArchitecture.DEFAULT
 
@@ -278,7 +279,7 @@ class WMISampler(object):
         """
         # No data is returned while sampling
         if self._sampling:
-            raise TypeError(u"Sampling `WMISampler` object has no len()")
+            raise TypeError("Sampling `WMISampler` object has no len()")
 
         return len(self._current_sample)
 
@@ -288,7 +289,7 @@ class WMISampler(object):
         """
         # No data is returned while sampling
         if self._sampling:
-            raise TypeError(u"Sampling `WMISampler` object is not iterable")
+            raise TypeError("Sampling `WMISampler` object is not iterable")
 
         if self.is_raw_perf_class:
             # Format required
@@ -334,7 +335,7 @@ class WMISampler(object):
             calculator = get_calculator(counter_type)
         except UndefinedCalculator:
             self.logger.warning(
-                u"Undefined WMI calculator for counter_type %s. Values are reported as RAW.", counter_type
+                "Undefined WMI calculator for counter_type %s. Values are reported as RAW.", counter_type
             )
 
         return calculator
@@ -364,7 +365,7 @@ class WMISampler(object):
         Create a new WMI connection
         """
         self.logger.debug(
-            u"Connecting to WMI server (host=%s, namespace=%s, provider=%s, username=%s).",
+            "Connecting to WMI server (host=%s, namespace=%s, provider=%s, username=%s).",
             self.host,
             self.namespace,
             self.provider,
@@ -550,7 +551,7 @@ class WMISampler(object):
             wql = "Select {property_names} from {class_name}{filters}".format(
                 property_names=formated_property_names, class_name=self.class_name, filters=self.formatted_filters
             )
-            self.logger.debug(u"Querying WMI: %s", wql)
+            self.logger.debug("Querying WMI: %s", wql)
         except Exception as e:
             self.logger.error(str(e))
             return []
@@ -575,7 +576,7 @@ class WMISampler(object):
             results = self._parse_results(raw_results, includes_qualifiers=includes_qualifiers)
 
         except pywintypes.com_error:
-            self.logger.warning(u"Failed to execute WMI query (%s)", wql, exc_info=True)
+            self.logger.warning("Failed to execute WMI query (%s)", wql, exc_info=True)
             results = []
 
         return results
@@ -615,7 +616,6 @@ class WMISampler(object):
             )
 
             if should_get_qualifier_type:
-
                 # Can't index into "Qualifiers_" for keys that don't exist
                 # without getting an exception.
                 qualifiers = dict((q.Name, q.Value) for q in wmi_property.Qualifiers_)
@@ -628,14 +628,14 @@ class WMISampler(object):
                     self._property_counter_types[wmi_property.Name] = counter_type
 
                     self.logger.debug(
-                        u"Caching property qualifier CounterType: %s.%s = %s",
+                        "Caching property qualifier CounterType: %s.%s = %s",
                         self.class_name,
                         wmi_property.Name,
                         counter_type,
                     )
                 else:
                     self.logger.debug(
-                        u"CounterType qualifier not found for %s.%s", self.class_name, wmi_property.Name
+                        "CounterType qualifier not found for %s.%s", self.class_name, wmi_property.Name
                     )
 
             try:
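All ten hunks above make the same mechanical change: dropping the `u''` literal prefix. The prefix is a Python 2 remnant; since Python 3.0 every `str` literal is already unicode, so the prefix (kept legal by PEP 414 for compatibility) has no effect. A quick sanity check, runnable on any Python 3 interpreter:

# The u prefix is a no-op on Python 3: both spellings produce identical str objects.
assert u"Querying WMI: %s" == "Querying WMI: %s"
assert type(u"abc") is str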
datadog_checks/base/checks/windows/perf_counters/base.py

@@ -7,10 +7,11 @@ from contextlib import contextmanager, suppress
 import pywintypes
 import win32pdh
 
-from ....config import is_affirmative
-from ....errors import ConfigTypeError, ConfigurationError
-from ....utils.functions import raise_exception
-from ... import AgentCheck
+from datadog_checks.base.checks import AgentCheck
+from datadog_checks.base.config import is_affirmative
+from datadog_checks.base.errors import ConfigTypeError, ConfigurationError
+from datadog_checks.base.utils.functions import raise_exception
+
 from .connection import Connection
 from .counter import PerfObject
 
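This hunk is representative of a refactor that recurs throughout the diff: multi-dot relative imports are replaced with absolute imports rooted at `datadog_checks.base`. A relative import encodes the importing file's depth in the package tree, so moving the file silently breaks it; the absolute form does not. A minimal sketch of the difference, under an assumed layout (module names here are illustrative, not from the package):

# Assumed layout for this sketch:
#   pkg/__init__.py
#   pkg/config.py                       defines is_affirmative()
#   pkg/checks/windows/perf/base.py     the importing module shown below
#
# Relative form: each leading dot climbs one package level, so this line
# hard-codes base.py's depth and breaks if the file is ever moved:
#     from ....config import is_affirmative
#
# Absolute form: resolves the same module from anywhere in the tree:
from pkg.config import is_affirmative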
datadog_checks/base/checks/windows/perf_counters/connection.py

@@ -9,7 +9,7 @@ import win32api
 import win32pdh
 import win32wnet
 
-from ....errors import ConfigTypeError
+from datadog_checks.base.errors import ConfigTypeError
 
 
 class NetworkResources:

@@ -77,7 +77,7 @@ class Connection:
         # https://docs.microsoft.com/en-us/windows/win32/api/winnetwk/ns-winnetwk-netresourcea
         # https://mhammond.github.io/pywin32/PyNETRESOURCE.html
         self.network_resource = win32wnet.NETRESOURCE()
-        self.network_resource.lpRemoteName = fr'\\{server}'
+        self.network_resource.lpRemoteName = rf'\\{server}'
 
         self.__query_handle = None
 
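A side note on the second hunk: `fr` and `rf` denote the same raw f-string, since Python accepts string-literal prefixes in either order; the change is purely stylistic (the `rf` ordering is the spelling lint and format tools commonly prefer). A quick demonstration:

server = 'fileserver01'  # illustrative value, not from the package
# Prefix order does not change the meaning: both are raw f-strings.
assert fr'\\{server}' == rf'\\{server}' == '\\\\fileserver01'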
datadog_checks/base/checks/windows/perf_counters/counter.py

@@ -7,7 +7,8 @@ import weakref
 import pywintypes
 import win32pdh
 
-from ....errors import ConfigTypeError, ConfigValueError
+from datadog_checks.base.errors import ConfigTypeError, ConfigValueError
+
 from .constants import PDH_CSTATUS_INVALID_DATA, PDH_INVALID_DATA
 from .transform import NATIVE_TRANSFORMERS, TRANSFORMERS
 from .utils import construct_counter_path, get_counter_value, get_counter_values, validate_path
datadog_checks/base/checks/windows/perf_counters/transformers/service_check.py

@@ -1,8 +1,8 @@
 # (C) Datadog, Inc. 2021-present
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)
-from .....constants import ServiceCheck
-from .....errors import ConfigTypeError, ConfigValueError
+from datadog_checks.base.constants import ServiceCheck
+from datadog_checks.base.errors import ConfigTypeError, ConfigValueError
 
 
 def get_service_check(check, metric_name, modifiers):
datadog_checks/base/checks/windows/perf_counters/transformers/temporal_percent.py

@@ -1,9 +1,9 @@
 # (C) Datadog, Inc. 2021-present
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)
-from .....errors import ConfigTypeError, ConfigValueError
-from .....utils.common import total_time_to_temporal_percent
-from .....utils.constants import TIME_UNITS
+from datadog_checks.base.errors import ConfigTypeError, ConfigValueError
+from datadog_checks.base.utils.common import total_time_to_temporal_percent
+from datadog_checks.base.utils.constants import TIME_UNITS
 
 
 def get_temporal_percent(check, metric_name, modifiers):
datadog_checks/base/checks/windows/perf_counters/transformers/time_elapsed.py

@@ -1,7 +1,7 @@
 # (C) Datadog, Inc. 2021-present
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)
-from .....utils.time import get_timestamp
+from datadog_checks.base.utils.time import get_timestamp
 
 
 def get_time_elapsed(check, metric_name, modifiers):
datadog_checks/base/stubs/aggregator.py

@@ -8,8 +8,9 @@ import os
 import re
 from collections import OrderedDict, defaultdict
 
-from ..constants import ServiceCheck
-from ..utils.common import ensure_unicode, to_native_string
+from datadog_checks.base.constants import ServiceCheck
+from datadog_checks.base.utils.common import ensure_unicode, to_native_string
+
 from .common import HistogramBucketStub, MetricStub, ServiceCheckStub
 from .similar import build_similar_elements_msg
 
@@ -425,10 +426,19 @@ class AggregatorStub(object):
         assert condition, msg
 
     def assert_metrics_using_metadata(
-        self, metadata_metrics, check_metric_type=True, check_submission_type=False, exclude=None
+        self,
+        metadata_metrics,
+        check_metric_type=True,
+        check_submission_type=False,
+        exclude=None,
+        check_symmetric_inclusion=False,
     ):
         """
-        Assert metrics using metadata.csv
+        Assert metrics using metadata.csv. The assertion fails if there are metrics emitted that are
+        not in metadata.csv. Metrics passed in the `exclude` parameter are ignored.
+
+        Pass `check_symmetric_inclusion=True` to assert that both set of metrics, those submitted and
+        those in metadata.csv, are the same.
 
         Checking type: By default we are asserting the in-app metric type (`check_submission_type=False`),
         asserting this type make sense for e2e (metrics collected from agent).
@@ -444,6 +454,7 @@ class AggregatorStub(object):
 
         exclude = exclude or []
         errors = set()
+        submitted_metrics = set()
         for metric_name, metric_stubs in self._metrics.items():
             if metric_name in exclude:
                 continue
@@ -456,6 +467,8 @@ class AggregatorStub(object):
             if check_submission_type and actual_metric_type in ['histogram', 'historate']:
                 metric_stub_name += '.count'
 
+            submitted_metrics.add(metric_stub_name)
+
             # Checking the metric is in `metadata.csv`
             if metric_stub_name not in metadata_metrics:
                 errors.add("Expect `{}` to be in metadata.csv.".format(metric_stub_name))
@@ -478,6 +491,10 @@ class AggregatorStub(object):
                 )
             )
 
+        if check_symmetric_inclusion:
+            missing_metrics = metadata_metrics.keys() - submitted_metrics
+            errors.update(f"Expect `{m}` from metadata.csv but not submitted." for m in missing_metrics)
+
         assert not errors, "Metadata assertion errors using metadata.csv:" + "\n\t- ".join([''] + sorted(errors))
 
     def assert_service_checks(self, service_checks):
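To illustrate the new `check_symmetric_inclusion` flag, here is a hypothetical integration test; the `check` fixture and test name are invented for the sketch, while `get_metadata_metrics` is the helper integration tests typically use to load `metadata.csv`:

from datadog_checks.dev.utils import get_metadata_metrics


def test_metrics_cover_metadata(aggregator, dd_run_check, check):
    dd_run_check(check)

    # Previous behavior: fails only if a submitted metric is absent from metadata.csv.
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())

    # New in this release range: also fails if metadata.csv lists a metric that was
    # never submitted, making the inclusion check symmetric in both directions.
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_symmetric_inclusion=True)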
datadog_checks/base/stubs/datadog_agent.py

@@ -52,7 +52,7 @@ class DatadogAgentStub(object):
             key = (check_id, name)
             if key in self._metadata:
                 actual[name] = self._metadata[key]
-        assert data == actual
+        assert data == actual, f'Expected metadata: {data}; actual metadata: {actual}'
 
     def assert_metadata_count(self, count):
         metadata_items = len(self._metadata)

@@ -67,10 +67,10 @@ class DatadogAgentStub(object):
         external_tags = {k: sorted(v) for (k, v) in external_tags.items()}
         tags = {k: sorted(v) for (k, v) in tags.items()}
 
-        assert (
-            external_tags == tags
-        ), 'Expected {} external tags for hostname {}, found {}. Submitted external tags: {}'.format(
-            external_tags, hostname, tags, repr(self._external_tags)
+        assert external_tags == tags, (
+            'Expected {} external tags for hostname {}, found {}. Submitted external tags: {}'.format(
+                external_tags, hostname, tags, repr(self._external_tags)
+            )
         )
         return
 
datadog_checks/base/stubs/log.py

@@ -3,7 +3,7 @@
 # Licensed under a 3-clause BSD style license (see LICENSE)
 import logging
 
-from ..log import CheckLoggingAdapter as AgentLoggingAdapter
+from datadog_checks.base.log import CheckLoggingAdapter as AgentLoggingAdapter
 
 TRACE_LEVEL = 7
 
datadog_checks/base/utils/db/core.py

@@ -6,10 +6,10 @@ from itertools import chain
 from typing import Any, Callable, Dict, List, Tuple  # noqa: F401
 
 from datadog_checks.base import AgentCheck  # noqa: F401
+from datadog_checks.base.config import is_affirmative
+from datadog_checks.base.utils.containers import iter_unique
 from datadog_checks.base.utils.db.types import QueriesExecutor, QueriesSubmitter, Transformer  # noqa: F401
 
-from ...config import is_affirmative
-from ..containers import iter_unique
 from .query import Query
 from .transform import COLUMN_TRANSFORMERS, EXTRA_TRANSFORMERS
 from .utils import SUBMISSION_METHODS, create_submission_transformer, tracked_query
datadog_checks/base/utils/db/query.py

@@ -214,9 +214,7 @@ class Query(object):
             elif extra_type not in extra_transformers and extra_type not in submission_transformers:
                 raise ValueError('unknown type `{}` for extra {} of {}'.format(extra_type, extra_name, query_name))
 
-            transformer_factory = extra_transformers.get(
-                extra_type, submission_transformers.get(extra_type)
-            )  # type: TransformerFactory
+            transformer_factory = extra_transformers.get(extra_type, submission_transformers.get(extra_type))  # type: TransformerFactory
 
             extra_source = extra.get('source')
             if extra_type in submission_transformers:
datadog_checks/base/utils/db/transform.py

@@ -8,14 +8,14 @@ import time
 from datetime import datetime
 from typing import Any, Callable, Dict, List, Tuple  # noqa: F401
 
+from datadog_checks.base import is_affirmative
+from datadog_checks.base.constants import ServiceCheck
 from datadog_checks.base.types import ServiceCheckStatus  # noqa: F401
+from datadog_checks.base.utils import constants
+from datadog_checks.base.utils.common import compute_percent, total_time_to_temporal_percent
 from datadog_checks.base.utils.db.types import Transformer, TransformerFactory  # noqa: F401
+from datadog_checks.base.utils.time import ensure_aware_datetime
 
-from ... import is_affirmative
-from ...constants import ServiceCheck
-from .. import constants
-from ..common import compute_percent, total_time_to_temporal_percent
-from ..time import ensure_aware_datetime
 from .utils import create_extra_transformer
 
 # Used for the user-defined `expression`s

@@ -85,9 +85,7 @@ def get_monotonic_gauge(transformers, column_name, **modifiers):
     Send the result as both a `gauge` suffixed by `.total` and a `monotonic_count` suffixed by `.count`.
     """
     gauge = transformers['gauge'](transformers, '{}.total'.format(column_name), **modifiers)  # type: Callable
-    monotonic_count = transformers['monotonic_count'](
-        transformers, '{}.count'.format(column_name), **modifiers
-    )  # type: Callable
+    monotonic_count = transformers['monotonic_count'](transformers, '{}.count'.format(column_name), **modifiers)  # type: Callable
 
     def monotonic_gauge(_, value, **kwargs):
         # type: (List, str, Dict[str, Any]) -> None
datadog_checks/base/utils/db/utils.py

@@ -20,12 +20,11 @@ from cachetools import TTLCache
 from datadog_checks.base import is_affirmative
 from datadog_checks.base.agent import datadog_agent
 from datadog_checks.base.log import get_check_logger
+from datadog_checks.base.utils.common import to_native_string
 from datadog_checks.base.utils.db.types import Transformer  # noqa: F401
 from datadog_checks.base.utils.format import json
 from datadog_checks.base.utils.tracing import INTEGRATION_TRACING_SERVICE_NAME, tracing_enabled
 
-from ..common import to_native_string
-
 logger = logging.getLogger(__file__)
 
 # AgentCheck methods to transformer name e.g. set_metadata -> metadata

@@ -335,7 +334,7 @@ class DBMAsyncJob(object):
         try:
             self._log.info("[%s] Starting job loop", self._job_tags_str)
             while True:
-                if self._cancel_event.isSet():
+                if self._cancel_event.is_set():
                     self._log.info("[%s] Job loop cancelled", self._job_tags_str)
                     self._check.count("dd.{}.async_job.cancel".format(self._dbms), 1, tags=self._job_tags, raw=True)
                     break

@@ -354,7 +353,7 @@ class DBMAsyncJob(object):
             else:
                 self._run_job_rate_limited()
         except Exception as e:
-            if self._cancel_event.isSet():
+            if self._cancel_event.is_set():
                 # canceling can cause exceptions if the connection is closed the middle of the check run
                 # in this case we still want to report it as a cancellation instead of a crash
                 self._log.debug("[%s] Job loop error after cancel: %s", self._job_tags_str, e)

@@ -401,7 +400,7 @@ class DBMAsyncJob(object):
         except:
             raise
         finally:
-            if not self._cancel_event.isSet():
+            if not self._cancel_event.is_set():
                 self._rate_limiter.update_last_time_and_sleep()
             else:
                 self._rate_limiter.update_last_time()
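The last three hunks replace `Event.isSet()`, the camelCase alias inherited from Python 2's threading API, with `is_set()`; the alias was deprecated in Python 3.10. A minimal standalone sketch of the same cancelable-loop pattern `DBMAsyncJob` uses (names simplified for illustration):

import time
from threading import Event, Thread

cancel_event = Event()

def job_loop():
    # Modern spelling; isSet() emits a DeprecationWarning on Python 3.10+.
    while not cancel_event.is_set():
        time.sleep(0.1)  # stand-in for one rate-limited job run
    print("job loop cancelled")

t = Thread(target=job_loop)
t.start()
cancel_event.set()  # request cancellation; the loop observes it and exits
t.join()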