dt-extensions-sdk 1.5.1__py3-none-any.whl → 1.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dt-extensions-sdk
- Version: 1.5.1
+ Version: 1.6.0
  Project-URL: Documentation, https://github.com/dynatrace-extensions/dt-extensions-python-sdk#readme
  Project-URL: Issues, https://github.com/dynatrace-extensions/dt-extensions-python-sdk/issues
  Project-URL: Source, https://github.com/dynatrace-extensions/dt-extensions-python-sdk
@@ -1,5 +1,5 @@
- dynatrace_extension/__about__.py,sha256=5K7C4flDpJDmdHw8pNksZ4c9GLxQrwNkYMwLMvckJ-I,110
- dynatrace_extension/__init__.py,sha256=BvQuknmA7ti3WJi3zEXZfY7aAxJrie37VNitWICsUvI,752
+ dynatrace_extension/__about__.py,sha256=ni-lNZjGGk8HaFm6WRu8u-nPm9ucCp8I4xM0ONtz83c,110
+ dynatrace_extension/__init__.py,sha256=fe3nw1UVecc0cGu9R7J82V1A0n7c-Z_Z4C_LuNXU6WU,813
  dynatrace_extension/cli/__init__.py,sha256=HCboY_eJPoqjFmoPDsBL8Jk6aNvank8K7JpkVrgwzUM,123
  dynatrace_extension/cli/main.py,sha256=OTjJ4XHJvvYXj10a7WFFHVNnkyECPg1ClW6Os8piN8k,20168
  dynatrace_extension/cli/schema.py,sha256=d8wKUodRiaU3hfSZDWVNpD15lBfhmif2oQ-k07IxcaA,3230
@@ -17,20 +17,21 @@ dynatrace_extension/cli/create/extension_template/extension_name/__init__.py.tem
  dynatrace_extension/cli/create/extension_template/extension_name/__main__.py.template,sha256=cS79GVxJB-V-gocu4ZOjmZ54HXJNg89eXdLf89zDHJQ,1249
  dynatrace_extension/sdk/__init__.py,sha256=RsqQ1heGyCmSK3fhuEKAcxQIRCg4gEK0-eSkIehL5Nc,86
  dynatrace_extension/sdk/activation.py,sha256=KIoPWMZs3tKiMG8XhCfeNgRlz2vxDKcAASgSACcEfIQ,1456
- dynatrace_extension/sdk/callback.py,sha256=--GyC5aDAhgRix8QLHHvp7KjYMIECTecy9jJWX0wyj8,6488
- dynatrace_extension/sdk/communication.py,sha256=_u3VdftaI8N59Qxjtn9H0pJetWMhhrC6fNLPEixHmFw,19142
+ dynatrace_extension/sdk/callback.py,sha256=K2jUacVU5FJZaeZgZPOMVLhXr_L39Sz_munYeMMm48w,6688
+ dynatrace_extension/sdk/communication.py,sha256=Kbar_SpeIJInQZ8oYXhuH6ZknZX89wfHKJQ2LevhFSw,23280
  dynatrace_extension/sdk/event.py,sha256=J261imbFKpxfuAQ6Nfu3RRcsIQKKivy6fme1nww2g-8,388
- dynatrace_extension/sdk/extension.py,sha256=2mFDECsqZziYLGuxY3sYoVyWfWlhgc0ZUYwuUiXJQaA,45192
+ dynatrace_extension/sdk/extension.py,sha256=WnC6VR3aixivaOgOJvokZTZEoQcEfOjelhJlH4wiuDo,48041
  dynatrace_extension/sdk/helper.py,sha256=m4gGHtIKYkfANC2MOGdxKUZlmH5tnZO6WTNqll27lyY,6476
  dynatrace_extension/sdk/metric.py,sha256=-kq7JWpk7UGvcjqafTt-o6k4urwhsGVXmnuQg7Sf9PQ,3622
  dynatrace_extension/sdk/runtime.py,sha256=7bC4gUJsVSHuL_E7r2EWrne95nm1BjZiMGkyNqA7ZCU,2796
  dynatrace_extension/sdk/snapshot.py,sha256=LnWVCtCK4NIEV3_kX-ly_LGHpNBSeErtsxCI1PH3L28,7521
+ dynatrace_extension/sdk/throttled_logger.py,sha256=JXDiHh8syl8R0gJ-wfxmmBqvGCBMQX4pxPkxscaCsXo,3292
  dynatrace_extension/sdk/vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  dynatrace_extension/sdk/vendor/mureq/LICENSE,sha256=8AVcgZgiT_mvK1fOofXtRRr2f1dRXS_K21NuxQgP4VM,671
  dynatrace_extension/sdk/vendor/mureq/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  dynatrace_extension/sdk/vendor/mureq/mureq.py,sha256=znF4mvzk5L03CLNozRz8UpK-fMijmSkObDFwlbhwLUg,14656
- dt_extensions_sdk-1.5.1.dist-info/METADATA,sha256=ItVzENof2_FYkmDls53v9pCU8hexGrmaQDRkPc2BBuE,2721
- dt_extensions_sdk-1.5.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- dt_extensions_sdk-1.5.1.dist-info/entry_points.txt,sha256=pweyOCgENGHjOlT6_kXYaBPOrE3p18K0UettqnNlnoE,55
- dt_extensions_sdk-1.5.1.dist-info/licenses/LICENSE.txt,sha256=3Zihv0lOVYHNfDkJC-tUAU6euP9r2NexsDW4w-zqgVk,1078
- dt_extensions_sdk-1.5.1.dist-info/RECORD,,
+ dt_extensions_sdk-1.6.0.dist-info/METADATA,sha256=ckrExeLigD1EqXceNjdUAsvIurRkCL9ziupMsXsl_QY,2721
+ dt_extensions_sdk-1.6.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ dt_extensions_sdk-1.6.0.dist-info/entry_points.txt,sha256=pweyOCgENGHjOlT6_kXYaBPOrE3p18K0UettqnNlnoE,55
+ dt_extensions_sdk-1.6.0.dist-info/licenses/LICENSE.txt,sha256=3Zihv0lOVYHNfDkJC-tUAU6euP9r2NexsDW4w-zqgVk,1078
+ dt_extensions_sdk-1.6.0.dist-info/RECORD,,
@@ -3,4 +3,4 @@
  # SPDX-License-Identifier: MIT


- __version__ = "1.5.1"
+ __version__ = "1.6.0"
@@ -6,7 +6,7 @@
  # ruff: noqa: F401

  from .sdk.activation import ActivationConfig, ActivationType
- from .sdk.communication import Status, StatusValue
+ from .sdk.communication import EndpointStatus, EndpointStatuses, IgnoreStatus, MultiStatus, Status, StatusValue
  from .sdk.event import Severity
  from .sdk.extension import DtEventType, Extension
  from .sdk.helper import (
@@ -9,7 +9,7 @@ from datetime import datetime, timedelta
  from timeit import default_timer as timer

  from .activation import ActivationType
- from .communication import MultiStatus, Status, StatusValue
+ from .communication import EndpointStatuses, IgnoreStatus, MultiStatus, Status, StatusValue


  class WrappedCallback:
@@ -65,6 +65,10 @@ class WrappedCallback:
                  self.status = ret
              elif isinstance(ret, MultiStatus):
                  self.status = ret.build()
+             elif isinstance(ret, EndpointStatuses):
+                 self.status = ret
+             elif isinstance(ret, IgnoreStatus):
+                 self.status = ret
              else:
                  self.status = Status(StatusValue.OK)
          except Exception as e:
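
With these new branches, a scheduled callback may return an `EndpointStatuses` or `IgnoreStatus` object and `WrappedCallback` keeps it as-is rather than collapsing it into a plain `Status`. A minimal sketch of how an extension author might use that (the subclass name, endpoint address, and messages are illustrative, not part of the package):

```python
from dynatrace_extension import (
    EndpointStatus,
    EndpointStatuses,
    Extension,
    IgnoreStatus,
    StatusValue,
)


class MyExtension(Extension):
    def query(self):
        # Track two endpoints; only faulty ones need to be reported, and a later
        # OK status for the same endpoint_hint clears the recorded error.
        statuses = EndpointStatuses(total_endpoints_number=2)
        statuses.add_endpoint_status(
            EndpointStatus("host-b:9090", StatusValue.DEVICE_CONNECTION_ERROR, "connection timed out")
        )
        return statuses

    def check_feature_flag(self):
        # A callback with nothing to report can return IgnoreStatus() so it is
        # skipped when the overall extension status is built.
        return IgnoreStatus()
```
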
@@ -12,6 +12,7 @@ from collections.abc import Generator, Sequence
  from dataclasses import dataclass
  from enum import Enum
  from pathlib import Path
+ from threading import RLock
  from typing import Any, TypeVar

  from .vendor.mureq.mureq import HTTPException, Response, request
@@ -38,9 +39,14 @@ class StatusValue(Enum):
      INVALID_CONFIG_ERROR = "INVALID_CONFIG_ERROR"
      AUTHENTICATION_ERROR = "AUTHENTICATION_ERROR"
      DEVICE_CONNECTION_ERROR = "DEVICE_CONNECTION_ERROR"
+     WARNING = "WARNING"
      UNKNOWN_ERROR = "UNKNOWN_ERROR"


+ class IgnoreStatus:
+     pass
+
+
  class Status:
      def __init__(self, status: StatusValue = StatusValue.EMPTY, message: str = "", timestamp: int | None = None):
          self.status = status
@@ -57,12 +63,16 @@ class Status:
          return json.dumps(self.to_json())

      def is_error(self) -> bool:
+         # WARNING is treated as an error
          return self.status not in (StatusValue.OK, StatusValue.EMPTY)

+     def is_warning(self) -> bool:
+         return self.status == StatusValue.WARNING
+

  class MultiStatus:
      def __init__(self):
-         self.statuses = []
+         self.statuses: list[Status] = []

      def add_status(self, status: StatusValue, message):
          self.statuses.append(Status(status, message))
@@ -73,15 +83,120 @@ class MultiStatus:
              return ret

          messages = []
+         all_ok = True
+         all_err = True
+         any_warning = False
+
          for stored_status in self.statuses:
-             print(stored_status)  # noqa: T201
+             if stored_status.message != "":
+                 messages.append(stored_status.message)
+
+             if stored_status.is_warning():
+                 any_warning = True
+
              if stored_status.is_error():
-                 ret.status = stored_status.status
-             messages.append(stored_status.message)
-         ret.message = "\n".join(messages)
+                 all_ok = False
+             else:
+                 all_err = False
+
+         ret.message = ", ".join(messages)
+
+         if any_warning:
+             ret.status = StatusValue.WARNING
+         elif all_ok:
+             ret.status = StatusValue.OK
+         elif all_err:
+             ret.status = StatusValue.GENERIC_ERROR
+         else:
+             ret.status = StatusValue.WARNING
+
          return ret


+ class EndpointStatus:
+     def __init__(self, endpoint_hint: str, short_status: StatusValue, message: str):
+         self.endpoint = endpoint_hint
+         self.status: StatusValue = short_status
+         self.message = message
+
+     def __str__(self):
+         return str(self.__dict__)
+
+
+ class EndpointStatuses:
+     class TooManyEndpointStatusesError(Exception):
+         pass
+
+     class MergeConflictError(Exception):
+         def __init__(self, first: EndpointStatus, second: EndpointStatus):
+             super().__init__(f"Endpoint Statuses conflict while merging - first: {first}; second: {second}")
+
+     def __init__(self, total_endpoints_number: int):
+         self._lock = RLock()
+         self._faulty_endpoints: dict[str, EndpointStatus] = {}
+         self._num_endpoints = total_endpoints_number
+
+     def add_endpoint_status(self, status: EndpointStatus):
+         with self._lock:
+             if status.status == StatusValue.OK:
+                 self.clear_endpoint_error(status.endpoint)
+             else:
+                 if len(self._faulty_endpoints) == self._num_endpoints:
+                     message = "Cannot add another endpoint status. \
+                         The number of reported statuses already has reached preconfigured maximum of {self._num_endpoints} endpoints."
+                     raise EndpointStatuses.TooManyEndpointStatusesError(message)
+
+                 self._faulty_endpoints[status.endpoint] = status
+
+     def clear_endpoint_error(self, endpoint_hint: str):
+         with self._lock:
+             try:
+                 del self._faulty_endpoints[endpoint_hint]
+             except KeyError:
+                 pass
+
+     def merge(self, other: EndpointStatuses):
+         with self._lock:
+             with other._lock:
+                 self._num_endpoints += other._num_endpoints
+
+                 for endpoint, status in other._faulty_endpoints.items():
+                     if endpoint not in self._faulty_endpoints.keys():
+                         self._faulty_endpoints[endpoint] = status
+                     else:
+                         self._num_endpoints -= 1
+                         raise EndpointStatuses.MergeConflictError(
+                             self._faulty_endpoints[endpoint], other._faulty_endpoints[endpoint]
+                         )
+
+     def build_common_status(self) -> Status:
+         with self._lock:
+             ok_count = self._num_endpoints - len(self._faulty_endpoints)
+             nok_count = len(self._faulty_endpoints)
+
+             if nok_count == 0:
+                 return Status(StatusValue.OK, f"Endpoints OK: {self._num_endpoints} NOK: 0")
+
+             error_messages = []
+             for ep_status in self._faulty_endpoints.values():
+                 error_messages.append(f"{ep_status.endpoint} - {ep_status.status.value} {ep_status.message}")
+             common_msg = ", ".join(error_messages)
+
+             # Determine status value
+             all_endpoints_faulty = nok_count == self._num_endpoints
+             has_warning_status = StatusValue.WARNING in [
+                 ep_status.status for ep_status in self._faulty_endpoints.values()
+             ]
+
+             if all_endpoints_faulty and not has_warning_status:
+                 status_value = StatusValue.GENERIC_ERROR
+             else:
+                 status_value = StatusValue.WARNING
+
+             message = f"Endpoints OK: {ok_count} NOK: {nok_count} NOK_reported_errors: {common_msg}"
+             return Status(status=status_value, message=message)
+
+
  class CommunicationClient(ABC):
      """
      Abstract class for extension communication
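
Taken together, these communication hunks add a `WARNING` severity, an `IgnoreStatus` marker, per-endpoint bookkeeping via `EndpointStatus`/`EndpointStatuses`, and a `MultiStatus.build()` that resolves to WARNING when any result warns, OK when all results are OK, GENERIC_ERROR when all fail, and WARNING for a mix. A small standalone sketch of the resulting behaviour (all values are illustrative):

```python
from dynatrace_extension import EndpointStatus, EndpointStatuses, MultiStatus, StatusValue

# Mixed OK / error results now collapse to WARNING instead of the last error seen.
ms = MultiStatus()
ms.add_status(StatusValue.OK, "database reachable")
ms.add_status(StatusValue.GENERIC_ERROR, "api unreachable")
print(ms.build().status)   # StatusValue.WARNING
print(ms.build().message)  # database reachable, api unreachable

# EndpointStatuses tracks faulty endpoints against a declared total.
eps = EndpointStatuses(total_endpoints_number=3)
eps.add_endpoint_status(
    EndpointStatus("10.0.0.1:443", StatusValue.AUTHENTICATION_ERROR, "bad token")
)
summary = eps.build_common_status()
print(summary.status)   # StatusValue.WARNING (not every endpoint is faulty)
print(summary.message)  # Endpoints OK: 2 NOK: 1 NOK_reported_errors: 10.0.0.1:443 - AUTHENTICATION_ERROR bad token
```
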
@@ -20,11 +20,20 @@ from typing import Any, ClassVar, NamedTuple

  from .activation import ActivationConfig, ActivationType
  from .callback import WrappedCallback
- from .communication import CommunicationClient, DebugClient, HttpClient, Status, StatusValue
+ from .communication import (
+     CommunicationClient,
+     DebugClient,
+     EndpointStatuses,
+     HttpClient,
+     IgnoreStatus,
+     Status,
+     StatusValue,
+ )
  from .event import Severity
  from .metric import Metric, MetricType, SfmMetric, SummaryStat
  from .runtime import RuntimeProperties
  from .snapshot import Snapshot
+ from .throttled_logger import StrictThrottledHandler, ThrottledHandler

  HEARTBEAT_INTERVAL = timedelta(seconds=50)
  METRIC_SENDING_INTERVAL = timedelta(seconds=30)
@@ -51,6 +60,33 @@ extension_logger.setLevel(logging.INFO)
  extension_logger.addHandler(error_handler)
  extension_logger.addHandler(std_handler)

+ throttled_err_handler = ThrottledHandler(sys.stderr)
+ throttled_err_handler.addFilter(lambda record: record.levelno >= logging.ERROR)
+ throttled_err_handler.setFormatter(formatter)
+
+ throttled_std_handler = ThrottledHandler(sys.stdout)
+ throttled_std_handler.addFilter(lambda record: record.levelno < logging.ERROR)
+ throttled_std_handler.setFormatter(formatter)
+
+ throttled_logger = logging.getLogger(f"THROTTLED_{__name__}")
+ throttled_logger.setLevel(logging.INFO)
+ throttled_logger.addHandler(throttled_err_handler)
+ throttled_logger.addHandler(throttled_std_handler)
+
+ strict_throttled_err_handler = StrictThrottledHandler(sys.stderr)
+ strict_throttled_err_handler.addFilter(lambda record: record.levelno >= logging.ERROR)
+ strict_throttled_err_handler.setFormatter(formatter)
+
+ strict_throttled_std_handler = StrictThrottledHandler(sys.stdout)
+ strict_throttled_std_handler.addFilter(lambda record: record.levelno < logging.ERROR)
+ strict_throttled_std_handler.setFormatter(formatter)
+
+ strict_throttled_logger = logging.getLogger(f"STRICT_THROTTLED_{__name__}")
+ strict_throttled_logger.setLevel(logging.INFO)
+ strict_throttled_logger.addHandler(strict_throttled_err_handler)
+ strict_throttled_logger.addHandler(strict_throttled_std_handler)
+
+
  api_logger = logging.getLogger("api")
  api_logger.setLevel(logging.INFO)
  api_logger.addHandler(error_handler)
@@ -164,6 +200,8 @@ class Extension:
              return

          self.logger = extension_logger
+         self.throttled_logger = throttled_logger
+         self.strict_throttled_logger = strict_throttled_logger
          self.logger.name = name

          self.extension_config: str = ""
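
The throttled loggers configured at module level are exposed on every `Extension` instance as `self.throttled_logger` and `self.strict_throttled_logger`. A brief sketch of using them from a callback (messages and loop are illustrative):

```python
from dynatrace_extension import Extension


class MyMonitoringExtension(Extension):
    def query(self):
        # ThrottledHandler keys on the message text, so repeats of the same
        # message are suppressed for log_repeat_interval (3600 s by default).
        self.throttled_logger.warning("device unreachable, will retry")

        for attempt in range(3):
            # StrictThrottledHandler keys on the call site (filename:lineno),
            # so even messages with varying text are rate limited from here.
            self.strict_throttled_logger.error(f"poll attempt {attempt} failed")
```
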
@@ -380,7 +418,7 @@ class Extension:
          Optional method that can be implemented by subclasses.
          The query method is always scheduled to run every minute.
          """
-         pass
+         return IgnoreStatus()

      def initialize(self):
          """Callback to be executed when the extension starts.
@@ -458,10 +496,10 @@ class Extension:
          key: str,
          value: float | str | int | SummaryStat,
          dimensions: dict[str, str] | None = None,
-         device_address: str | None = None,
          techrule: str | None = None,
          timestamp: datetime | None = None,
          metric_type: MetricType = MetricType.GAUGE,
+         device_address: str | None = None,
      ) -> None:
          """Report a metric.

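
Because `device_address` moves from fourth place to the end of the signature, any caller passing it positionally would now be feeding it into `techrule`; passing it by keyword sidesteps the reordering. An illustrative call (metric key, dimensions, and address are made up):

```python
from dynatrace_extension import Extension


class MyStorageExtension(Extension):
    def query(self):
        # Keyword arguments are unaffected by the parameter reordering.
        self.report_metric(
            "custom.storage.free_space",
            1337,
            dimensions={"volume": "/dev/sda1"},
            device_address="10.0.0.42",  # now the last parameter
        )
```
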
@@ -980,33 +1018,77 @@ class Extension:
980
1018
  )
981
1019
 
982
1020
  def _build_current_status(self):
983
- overall_status = Status(StatusValue.OK)
984
-
985
1021
  if self._initialization_error:
986
- overall_status.status = StatusValue.GENERIC_ERROR
987
- overall_status.message = self._initialization_error
988
- return overall_status
1022
+ return Status(StatusValue.OK, self._initialization_error)
989
1023
 
990
- internal_callback_error = False
991
1024
  messages = []
1025
+
1026
+ # Check for internal errors
992
1027
  with self._internal_callbacks_results_lock:
1028
+ overall_status_value = Status(StatusValue.OK)
1029
+ internal_callback_error = False
1030
+
993
1031
  for callback, result in self._internal_callbacks_results.items():
994
1032
  if result.is_error():
995
1033
  internal_callback_error = True
996
- overall_status.status = result.status
997
- messages.append(f"{callback}: {result.message}")
1034
+ overall_status_value = result.status
1035
+ messages.append(f"{callback}: {result.status} - {result.message}")
1036
+
998
1037
  if internal_callback_error:
999
- overall_status.message = "\n".join(messages)
1000
- return overall_status
1038
+ return Status(overall_status_value, "\n".join(messages))
1039
+
1040
+ # Handle regular statuses, merge all EndpointStatuses
1041
+ ep_status_merged = EndpointStatuses(0)
1042
+ all_ok = True
1043
+ all_err = True
1044
+ any_warning = False
1001
1045
 
1002
1046
  for callback in self._scheduled_callbacks:
1003
- if callback.status.is_error():
1004
- overall_status.status = callback.status.status
1005
- messages.append(f"{callback}: {callback.status.message}")
1047
+ if isinstance(callback.status, IgnoreStatus):
1048
+ continue
1049
+
1050
+ if isinstance(callback.status, EndpointStatuses):
1051
+ try:
1052
+ ep_status_merged.merge(callback.status)
1053
+ except EndpointStatuses.MergeConflictError as e:
1054
+ self.logger.exception(e)
1006
1055
  continue
1007
- if callback.status.message is not None and callback.status.message != "":
1008
- messages.append(f"{callback}: {callback.status.message}")
1009
- overall_status.message = "\n".join(messages)
1056
+
1057
+ if callback.status.is_warning():
1058
+ any_warning = True
1059
+
1060
+ if callback.status.is_error():
1061
+ all_ok = False
1062
+ else:
1063
+ all_err = False
1064
+
1065
+ if callback.status.is_error() or (callback.status.message is not None and callback.status.message != ""):
1066
+ messages.append(f"{callback.name()}: {callback.status.status.value} - {callback.status.message}")
1067
+
1068
+ # Handle merged EndpointStatuses
1069
+ if ep_status_merged._num_endpoints > 0:
1070
+ ep_status_merged = ep_status_merged.build_common_status()
1071
+ messages.insert(0, ep_status_merged.message)
1072
+
1073
+ if ep_status_merged.is_warning():
1074
+ any_warning = True
1075
+
1076
+ if ep_status_merged.is_error():
1077
+ all_ok = False
1078
+ else:
1079
+ all_err = False
1080
+
1081
+ # Build overall status
1082
+ overall_status = Status(StatusValue.OK, "\n".join(messages))
1083
+ if any_warning:
1084
+ overall_status.status = StatusValue.WARNING
1085
+ elif all_ok:
1086
+ overall_status.status = StatusValue.OK
1087
+ elif all_err:
1088
+ overall_status.status = StatusValue.GENERIC_ERROR
1089
+ else:
1090
+ overall_status.status = StatusValue.WARNING
1091
+
1010
1092
  return overall_status
1011
1093
 
1012
1094
  def _update_cluster_time_diff(self):
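
The aggregation above is easier to read outside the diff: IgnoreStatus results are skipped, EndpointStatuses results are merged and summarised first, and the remaining callback results resolve by the precedence below. This is a standalone restatement for readability, not an SDK function:

```python
from dynatrace_extension import StatusValue


def overall(any_warning: bool, all_ok: bool, all_err: bool) -> StatusValue:
    # Same precedence as _build_current_status (and MultiStatus.build) above.
    if any_warning:
        return StatusValue.WARNING
    if all_ok:
        return StatusValue.OK
    if all_err:
        return StatusValue.GENERIC_ERROR
    return StatusValue.WARNING  # a mix of OK and failing results


assert overall(any_warning=False, all_ok=True, all_err=False) is StatusValue.OK
assert overall(any_warning=False, all_ok=False, all_err=True) is StatusValue.GENERIC_ERROR
assert overall(any_warning=False, all_ok=False, all_err=False) is StatusValue.WARNING
assert overall(any_warning=True, all_ok=False, all_err=False) is StatusValue.WARNING
```
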
@@ -0,0 +1,68 @@
+ import logging
+ import threading
+ import time
+
+
+ class ThrottledHandler(logging.StreamHandler):
+     def __init__(self, stream, log_repeat_interval=3600, cache_clean_interval=3600):
+         super().__init__(stream)
+         self.record_to_last_print_time = {}
+         self.log_repeat_interval = log_repeat_interval
+         self.record_cache_last_clean_timestamp = time.time()
+         self.record_cache_clean_interval = cache_clean_interval
+         self.record_cache_clean_lock = threading.Lock()
+
+     def emit(self, record):
+         current_time = time.time()
+         if record.msg not in self.record_to_last_print_time.keys():
+             super().emit(record)
+             self.record_to_last_print_time[record.msg] = current_time
+             return
+         last_print_time = self.record_to_last_print_time[record.msg]
+         if (last_print_time - current_time) > self.log_repeat_interval:
+             self.record_to_last_print_time[record.msg] = current_time
+             super().emit(record)
+         self.cleanup(current_time)
+
+     def cleanup(self, current_time):
+         if (self.record_cache_last_clean_timestamp - current_time) > self.record_cache_clean_interval:
+             with self.record_cache_clean_lock:
+                 keys_to_remove = []
+                 for record_message, last_print_time in self.record_to_last_print_time.items():
+                     if (current_time - last_print_time) > self.log_repeat_interval:
+                         keys_to_remove.append(record_message)
+                 for key in keys_to_remove:
+                     del self.record_to_last_print_time[key]
+
+
+ class StrictThrottledHandler(logging.StreamHandler):
+     def __init__(self, stream, log_repeat_interval=3600, cache_clean_interval=3600):
+         super().__init__(stream)
+         self.record_to_last_print_time = {}
+         self.log_repeat_interval = log_repeat_interval
+         self.record_cache_last_clean_timestamp = time.time()
+         self.record_cache_clean_interval = cache_clean_interval
+         self.record_cache_clean_lock = threading.Lock()
+
+     def emit(self, record):
+         current_time = time.time()
+         log_identifier = f"{record.filename}:{record.lineno}"
+         if log_identifier not in self.record_to_last_print_time.keys():
+             super().emit(record)
+             self.record_to_last_print_time[log_identifier] = current_time
+             return
+         last_print_time = self.record_to_last_print_time[log_identifier]
+         if (last_print_time - current_time) > self.log_repeat_interval:
+             self.record_to_last_print_time[log_identifier] = current_time
+             super().emit(record)
+         self.cleanup(current_time)
+
+     def cleanup(self, current_time):
+         if (self.record_cache_last_clean_timestamp - current_time) > self.record_cache_clean_interval:
+             with self.record_cache_clean_lock:
+                 keys_to_remove = []
+                 for record_message, last_print_time in self.record_to_last_print_time.items():
+                     if (current_time - last_print_time) > self.log_repeat_interval:
+                         keys_to_remove.append(record_message)
+                 for key in keys_to_remove:
+                     del self.record_to_last_print_time[key]
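
Both handlers in the new throttled_logger module are ordinary `logging.StreamHandler` subclasses, so they can also be attached to any logger directly, outside the module-level loggers that extension.py wires up. A minimal sketch (logger name and intervals are illustrative):

```python
import logging
import sys

from dynatrace_extension.sdk.throttled_logger import ThrottledHandler

log = logging.getLogger("my_extension")
log.setLevel(logging.INFO)

handler = ThrottledHandler(sys.stdout, log_repeat_interval=600, cache_clean_interval=600)
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
log.addHandler(handler)

for _ in range(5):
    # Identical messages are de-duplicated by emit(); only the first
    # occurrence within log_repeat_interval is written to the stream.
    log.info("polling endpoint")
```
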