qena-shared-lib 0.1.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qena_shared_lib/__init__.py +27 -0
- qena_shared_lib/application.py +190 -0
- qena_shared_lib/background.py +109 -0
- qena_shared_lib/dependencies/__init__.py +19 -0
- qena_shared_lib/dependencies/http.py +62 -0
- qena_shared_lib/dependencies/miscellaneous.py +35 -0
- qena_shared_lib/exception_handlers.py +165 -0
- qena_shared_lib/exceptions.py +319 -0
- qena_shared_lib/http.py +631 -0
- qena_shared_lib/logging.py +63 -0
- qena_shared_lib/logstash/__init__.py +17 -0
- qena_shared_lib/logstash/_base.py +573 -0
- qena_shared_lib/logstash/_http_sender.py +61 -0
- qena_shared_lib/logstash/_tcp_sender.py +84 -0
- qena_shared_lib/py.typed +0 -0
- qena_shared_lib/rabbitmq/__init__.py +52 -0
- qena_shared_lib/rabbitmq/_base.py +741 -0
- qena_shared_lib/rabbitmq/_channel.py +196 -0
- qena_shared_lib/rabbitmq/_exception_handlers.py +159 -0
- qena_shared_lib/rabbitmq/_exceptions.py +46 -0
- qena_shared_lib/rabbitmq/_listener.py +1292 -0
- qena_shared_lib/rabbitmq/_pool.py +74 -0
- qena_shared_lib/rabbitmq/_publisher.py +73 -0
- qena_shared_lib/rabbitmq/_rpc_client.py +286 -0
- qena_shared_lib/rabbitmq/_utils.py +18 -0
- qena_shared_lib/scheduler.py +402 -0
- qena_shared_lib/security.py +205 -0
- qena_shared_lib/utils.py +28 -0
- qena_shared_lib-0.1.0.dist-info/METADATA +473 -0
- qena_shared_lib-0.1.0.dist-info/RECORD +31 -0
- qena_shared_lib-0.1.0.dist-info/WHEEL +4 -0
qena_shared_lib/logstash/__init__.py
@@ -0,0 +1,17 @@
from ._base import (
    BaseLogstashSender,
    LogLevel,
    LogstashLogRecord,
    SenderResponse,
)
from ._http_sender import HTTPSender
from ._tcp_sender import TCPSender

__all__ = [
    "BaseLogstashSender",
    "HTTPSender",
    "LogLevel",
    "LogstashLogRecord",
    "SenderResponse",
    "TCPSender",
]
qena_shared_lib/logstash/_base.py
@@ -0,0 +1,573 @@
from asyncio import (
    Future,
    Queue,
    QueueFull,
    Task,
    gather,
)
from dataclasses import dataclass
from enum import Enum
from sys import exc_info
from traceback import format_exception

from prometheus_client import Counter
from prometheus_client import Enum as PrometheusEnum

from ..logging import LoggerProvider
from ..utils import AsyncEventLoopMixin

__all__ = [
    "BaseLogstashSender",
    "LogLevel",
    "LogstashLogRecord",
]


class LogLevel(Enum):
    DEBUG = 0
    INFO = 1
    WARNING = 2
    ERROR = 3


class LogstashLogRecord:
    def __init__(
        self,
        message: str,
        service_name: str,
        log_level: LogLevel,
        log_logger: str,
    ):
        self._message = message
        self._service_name = service_name
        self._log_level = log_level
        self._log_logger = log_logger
        self._tags = None
        self._labels = None
        self._error_type = None
        self._error_message = None
        self._error_stack_trace = None
        self._log_retries = 0

    @property
    def message(self) -> str:
        return self._message

    @property
    def service_name(self) -> str:
        return self._service_name

    @property
    def log_level(self) -> LogLevel:
        return self._log_level

    @property
    def log_logger(self) -> str:
        return self._log_logger

    @property
    def tags(self) -> list[str] | None:
        return self._tags

    @tags.setter
    def tags(self, tags: list[str]):
        self._tags = tags

    @property
    def labels(self) -> dict[str, str] | None:
        return self._labels

    @labels.setter
    def labels(self, labels: dict[str, str]):
        self._labels = labels

    @property
    def error(self) -> tuple[str | None, str | None, str | None]:
        return self._error_type, self._error_message, self._error_stack_trace

    @error.setter
    def error(self, exception: BaseException):
        self._error_type = type(exception).__name__
        self._error_message = str(exception)

        if exception.__traceback__ is not None:
            self._error_stack_trace = "".join(format_exception(exception))

        if self._labels is None:
            self._labels = {}

        self._labels.update(self._extract_exception_cause(exception))

    def _extract_exception_cause(
        self, exception: BaseException
    ) -> dict[str, str]:
        causes = {}
        cause = exception.__cause__
        cause_depth = 0

        while cause is not None:
            cause_depth += 1
            causes[self._depth_to_cause(cause_depth)] = cause.__class__.__name__
            cause = cause.__cause__

        return causes

    def _depth_to_cause(self, depth: int) -> str:
        match depth:
            case 1:
                return "causeOne"
            case 2:
                return "causeTwo"
            case 3:
                return "causeThree"
            case 4:
                return "causeFour"
            case 5:
                return "causeFive"
            case 6:
                return "causeSix"
            case 7:
                return "causeSeven"
            case 8:
                return "causeEight"
            case 9:
                return "causeNine"
            case _:
                return "causeN"

    def __str__(self) -> str:
        return f"level `{self._log_level.name}`, message `{self._message}`"

    def __repr__(self) -> str:
        return (
            "LogstashLogRecord (\n\tlevel : `%s`,\n\tmessage : `%s`,\n\ttags : %s,\n\tlabel : %s,\n\terror_type : `%s`,\n\terror_message: `%s`\n)%s"
            % (
                self._log_level.name,
                self._message,
                self._tags or [],
                self._labels or {},
                self._error_type or "None",
                self._error_message or "None",
                f"\n{self._error_stack_trace}"
                if self._error_stack_trace is not None
                else "",
            )
        )

    @property
    def log_retries(self) -> int:
        return self._log_retries

    @log_retries.setter
    def log_retries(self, log_retries: int):
        self._log_retries = log_retries

    def to_dict(self) -> dict:
        log: dict[str, str | list | dict[str, str]] = {
            "message": self._message,
            "service.name": self._service_name,
            "log.level": self._log_level.name.lower(),
            "log.logger": self._log_logger,
        }

        if self._tags is not None:
            log["tags"] = self._tags

        if self._labels is not None:
            log["labels"] = self._labels

        if self._error_type is not None:
            log["error.type"] = self._error_type

        if self._error_message is not None:
            log["error.message"] = self._error_message

        if self._error_stack_trace is not None:
            log["error.stack_trace"] = self._error_stack_trace

        return log


@dataclass
class SenderResponse:
    sent: bool
    reason: str | None = None
    should_retry: bool | None = None


class EndOfLogMarker:
    pass


class BaseLogstashSender(AsyncEventLoopMixin):
    LOGSTASH_LOGS = Counter(
        name="successful_logstash_logs",
        documentation="Successfully sent logstash log count",
        labelnames=["log_level"],
    )
    FAILED_LOGSTASH_LOGS = Counter(
        name="failed_logstash_logs",
        documentation="Failed logstash log count",
        labelnames=["log_level", "exception"],
    )
    LOGSTASH_SENDER_STATE = PrometheusEnum(
        name="logstash_sender_state",
        documentation="Logstash sender state",
        states=["running", "stopped"],
    )

    def __init__(
        self,
        service_name: str,
        max_log_retry: int = 5,
        log_queue_size: int = 1000,
        failed_log_queue_size: int = 1000,
    ):
        self._sender = f"qena_shared_lib.logstash.{self.__class__.__name__}"
        self._service_name = service_name
        self._max_log_retry = max_log_retry
        self._started = False
        self._closed = False
        self._close_future = self.loop.create_future()
        self._log_queue: Queue[LogstashLogRecord | EndOfLogMarker] = Queue(
            log_queue_size
        )
        self._dead_letter_log_queue: Queue[
            LogstashLogRecord | EndOfLogMarker
        ] = Queue(failed_log_queue_size)
        self._level = LogLevel.INFO
        self._logger = LoggerProvider.default().get_logger(
            f"logstash.{self.__class__.__name__.lower()}"
        )

    async def start(self):
        if self._started:
            raise RuntimeError("logstash sender already started")

        self._started = True
        self._closed = False
        _, _ = await gather(
            self.loop.run_in_executor(executor=None, func=self._hook_on_start),
            self._hook_on_start_async(),
        )

        self.loop.create_task(self._flush_logs()).add_done_callback(
            self._on_log_flusher_closed
        )
        self._logger.info(
            "logstash logger `%s` started accepting logs",
            self.__class__.__name__,
        )
        self.LOGSTASH_SENDER_STATE.state("running")

    def _hook_on_start(self):
        pass

    async def _hook_on_start_async(self):
        pass

    def _on_log_flusher_closed(self, task: Task):
        if task.cancelled():
            self._close_future.set_result(None)

            return

        exception = task.exception()

        if exception is not None:
            self._close_future.set_exception(exception)

            return

        gather(
            self.loop.run_in_executor(executor=None, func=self._hook_on_stop),
            self._hook_on_stop_async(),
        ).add_done_callback(self._on_close_hook_done)

    def _on_close_hook_done(self, task_or_future: Task | Future):
        if task_or_future.cancelled():
            self._close_future.set_result(None)

            return

        exception = task_or_future.exception()

        if exception is not None:
            self._close_future.set_exception(exception)

            return

        self._close_future.set_result(None)
        self._logger.debug(
            "logstash http logger closed, will no longer accept logs"
        )

    def stop(self) -> Future:
        if self._closed:
            raise RuntimeError("logstash sender already closed")

        self._closed = True
        self._started = False

        try:
            self._log_queue.put_nowait(EndOfLogMarker())
            self._dead_letter_log_queue.put_nowait(EndOfLogMarker())
        except QueueFull:
            pass

        self._close_future.add_done_callback(self._on_close_future_done)

        return self._close_future

    def _on_close_future_done(self, _):
        self.LOGSTASH_SENDER_STATE.state("stopped")

    def _hook_on_stop(self):
        pass

    async def _hook_on_stop_async(self):
        pass

    async def _flush_logs(self):
        while (
            not self._closed
            or not self._log_queue.empty()
            or not self._dead_letter_log_queue.empty()
        ):
            log = None

            if not self._dead_letter_log_queue.empty():
                log = await self._dead_letter_log_queue.get()

                if isinstance(log, LogstashLogRecord):
                    if log.log_retries >= self._max_log_retry:
                        self._logger.exception(
                            "failed to send log too many times, falling back to stdout or stderr. \n%r",
                            log,
                        )

                        continue

                    log.log_retries += 1

            if log is None:
                log = await self._log_queue.get()

            if isinstance(log, EndOfLogMarker):
                if (
                    not self._log_queue.empty()
                    or not self._dead_letter_log_queue.empty()
                ):
                    continue

                break

            try:
                sender_response = await self._send(log)
            except Exception as e:
                self._put_to_dead_letter_log_queue(log)
                self._logger.exception(
                    "error occurred while sending log to logstash"
                )
                self.FAILED_LOGSTASH_LOGS.labels(
                    log_level=log.log_level.name, exception=e.__class__.__name__
                ).inc()

                continue

            if not sender_response.sent:
                if (
                    sender_response.should_retry is None
                    or sender_response.should_retry
                ):
                    self._put_to_dead_letter_log_queue(log)
                else:
                    self._logger.error(
                        "failed log wasn't requeued, falling back to stdout or stderr.\n%r",
                        log,
                    )

                self._logger.warning(
                    "log wasn't sent successfully, reason : %s",
                    sender_response.reason or "No reason",
                )
            else:
                self.LOGSTASH_LOGS.labels(log_level=log.log_level.name).inc()
                self._logger.debug("log sent to logstash.\n%r", log)

    async def _send(self, log: LogstashLogRecord) -> SenderResponse:
        del log

        raise NotImplementedError()

    def _put_to_dead_letter_log_queue(self, log: LogstashLogRecord):
        if self._closed:
            self._logger.error(
                "%s logger closed, falling back to stdout or stderr.\n%r",
                self._sender,
                log,
            )

            return

        try:
            self._dead_letter_log_queue.put_nowait(log)
        except QueueFull:
            self._logger.error(
                "unable to queue log, falling back to stdout or stderr.\n%r",
                log,
            )

    def log(
        self,
        level: LogLevel,
        message: str,
        tags: list[str] | None = None,
        extra: dict[str, str] | None = None,
        exception: BaseException | None = None,
    ):
        self._enqueue_log(
            level=level,
            message=message,
            tags=tags,
            extra=extra,
            exception=exception,
        )

    def debug(
        self,
        message: str,
        tags: list[str] | None = None,
        extra: dict[str, str] | None = None,
        exception: BaseException | None = None,
    ):
        self._enqueue_log(
            level=LogLevel.DEBUG,
            message=message,
            tags=tags,
            extra=extra,
            exception=exception,
        )

    def info(
        self,
        message: str,
        tags: list[str] | None = None,
        extra: dict[str, str] | None = None,
        exception: BaseException | None = None,
    ):
        self._enqueue_log(
            level=LogLevel.INFO,
            message=message,
            tags=tags,
            extra=extra,
            exception=exception,
        )

    def warning(
        self,
        message: str,
        tags: list[str] | None = None,
        extra: dict[str, str] | None = None,
        exception: BaseException | None = None,
    ):
        self._enqueue_log(
            level=LogLevel.WARNING,
            message=message,
            tags=tags,
            extra=extra,
            exception=exception,
        )

    def error(
        self,
        message: str,
        tags: list[str] | None = None,
        extra: dict[str, str] | None = None,
        exception: BaseException | None = None,
    ):
        self._enqueue_log(
            level=LogLevel.ERROR,
            message=message,
            tags=tags,
            extra=extra,
            exception=exception,
        )

    def exception(
        self,
        message: str,
        tags: list[str] | None = None,
        extra: dict[str, str] | None = None,
        exception: BaseException | None = None,
    ):
        if exception is None:
            _, exception, _ = exc_info()

        self.error(
            message=message,
            tags=tags,
            extra=extra,
            exception=exception,
        )

    def set_level(self, level: LogLevel):
        self._level = level

    def _enqueue_log(
        self,
        level: LogLevel,
        message: str,
        tags: list[str] | None = None,
        extra: dict[str, str] | None = None,
        exception: BaseException | None = None,
    ):
        if self._closed:
            self._logger.warning("Logstash http logger is already close")

            return

        if level.value < self._level.value:
            return

        log = self._construct_log(
            level=level,
            message=message,
            tags=tags,
            extra=extra,
            exception=exception,
        )

        try:
            self._log_queue.put_nowait(log)
        except QueueFull:
            self._put_to_dead_letter_log_queue(log)

    def _construct_log(
        self,
        level: LogLevel,
        message: str,
        tags: list[str] | None = None,
        extra: dict[str, str] | None = None,
        exception: BaseException | None = None,
    ) -> LogstashLogRecord:
        log = LogstashLogRecord(
            message=message,
            service_name=self._service_name,
            log_level=level,
            log_logger=self._sender,
        )

        if tags is not None:
            log.tags = tags

        if extra is not None and all(
            isinstance(k, str) and isinstance(v, str)
            for (k, v) in extra.items()
        ):
            log.labels = extra

        if exception:
            log.error = exception

        return log
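Note on the extension point: BaseLogstashSender leaves _send unimplemented, so concrete senders override it and report the outcome through SenderResponse, while start(), stop() and the queue-draining _flush_logs task are inherited. A minimal sketch of a custom sender follows, assuming only the classes exported by qena_shared_lib.logstash above; StdoutSender itself is hypothetical and not part of the package.

from json import dumps

from qena_shared_lib.logstash import (
    BaseLogstashSender,
    LogstashLogRecord,
    SenderResponse,
)


class StdoutSender(BaseLogstashSender):
    # Hypothetical sender that prints records instead of shipping them to Logstash.
    async def _send(self, log: LogstashLogRecord) -> SenderResponse:
        try:
            print(dumps(log.to_dict()))
        except Exception as e:
            # Returning sent=False with should_retry=True sends the record back
            # through the dead-letter queue, mirroring what _flush_logs does above.
            return SenderResponse(sent=False, reason=str(e), should_retry=True)

        return SenderResponse(sent=True)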
qena_shared_lib/logstash/_http_sender.py
@@ -0,0 +1,61 @@
from httpx import AsyncClient, Timeout

from ..logging import LoggerProvider
from ._base import BaseLogstashSender, LogstashLogRecord, SenderResponse

__all__ = ["HTTPSender"]


class HTTPSender(BaseLogstashSender):
    def __init__(
        self,
        url: str,
        service_name: str,
        user: str | None = None,
        password: str | None = None,
        http_client_timeout: Timeout | float | None = None,
        max_log_retry: int = 5,
        log_queue_size: int = 100,
        failed_log_queue_size: int = 500,
    ):
        super().__init__(
            service_name=service_name,
            max_log_retry=max_log_retry,
            log_queue_size=log_queue_size,
            failed_log_queue_size=failed_log_queue_size,
        )

        self._url = url

        auth = None

        if user is not None and password is not None:
            auth = (
                user or "",
                password or "",
            )

        self._client = AsyncClient(
            auth=auth, timeout=http_client_timeout or 5.0
        )
        self._logger = LoggerProvider.default().get_logger(
            "logstash.httpsender"
        )

    async def _send(self, log: LogstashLogRecord) -> SenderResponse:
        send_log_response = await self._client.post(
            url=self._url,
            json=log.to_dict(),
        )

        if not send_log_response.is_success:
            return SenderResponse(
                sent=False,
                reason=f"status_code : {send_log_response.status_code}, body : {send_log_response.text}",
                should_retry=send_log_response.is_server_error,
            )

        return SenderResponse(sent=True)

    async def _hook_on_stop_async(self):
        await self._client.aclose()
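Going by the constructor and lifecycle methods shown in this diff, wiring the HTTP sender into a service would plausibly look like the sketch below. The endpoint URL and service name are placeholders, and the sender is assumed to be constructed inside a running event loop, since its base constructor touches self.loop.

from asyncio import run

from qena_shared_lib.logstash import HTTPSender, LogLevel


async def main():
    # Placeholder endpoint and service name, for illustration only.
    sender = HTTPSender(
        url="http://logstash.example.internal:8080",
        service_name="example-service",
    )
    sender.set_level(LogLevel.DEBUG)

    await sender.start()
    sender.info("service booted", tags=["startup"])

    try:
        raise ValueError("boom")
    except ValueError:
        # exception() falls back to sys.exc_info() when no exception is passed.
        sender.exception("unhandled error in worker")

    # stop() enqueues an end-of-log marker and returns a future that resolves
    # once the flush task and the stop hooks have finished.
    await sender.stop()


run(main())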