atomicshop 2.19.19__py3-none-any.whl → 2.20.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of atomicshop might be problematic.

atomicshop/__init__.py CHANGED
@@ -1,4 +1,4 @@
 """Atomic Basic functions and classes to make developer life easier"""
 
 __author__ = "Den Kras"
-__version__ = '2.19.19'
+__version__ = '2.20.0'
@@ -18,6 +18,20 @@ def current_thread_id():
     return thread_id
 
 
+def get_current_thread_name():
+    return threading.current_thread().name
+
+
+def set_current_thread_name(name: str):
+    threading.current_thread().name = name
+
+
+def set_current_thread_name_by_process_name():
+    import multiprocessing
+    current_process_name = multiprocessing.current_process().name
+    threading.current_thread().name = current_process_name
+
+
 def get_number_of_active_threads():
     return threading.active_count()
 
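The new thread-naming helpers only become interesting in a multiprocessing context. A minimal sketch of the intended effect, using only the standard library (the "dns_server" process name is borrowed from the mitm changes further down, purely for illustration):

```python
# Most log formatters print %(threadName)s, so copying the process name onto
# the worker's main thread makes multiprocessing workers identifiable in logs.
import logging
import multiprocessing
import threading


def worker():
    # Same effect as set_current_thread_name_by_process_name() above.
    threading.current_thread().name = multiprocessing.current_process().name
    logging.basicConfig(format='%(threadName)s: %(message)s', level=logging.INFO)
    logging.info("worker started")  # logs as "dns_server: worker started"


if __name__ == '__main__':
    process = multiprocessing.Process(target=worker, name="dns_server")
    process.start()
    process.join()
```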
@@ -2,6 +2,7 @@ from datetime import datetime
 import threading
 import queue
 import copy
+import socket
 
 from ..wrappers.socketw import receiver, sender, socket_client, base
 from .. import websocket_parse
@@ -73,12 +74,16 @@ def thread_worker_main(
 
     statistics_writer.write_row(
         thread_id=str(thread_id),
-        host=client_message.server_name,
+        engine=client_message.engine_name,
+        source_host=client_message.client_name,
+        source_ip=client_message.client_ip,
         tls_type=tls_type,
         tls_version=tls_version,
         protocol=client_message.protocol,
         protocol2=client_message.protocol2,
         protocol3=client_message.protocol3,
+        dest_port=client_message.destination_port,
+        host=client_message.server_name,
         path=http_path,
         status_code=http_status_code,
         command=http_command,
@@ -287,12 +292,14 @@ def thread_worker_main(
 
     def client_message_first_start() -> ClientMessage:
         client_message: ClientMessage = ClientMessage()
+        client_message.client_name = client_name
         client_message.client_ip = client_ip
         client_message.source_port = source_port
         client_message.destination_port = destination_port
         client_message.server_name = server_name
         client_message.thread_id = thread_id
         client_message.process_name = process_commandline
+        client_message.engine_name = engine_name
 
         return client_message
 
@@ -530,7 +537,9 @@ def thread_worker_main(
     http_path_queue: queue.Queue = queue.Queue()
 
     try:
+        engine_name: str = recorder.engine_name
         client_ip, source_port = client_socket.getpeername()
+        client_name = socket.gethostbyaddr(client_ip)[0]
         destination_port = client_socket.getsockname()[1]
 
         network_logger.info(f"Thread Created - Client [{client_ip}:{source_port}] | "
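A caveat on the new `client_name` line: socket.gethostbyaddr() raises socket.herror when the peer address has no PTR record, so callers may want a guard. A minimal sketch, assuming a fallback to the bare IP is acceptable (resolve_client_name is a hypothetical helper, not part of the package):

```python
import socket


def resolve_client_name(client_ip: str) -> str:
    try:
        return socket.gethostbyaddr(client_ip)[0]  # primary hostname of the peer
    except (socket.herror, socket.gaierror):
        return client_ip  # no reverse record: fall back to the bare IP
```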
@@ -66,7 +66,7 @@ class ModuleCategory:
         # logger_name=self.engine_name,
         # directory_path=logs_path,
         # add_stream=True,
-        # add_timedfile=True,
+        # add_timedfile_with_internal_queue=True,
         # formatter_streamhandler='DEFAULT',
         # formatter_filehandler='DEFAULT',
         # backupCount=config_static.LogRec.store_logs_for_x_days
@@ -10,6 +10,7 @@ class ClientMessage:
     def __init__(self):
         # noinspection PyTypeChecker
         self.timestamp: datetime = None
+        self.engine_name: str = str()
         # noinspection PyTypeChecker
         self.request_raw_bytes: bytes = None
         self.request_auto_parsed: Union[http_parse.HTTPRequestParse, any] = None
@@ -22,6 +23,7 @@ class ClientMessage:
         self.response_raw_hex: hex = None
         self.server_name: str = str()
         self.server_ip: str = str()
+        self.client_name: str = str()
         self.client_ip: str = str()
         self.source_port: int = int()
         self.destination_port: int = int()
@@ -2,10 +2,11 @@ import threading
 import multiprocessing
 import time
 import datetime
+import os
 
 import atomicshop  # Importing atomicshop package to get the version of the package.
 
-from .. import filesystem, queues, dns, on_exit, print_api
+from .. import filesystem, dns, on_exit, print_api
 from ..permissions import permissions
 from ..python_functions import get_current_python_version_string, check_python_version_compliance
 from ..wrappers.socketw import socket_wrapper, dns_server, base
@@ -29,6 +30,12 @@ EXCEPTIONS_CSV_LOGGER_HEADER: str = 'time,exception'
 # noinspection PyTypeChecker
 MITM_ERROR_LOGGER: loggingw.ExceptionCsvLogger = None
 
+# Create request domain queue.
+DOMAIN_QUEUE: multiprocessing.Queue = multiprocessing.Queue()
+
+# Create logger's queue.
+NETWORK_LOGGER_QUEUE: multiprocessing.Queue = multiprocessing.Queue()
+
 
 try:
     win_console.disable_quick_edit()
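Moving the queues to module level gives exit_cleanup() a handle on them; each child process still receives its queue explicitly as an argument (see the DNS hunk below), which is what makes this work under the 'spawn' start method, where module globals are re-imported rather than shared. A minimal stdlib sketch of that hand-off:

```python
import multiprocessing

WORK_QUEUE: multiprocessing.Queue = multiprocessing.Queue()


def child(q):
    q.put('hello from child')  # uses the queue handed over by the parent


if __name__ == '__main__':
    p = multiprocessing.Process(target=child, args=(WORK_QUEUE,))
    p.start()
    print(WORK_QUEUE.get())  # 'hello from child'
    p.join()
```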
@@ -58,9 +65,20 @@ def exit_cleanup():
         RECS_PROCESS_INSTANCE.terminate()
         RECS_PROCESS_INSTANCE.join()
 
+    # Before terminating multiprocessing child processes, we need to put None to all the QueueListeners' queues,
+    # so they will stop waiting for new logs and will be able to terminate.
+    # Otherwise we will get a BrokenPipeError exception. This happens because the QueueListener is waiting for
+    # new logs to come through the ".get()" method while the main process is already terminated.
+    NETWORK_LOGGER_QUEUE.put(None)
+    # Get all the child processes and terminate them.
+    for process in multiprocessing.active_children():
+        process.terminate()
+        # Wait for the processes to finish, since there is a logger there that needs to write the last log.
+        process.join()
+
 
 def mitm_server(config_file_path: str, script_version: str):
-    on_exit.register_exit_handler(exit_cleanup, at_exit=False)
+    on_exit.register_exit_handler(exit_cleanup, at_exit=False, kill_signal=False)
 
     # Main function should return integer with error code, 0 is successful.
     # Since listening server is infinite, this will not be reached.
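Condensed, the shutdown order the new exit_cleanup() relies on looks like this (a sketch; the function and queue names here are illustrative):

```python
import multiprocessing


def shutdown(log_queue: multiprocessing.Queue) -> None:
    # None is the QueueListener's stop sentinel: it unblocks the listener's
    # queue.get() so the listener thread can exit instead of raising BrokenPipeError.
    log_queue.put(None)
    for process in multiprocessing.active_children():
        process.terminate()
        process.join()  # wait, so the child's logger can flush its last record
```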
@@ -91,15 +109,21 @@ def mitm_server(config_file_path: str, script_version: str):
             config_static.Certificates.sni_server_certificate_from_server_socket_download_directory)
 
     network_logger_name = config_static.MainConfig.LOGGER_NAME
-    network_logger = loggingw.create_logger(
-        logger_name=network_logger_name,
-        directory_path=config_static.LogRec.logs_path,
+
+    _ = loggingw.create_logger(
+        get_queue_listener=True,
+        log_queue=NETWORK_LOGGER_QUEUE,
+        file_path=f'{config_static.LogRec.logs_path}{os.sep}{network_logger_name}.txt',
         add_stream=True,
         add_timedfile=True,
         formatter_streamhandler='DEFAULT',
         formatter_filehandler='DEFAULT',
-        backupCount=config_static.LogRec.store_logs_for_x_days
-    )
+        backupCount=config_static.LogRec.store_logs_for_x_days)
+
+    network_logger_with_queue_handler = loggingw.create_logger(
+        logger_name=network_logger_name,
+        add_queue_handler=True,
+        log_queue=NETWORK_LOGGER_QUEUE)
 
     # Initiate the Listener logger, which is a child of the network logger, so it uses the same settings and handlers.
     listener_logger = loggingw.get_logger_with_level(f'{network_logger_name}.listener')
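The split into two create_logger() calls implements the classic queue-based logging pattern: one QueueListener in the main process owning the real handlers, and a named logger that only carries a QueueHandler. A stdlib-only sketch of the same wiring (the file name is illustrative):

```python
import logging
import logging.handlers
import multiprocessing

log_queue: multiprocessing.Queue = multiprocessing.Queue()

# Listener side (main process): the real handlers, fed from the queue.
listener = logging.handlers.QueueListener(
    log_queue, logging.StreamHandler(), logging.FileHandler('network.txt'))
listener.start()

# Logger side (any process that got log_queue): only a QueueHandler.
logger = logging.getLogger('network')
logger.addHandler(logging.handlers.QueueHandler(log_queue))
logger.setLevel(logging.DEBUG)
logger.info('routed through the queue to console and file')
```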
@@ -242,44 +266,53 @@ def mitm_server(config_file_path: str, script_version: str):
 
     print_api.print_api("Press [Ctrl]+[C] to stop.", color='blue')
 
-    # Create request domain queue.
-    domain_queue = queues.NonBlockQueue()
-
     # === Initialize DNS module ====================================================================================
     if config_static.DNSServer.enable:
-        try:
-            dns_server_instance = dns_server.DnsServer(
-                listening_interface=config_static.DNSServer.listening_interface,
-                listening_port=config_static.DNSServer.listening_port,
-                log_directory_path=config_static.LogRec.logs_path,
-                backupCount_log_files_x_days=config_static.LogRec.store_logs_for_x_days,
-                forwarding_dns_service_ipv4=config_static.DNSServer.forwarding_dns_service_ipv4,
-                tcp_target_server_ipv4=config_static.DNSServer.target_tcp_server_ipv4,
+        dns_process = multiprocessing.Process(
+            target=dns_server.start_dns_server_multiprocessing_worker,
+            kwargs={
+                'listening_interface': config_static.DNSServer.listening_interface,
+                'listening_port': config_static.DNSServer.listening_port,
+                'log_directory_path': config_static.LogRec.logs_path,
+                'backupCount_log_files_x_days': config_static.LogRec.store_logs_for_x_days,
+                'forwarding_dns_service_ipv4': config_static.DNSServer.forwarding_dns_service_ipv4,
+                'tcp_target_server_ipv4': config_static.DNSServer.target_tcp_server_ipv4,
                 # Passing the engine domain list to DNS server to work with.
                 # 'list' function re-initializes the current list, or else it will be the same instance object.
-                tcp_resolve_domain_list=list(config_static.Certificates.domains_all_times),
-                offline_mode=config_static.DNSServer.offline_mode,
-                resolve_to_tcp_server_only_tcp_resolve_domains=(
+                'tcp_resolve_domain_list': list(config_static.Certificates.domains_all_times),
+                'offline_mode': config_static.DNSServer.offline_mode,
+                'resolve_to_tcp_server_only_tcp_resolve_domains': (
                     config_static.DNSServer.resolve_to_tcp_server_only_engine_domains),
-                resolve_to_tcp_server_all_domains=config_static.DNSServer.resolve_to_tcp_server_all_domains,
-                resolve_regular=config_static.DNSServer.resolve_regular,
-                cache_timeout_minutes=config_static.DNSServer.cache_timeout_minutes,
-                request_domain_queue=domain_queue,
-                logger=network_logger
-            )
-        except (dns_server.DnsPortInUseError, dns_server.DnsConfigurationValuesError) as e:
-            print_api.print_api(e, error_type=True, color="red", logger=system_logger)
-            # Wait for the message to be printed and saved to file.
+                'resolve_to_tcp_server_all_domains': config_static.DNSServer.resolve_to_tcp_server_all_domains,
+                'resolve_regular': config_static.DNSServer.resolve_regular,
+                'cache_timeout_minutes': config_static.DNSServer.cache_timeout_minutes,
+                'request_domain_queue': DOMAIN_QUEUE,
+                'logging_queue': NETWORK_LOGGER_QUEUE,
+                'logger_name': network_logger_name
+            },
+            name="dns_server")
+        dns_process.daemon = True
+        dns_process.start()
+
+        is_alive: bool = False
+        max_wait_time: int = 5
+        while not is_alive:
+            is_alive = dns_process.is_alive()
             time.sleep(1)
-            return 1
-
-        dns_thread = threading.Thread(target=dns_server_instance.start, name="dns_server")
-        dns_thread.daemon = True
-        dns_thread.start()
+            max_wait_time -= 1
+            if max_wait_time == 0:
+                message = "DNS Server process didn't start."
+                print_api.print_api(message, error_type=True, color="red", logger=system_logger)
+                # Wait for the message to be printed and saved to file.
+                time.sleep(1)
+                return 1
 
     # === EOF Initialize DNS module ================================================================================
     # === Initialize TCP Server ====================================================================================
     if config_static.TCPServer.enable:
+        engines_domains: dict = dict()
+        for engine in engines_list:
+            engines_domains[engine.engine_name] = engine.domain_list
         try:
             socket_wrapper_instance = socket_wrapper.SocketWrapper(
                 listening_interface=config_static.TCPServer.listening_interface,
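The alive-polling loop above gives the child roughly five seconds to start. An alternative sketch, not the package's API, is to have the worker signal readiness explicitly, which also distinguishes "process exists" from "server is actually listening":

```python
import multiprocessing


def dns_worker(ready):
    # ... bind the listening socket, build the logger ... (elided)
    ready.set()  # tell the parent we are actually serving


if __name__ == '__main__':
    ready = multiprocessing.Event()
    proc = multiprocessing.Process(
        target=dns_worker, args=(ready,), name="dns_server", daemon=True)
    proc.start()
    if not ready.wait(timeout=5):  # stricter than polling is_alive()
        raise RuntimeError("DNS Server process didn't start.")
```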
@@ -319,7 +352,8 @@ def mitm_server(config_file_path: str, script_version: str):
                 forwarding_dns_service_ipv4_list___only_for_localhost=(
                     config_static.TCPServer.forwarding_dns_service_ipv4_list___only_for_localhost),
                 skip_extension_id_list=config_static.SkipExtensions.SKIP_EXTENSION_ID_LIST,
-                request_domain_from_dns_server_queue=domain_queue
+                request_domain_from_dns_server_queue=DOMAIN_QUEUE,
+                engines_domains=engines_domains
             )
         except socket_wrapper.SocketWrapperPortInUseError as e:
             print_api.print_api(e, error_type=True, color="red", logger=system_logger)
@@ -376,7 +410,7 @@ def mitm_server(config_file_path: str, script_version: str):
         target=socket_wrapper_instance.loop_for_incoming_sockets,
         kwargs={
             'reference_function_name': thread_worker_main,
-            'reference_function_args': (network_logger, statistics_writer, engines_list, reference_module,)
+            'reference_function_args': (network_logger_with_queue_handler, statistics_writer, engines_list, reference_module,)
         },
         name="accepting_loop"
     )
atomicshop/on_exit.py CHANGED
@@ -133,6 +133,7 @@ def register_exit_handler(
         Same goes for all the exceptions.
     :param console_close: Register the console close handler.
     :param kill_signal: Register the kill signal handler.
+        Same problem as with the atexit handler: it will be called right away on [Ctrl]+[C].
     :param args: The arguments to pass to the cleanup action.
     :param kwargs: The keyword arguments to pass to the cleanup action.
     """
atomicshop/print_api.py CHANGED
@@ -112,7 +112,7 @@ def print_api(
     elif logger_method == 'critical' and not color:
         color = 'red'
 
-    if color is not None and logger is None:
+    if color is not None:
         message = ansi_escape_codes.get_colors_basic_dict(color) + message + ansi_escape_codes.ColorsBasic.END
 
     # If 'oneline' is set to 'True', we'll output the message as one line.
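With the `and logger is None` condition dropped, colored messages now keep their ANSI escape codes when routed to a logger as well. A sketch of what that wrapping amounts to (the codes below are standard ANSI values, not the package's dictionary):

```python
RED = '\033[31m'
END = '\033[0m'


def colorize(message: str, color_code: str = RED) -> str:
    return f"{color_code}{message}{END}"


print(colorize("something failed"))  # renders red on ANSI-capable terminals
```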
@@ -126,13 +126,10 @@ def print_api(
     if logger:
         # Emit to logger only if 'print_end' is default, since we can't take responsibility for anything else.
         if print_end == '\n':
-            if stdcolor and color is not None:
-                # Use logger to output message.
-                with loggingw.temporary_change_logger_stream_record_color(logger, color):
-                    getattr(logger, logger_method)(message)
-            else:
-                # Use logger to output message.
-                getattr(logger, logger_method)(message)
+            # Use logger to output message.
+            getattr(logger, logger_method)(message)
+        else:
+            raise ValueError("Logger can't output messages with 'print_end' other than '\\n'.")
     # If logger wasn't passed.
     else:
         # Use print to output the message.
@@ -9,6 +9,7 @@ from typing import Literal, Union
 import threading
 from datetime import datetime
 import contextlib
+import multiprocessing
 
 from . import loggers, formatters, filters, consts
 from ... import datetimes, filesystem
@@ -48,17 +49,16 @@ def _process_formatter_attribute(
     return formatter
 
 
-def add_stream_handler(
-        logger: logging.Logger,
+def get_stream_handler_extended(
         logging_level: str = "DEBUG",
         formatter: Union[
             Literal['DEFAULT', 'MESSAGE'],
             str,
             None] = None,
         formatter_use_nanoseconds: bool = False
-):
+) -> logging.StreamHandler:
     """
-    Function to add StreamHandler to logger.
+    Function to get StreamHandler with extended configuration.
     Stream formatter will output messages to the console.
     """
@@ -76,11 +76,7 @@ def add_stream_handler(
         formatter=formatter, use_nanoseconds=formatter_use_nanoseconds)
     set_formatter(stream_handler, logging_formatter)
 
-    # Adding the handler to the main logger
-    loggers.add_handler(logger, stream_handler)
-
-    # Disable propagation from the 'root' logger, so we will not see the messages twice.
-    loggers.set_propagation(logger)
+    return stream_handler
 
 
 # Function to start the interval-based rotation check
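Since the add_* helpers became get_* helpers throughout this module, attaching the handler and disabling propagation are now the caller's responsibility. A minimal sketch of the new call shape (assumed usage, not taken from the package):

```python
import logging

stream_handler = logging.StreamHandler()  # what get_stream_handler() returns
logger = logging.getLogger('example')
logger.addHandler(stream_handler)         # previously done inside add_stream_handler()
logger.propagate = False                  # previously loggers.set_propagation(logger)
logger.warning('handled by the explicitly attached StreamHandler')
```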
@@ -140,9 +136,80 @@ def _wrap_do_rollover(handler, header):
     handler.doRollover = new_do_rollover
 
 
+def get_queue_handler_and_start_queue_listener_for_file_handler(file_handler):
+    """
+    Function to create a QueueHandler and start a QueueListener for the FileHandler.
+    The QueueListener will get the logs from the queue and use the FileHandler to write them to the file.
+    The QueueHandler will put the logs into the queue.
+
+    :param file_handler: FileHandler object.
+    :return: QueueHandler object.
+    """
+
+    # Create the Queue between threads. "-1" means that an infinite number of items can be
+    # put in the Queue. If the integer is bigger than 0, it will be the maximum number of items.
+    queue_object = queue.Queue(-1)
+
+    # Create the QueueListener, which will get the logs from the queue and use the FileHandler to write them
+    # to the file.
+    start_queue_listener_for_handlers((file_handler,), queue_object)
+
+    # Get the QueueHandler, which will put the logs into the queue.
+    queue_handler = get_queue_handler(queue_object)
+
+    return queue_handler
+
+
+# BASE FUNCTIONS =======================================================================================================
+
+
+def get_stream_handler() -> logging.StreamHandler:
+    """
+    Function to get a StreamHandler.
+    This handler will output messages to the console.
+
+    :return: StreamHandler.
+    """
+
+    return logging.StreamHandler()
+
+
 # noinspection PyPep8Naming
-def add_timedfilehandler_with_queuehandler(
-        logger: logging.Logger,
+def get_timed_rotating_file_handler(
+        log_file_path: str,
+        when: str = "midnight",
+        interval: int = 1,
+        backupCount: int = 0,
+        delay: bool = False,
+        encoding=None
+) -> TimedRotatingFileHandler:
+    """
+    Function to get a TimedRotatingFileHandler.
+    This handler will output messages to a file, rotating the log file at certain timed intervals.
+
+    :param log_file_path: Path to the log file.
+    :param when: When to rotate the log file. Possible values:
+        "S" - Seconds
+        "M" - Minutes
+        "H" - Hours
+        "D" - Days
+        "midnight" - Roll over at midnight
+    :param interval: Interval to rotate the log file.
+    :param backupCount: int, Number of backup files to keep. Default is 0.
+        If backupCount is > 0, when rollover is done, no more than backupCount files are kept; the oldest are deleted.
+        If backupCount is == 0, all the backup files will be kept.
+    :param delay: bool, If set to True, the log file will be created only if there's something to write.
+    :param encoding: Encoding to use for the log file. Same as for the TimedRotatingFileHandler, which defaults to None.
+    :return: TimedRotatingFileHandler.
+    """
+
+    return TimedRotatingFileHandler(
+        filename=log_file_path, when=when, interval=interval, backupCount=backupCount, delay=delay, encoding=encoding)
+
+
+# noinspection PyPep8Naming
+def get_timed_rotating_file_handler_extended(
         file_path: str,
         file_type: Literal[
             'txt',
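A usage sketch for the relocated get_timed_rotating_file_handler(), shown here directly against the underlying stdlib class (the file name and retention values are illustrative):

```python
import logging
from logging.handlers import TimedRotatingFileHandler

handler = TimedRotatingFileHandler(
    filename='app.txt', when='midnight', interval=1, backupCount=7, delay=True)
logger = logging.getLogger('rotating')
logger.addHandler(handler)
logger.warning('rotates at midnight, keeping at most 7 old files')
```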
@@ -158,24 +225,47 @@ def add_timedfilehandler_with_queuehandler(
         rotation_date_format: str = None,
         rotation_callback_namer_function: callable = None,
         rotation_use_default_callback_namer_function: bool = True,
+        use_internal_queue_listener: bool = False,
         when: str = 'midnight',
         interval: int = 1,
         delay: bool = True,
         backupCount: int = 0,
         encoding=None,
         header: str = None
-):
-    """
-    Function to add TimedRotatingFileHandler and QueueHandler to logger.
-    TimedRotatingFileHandler will output messages to the file through QueueHandler.
-    This is needed, since TimedRotatingFileHandler is not thread-safe, though official docs say it is.
+) -> Union[TimedRotatingFileHandler, logging.handlers.QueueHandler]:
+    """
+    Function to get a TimedRotatingFileHandler, optionally wrapped by a QueueHandler.
+    The TimedRotatingFileHandler will output messages to the file through the QueueHandler.
+    This is needed, since TimedRotatingFileHandler is not thread-safe, though the official docs say it is.
+
+    :param file_path: Path to the log file.
+    :param file_type: Type of the file. Possible values: 'txt', 'csv', 'json'.
+    :param logging_level: Logging level for the handler.
+    :param formatter: Formatter for the handler.
+    :param formatter_use_nanoseconds: If set to True, the formatter will use nanoseconds.
+    :param rotate_at_rollover_time: If set to True, the handler will rotate the log file at the rollover time.
+    :param rotation_date_format: Date format string to set as the handler's suffix.
+    :param rotation_callback_namer_function: Callback function to change the filename on rotation.
+    :param rotation_use_default_callback_namer_function: If set to True, the default callback namer function will be
+        used and the filename will be changed on rotation instead of using the default, like this:
+        'file.log.2021-12-24' -> 'file_2021-12-24.log'.
+    :param use_internal_queue_listener: If set to True, the handler will use an internal QueueListener to write logs.
+    :param when: When to rotate the log file. Possible values:
+        "S" - Seconds
+        "M" - Minutes
+        "H" - Hours
+        "D" - Days
+        "midnight" - Roll over at midnight
+    :param interval: Interval to rotate the log file.
+    :param delay: If set to True, the log file will be created only if there's something to write.
+    :param backupCount: Number of backup files to keep. Default is 0.
+        If backupCount is > 0, when rollover is done, no more than backupCount files are kept; the oldest are deleted.
+        If backupCount is == 0, all the backup files will be kept.
+    :param encoding: Encoding to use for the log file. Same as for the TimedRotatingFileHandler, which defaults to None.
+    :param header: Header to write to the log file.
+    :return: TimedRotatingFileHandler, or QueueHandler if use_internal_queue_listener is set to True.
     """
 
-    # Setting the TimedRotatingFileHandler, without adding it to the logger.
-    # It will be added to the QueueListener, which will use the TimedRotatingFileHandler to write logs.
-    # This is needed since there's a bug in TimedRotatingFileHandler, which won't let it be used with
-    # threads the same way it would be used for multiprocess.
-
     # Creating file handler with log filename. At this stage the log file is created and locked by the handler,
     # unless we use "delay=True" to tell the class to write the file only if there's something to write.
@@ -216,95 +306,36 @@ def add_timedfilehandler_with_queuehandler(
     if rotate_at_rollover_time:
         _start_interval_rotation(file_handler)
 
-    queue_handler = start_queue_listener_for_file_handler_and_get_queue_handler(file_handler)
-    loggers.set_logging_level(queue_handler, logging_level)
-
-    # Add the QueueHandler to the logger.
-    loggers.add_handler(logger, queue_handler)
-
-    # Disable propagation from the 'root' logger, so we will not see the messages twice.
-    loggers.set_propagation(logger)
-
-
-def start_queue_listener_for_file_handler_and_get_queue_handler(file_handler):
-    """
-    Function to start QueueListener, which will put the logs from FileHandler to the Queue.
-    QueueHandler will get the logs from the Queue and put them to the file that was set in the FileHandler.
-
-    :param file_handler: FileHandler object.
-    :return: QueueHandler object.
-    """
-
-    # Create the Queue between threads. "-1" means that an infinite number of items can be
-    # put in the Queue. If the integer is bigger than 0, it will be the maximum number of items.
-    queue_object = queue.Queue(-1)
-    # Create QueueListener, which will put the logs from FileHandler to the Queue.
-    start_queue_listener_for_file_handler(file_handler, queue_object)
-
-    return get_queue_handler(queue_object)
-
-
-# BASE FUNCTIONS =======================================================================================================
-
-
-def get_stream_handler() -> logging.StreamHandler:
-    """
-    Function to get a StreamHandler.
-    This handler will output messages to the console.
-
-    :return: StreamHandler.
-    """
-
-    return logging.StreamHandler()
-
-
-# noinspection PyPep8Naming
-def get_timed_rotating_file_handler(
-        log_file_path: str,
-        when: str = "midnight",
-        interval: int = 1,
-        backupCount: int = 0,
-        delay: bool = False,
-        encoding=None
-) -> TimedRotatingFileHandler:
-    """
-    Function to get a TimedRotatingFileHandler.
-    This handler will output messages to a file, rotating the log file at certain timed intervals.
-
-    :param log_file_path: Path to the log file.
-    :param when: When to rotate the log file. Possible values:
-        "S" - Seconds
-        "M" - Minutes
-        "H" - Hours
-        "D" - Days
-        "midnight" - Roll over at midnight
-    :param interval: Interval to rotate the log file.
-    :param backupCount: int, Number of backup files to keep. Default is 0.
-        If backupCount is > 0, when rollover is done, no more than backupCount files are kept; the oldest are deleted.
-        If backupCount is == 0, all the backup files will be kept.
-    :param delay: bool, If set to True, the log file will be created only if there's something to write.
-    :param encoding: Encoding to use for the log file. Same as for the TimedRotatingFileHandler, which defaults to None.
-    :return: TimedRotatingFileHandler.
-    """
+    # Setting the TimedRotatingFileHandler, without adding it to the logger.
+    # It will be added to the QueueListener, which will use the TimedRotatingFileHandler to write logs.
+    # This is needed since there's a bug in TimedRotatingFileHandler, which won't let it be used with
+    # threads the same way it would be used for multiprocess.
 
-    return TimedRotatingFileHandler(
-        filename=log_file_path, when=when, interval=interval, backupCount=backupCount, delay=delay, encoding=encoding)
+    # If internal queue listener is set to True, we'll start the QueueListener for the FileHandler.
+    if use_internal_queue_listener:
+        queue_handler = get_queue_handler_and_start_queue_listener_for_file_handler(file_handler)
+        loggers.set_logging_level(queue_handler, logging_level)
+        return queue_handler
+    else:
+        return file_handler
 
 
-def start_queue_listener_for_file_handler(
-        file_handler: logging.FileHandler, queue_object) -> logging.handlers.QueueListener:
+def start_queue_listener_for_handlers(
+        handlers: tuple[logging.Handler],
+        queue_object: Union[queue.Queue, multiprocessing.Queue]
+) -> logging.handlers.QueueListener:
     """
     Function to get a QueueListener for the provided handlers.
     The listener gets the messages from the Queue and passes them to the handlers.
 
-    :param file_handler: FileHandler to get the messages from.
+    :param handlers: Tuple of handlers to put in the QueueListener.
+        For example, it can be (stream_handler, file_handler).
     :param queue_object: Queue object to put the messages in.
     :return: QueueListener.
    """
 
-    # Create the QueueListener based on TimedRotatingFileHandler.
-    queue_listener = QueueListener(queue_object, file_handler)
+    # Create the QueueListener based on the provided handlers.
+    queue_listener: logging.handlers.QueueListener = QueueListener(queue_object, *handlers)
     # Start the QueueListener. Each logger will have its own instance of the Queue.
     queue_listener.start()
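What the generalization from one FileHandler to a tuple of handlers buys is fan-out: one queue, one listener, several destinations. A stdlib sketch mirroring start_queue_listener_for_handlers((stream_handler, file_handler), queue_object), with an illustrative file name:

```python
import logging
import queue
from logging.handlers import QueueHandler, QueueListener

log_queue: queue.Queue = queue.Queue(-1)
listener = QueueListener(
    log_queue, logging.StreamHandler(), logging.FileHandler('fanout.txt'))
listener.start()

logger = logging.getLogger('fan')
logger.addHandler(QueueHandler(log_queue))
logger.warning('one record, delivered to console and file')
listener.stop()  # enqueues the None sentinel and joins the listener thread
```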
@@ -324,6 +355,25 @@ def get_queue_handler(queue_object) -> logging.handlers.QueueHandler:
     return QueueHandler(queue_object)
 
 
+def get_queue_handler_extended(
+        queue_object: Union[
+            queue.Queue,
+            multiprocessing.Queue],
+        logging_level: str = "DEBUG"):
+    """
+    Function to get the QueueHandler.
+    The QueueHandler of the logger will pass the logs to the Queue, and the opposite QueueListener will write them
+    from the Queue to the file that was set in the FileHandler.
+    """
+
+    # Getting the QueueHandler.
+    queue_handler = get_queue_handler(queue_object)
+    # Setting the log level for the handler, which the logger will use when initiated.
+    loggers.set_logging_level(queue_handler, logging_level)
+
+    return queue_handler
+
+
 def set_formatter(handler: logging.Handler, logging_formatter: logging.Formatter):
     """
     Function to set the formatter for the handler.