atomicshop 2.14.2__py3-none-any.whl → 2.14.4__py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.

This version of atomicshop has been flagged as a potentially problematic release.

Files changed (35)
  1. atomicshop/__init__.py +1 -1
  2. atomicshop/datetimes.py +3 -2
  3. atomicshop/etws/providers.py +18 -2
  4. atomicshop/etws/sessions.py +1 -1
  5. atomicshop/etws/trace.py +5 -22
  6. atomicshop/etws/traces/trace_dns.py +17 -14
  7. atomicshop/etws/traces/trace_sysmon_process_creation.py +34 -22
  8. atomicshop/file_io/csvs.py +1 -1
  9. atomicshop/get_process_list.py +133 -0
  10. atomicshop/mitm/statistic_analyzer.py +376 -3
  11. atomicshop/monitor/change_monitor.py +1 -0
  12. atomicshop/monitor/checks/dns.py +2 -1
  13. atomicshop/process.py +3 -3
  14. atomicshop/process_poller/__init__.py +0 -0
  15. atomicshop/process_poller/pollers/__init__.py +0 -0
  16. atomicshop/process_poller/pollers/psutil_pywin32wmi_dll.py +95 -0
  17. atomicshop/process_poller/process_pool.py +208 -0
  18. atomicshop/process_poller/simple_process_pool.py +112 -0
  19. atomicshop/process_poller/tracer_base.py +45 -0
  20. atomicshop/process_poller/tracers/__init__.py +0 -0
  21. atomicshop/process_poller/tracers/event_log.py +46 -0
  22. atomicshop/process_poller/tracers/sysmon_etw.py +68 -0
  23. atomicshop/wrappers/ctyping/etw_winapi/const.py +130 -20
  24. atomicshop/wrappers/ctyping/etw_winapi/etw_functions.py +141 -5
  25. atomicshop/wrappers/loggingw/reading.py +20 -19
  26. atomicshop/wrappers/pywin32w/win_event_log/subscribe.py +0 -2
  27. atomicshop/wrappers/pywin32w/win_event_log/subscribes/process_create.py +3 -24
  28. atomicshop/wrappers/pywin32w/win_event_log/subscribes/process_terminate.py +103 -0
  29. {atomicshop-2.14.2.dist-info → atomicshop-2.14.4.dist-info}/METADATA +1 -1
  30. {atomicshop-2.14.2.dist-info → atomicshop-2.14.4.dist-info}/RECORD +34 -24
  31. atomicshop/process_poller.py +0 -345
  32. /atomicshop/{process_name_cmd.py → get_process_name_cmd_dll.py} +0 -0
  33. {atomicshop-2.14.2.dist-info → atomicshop-2.14.4.dist-info}/LICENSE.txt +0 -0
  34. {atomicshop-2.14.2.dist-info → atomicshop-2.14.4.dist-info}/WHEEL +0 -0
  35. {atomicshop-2.14.2.dist-info → atomicshop-2.14.4.dist-info}/top_level.txt +0 -0
atomicshop/mitm/statistic_analyzer.py CHANGED
@@ -1,8 +1,12 @@
+ import os
  import datetime
+ import statistics
+ import json
+ from typing import Literal
 
  from .. import filesystem, domains, datetimes, urls
  from ..basics import dicts
- from ..file_io import tomls, xlsxs
+ from ..file_io import tomls, xlsxs, csvs, jsons
  from ..wrappers.loggingw import reading
  from ..print_api import print_api
 
@@ -154,10 +158,10 @@ def analyze(main_file_path: str):
      summary_path: str = filesystem.check_absolute_path___add_full(config['report_file_path'], script_directory)
 
      # Get the content from statistics files.
-     statistics_content: list = reading.get_logs(
+     statistics_content: list = reading.get_all_log_files_into_list(
          config['statistic_files_path'],
          file_name_pattern='statistics*.csv',
-         log_type='csv',
+         log_type='csv'
      )
 
      # Initialize loop.
@@ -465,3 +469,372 @@ def analyze(main_file_path: str):
      xlsxs.write_xlsx(combined_sorted_stats, file_path=summary_path)
 
      return
+
+
+ # ======================================================================================================================
+
+
+ def calculate_moving_average(
+         file_path: str,
+         moving_average_window_days,
+         top_bottom_deviation_percentage: float,
+         print_kwargs: dict = None
+ ):
+     """
+     This function calculates the moving average of the daily statistics.
+
+     :param file_path: string, the path to the 'statistics.csv' file.
+     :param moving_average_window_days: integer, the window size for the moving average.
+     :param top_bottom_deviation_percentage: float, the percentage of deviation from the moving average to the top or
+         bottom.
+     :param print_kwargs: dict, the print_api arguments.
+     """
+
+     date_pattern: str = '%Y_%m_%d'
+
+     # Get all the file paths and their midnight rotations.
+     logs_paths: list = reading.get_logs_paths(
+         log_file_path=file_path,
+         date_pattern=date_pattern
+     )
+
+     statistics_content: dict = {}
+     # Read each file to its day.
+     for log_path_dict in logs_paths:
+         date_string = log_path_dict['date_string']
+         statistics_content[date_string] = {}
+
+         statistics_content[date_string]['file'] = log_path_dict
+
+         log_file_content, log_file_header = (
+             csvs.read_csv_to_list_of_dicts_by_header(log_path_dict['file_path'], **(print_kwargs or {})))
+         statistics_content[date_string]['content'] = log_file_content
+         statistics_content[date_string]['header'] = log_file_header
+
+         statistics_content[date_string]['content_no_errors'] = get_content_without_errors(log_file_content)
+
+         # Get the data dictionary from the statistics content.
+         statistics_content[date_string]['statistics_daily'] = compute_statistics_from_content(
+             statistics_content[date_string]['content_no_errors']
+         )
+
+     moving_average_dict: dict = compute_moving_averages_from_average_statistics(
+         statistics_content,
+         moving_average_window_days
+     )
+
+     # Add the moving average to the statistics content.
+     for day, day_dict in statistics_content.items():
+         try:
+             day_dict['moving_average'] = moving_average_dict[day]
+         except KeyError:
+             day_dict['moving_average'] = {}
+
+     # Find deviation from the moving average to the bottom or top by specified percentage.
+     deviation_list: list = find_deviation_from_moving_average(
+         statistics_content, top_bottom_deviation_percentage)
+
+     return deviation_list
+
+
+ def get_content_without_errors(content: list) -> list:
+     """
+     This function gets the 'statistics.csv' file content without errors from the 'content' list.
+
+     :param content: list, the content list.
+     :return: list, the content without errors.
+     """
+
+     traffic_statistics_without_errors: list = []
+     for line in content:
+         # Skip empty lines, headers and errors.
+         if line['host'] == 'host' or line['command'] == '':
+             continue
+
+         traffic_statistics_without_errors.append(line)
+
+     return traffic_statistics_without_errors
+
+
+ def get_data_dict_from_statistics_content(content: list) -> dict:
+     """
+     This function gets the data dictionary from the 'statistics.csv' file content.
+
+     :param content: list, the content list.
+     :return: dict, the data dictionary.
+     """
+
+     hosts_requests_responses: dict = {}
+     for line in content:
+         # If subdomain is not in the dictionary, add it.
+         if line['host'] not in hosts_requests_responses:
+             hosts_requests_responses[line['host']] = {
+                 'request_sizes': [],
+                 'response_sizes': []
+             }
+
+         # Append the sizes.
+         try:
+             hosts_requests_responses[line['host']]['request_sizes'].append(int(line['request_size_bytes']))
+             hosts_requests_responses[line['host']]['response_sizes'].append(
+                 int(line['response_size_bytes']))
+         except ValueError:
+             print_api(line, color='yellow')
+             raise
+
+     return hosts_requests_responses
+
+
+ def compute_statistics_from_data_dict(data_dict: dict):
+     """
+     This function computes the statistics from the data dictionary.
+
+     :param data_dict: dict, the data dictionary.
+     :return: dict, the statistics dictionary.
+     """
+
+     for host, host_dict in data_dict.items():
+         count = len(host_dict['request_sizes'])
+         avg_request_size = statistics.mean(host_dict['request_sizes']) if count > 0 else 0
+         median_request_size = statistics.median(host_dict['request_sizes']) if count > 0 else 0
+         avg_response_size = statistics.mean(host_dict['response_sizes']) if count > 0 else 0
+         median_response_size = statistics.median(host_dict['response_sizes']) if count > 0 else 0
+
+         data_dict[host]['count'] = count
+         data_dict[host]['avg_request_size'] = avg_request_size
+         data_dict[host]['median_request_size'] = median_request_size
+         data_dict[host]['avg_response_size'] = avg_response_size
+         data_dict[host]['median_response_size'] = median_response_size
+
+
+ def compute_statistics_from_content(content: list):
+     """
+     This function computes the statistics from the 'statistics.csv' file content.
+
+     :param content: list, the content list.
+     :return: dict, the statistics dictionary.
+     """
+
+     hosts_requests_responses: dict = get_data_dict_from_statistics_content(content)
+     compute_statistics_from_data_dict(hosts_requests_responses)
+
+     return hosts_requests_responses
+
+
+ def compute_moving_averages_from_average_statistics(
+         average_statistics_dict: dict,
+         moving_average_window_days: int
+ ):
+     """
+     This function computes the moving averages from the average statistics dictionary.
+
+     :param average_statistics_dict: dict, the average statistics dictionary.
+     :param moving_average_window_days: integer, the window size for the moving average.
+     :return: dict, the moving averages dictionary.
+     """
+
+     moving_average: dict = {}
+     for day_index, (day, day_dict) in enumerate(average_statistics_dict.items()):
+         current_day = day_index + 1
+         if current_day < moving_average_window_days:
+             continue
+
+         # Create list of the previous 'moving_average_window_days' days.
+         previous_days_content_list = (
+             list(average_statistics_dict.values()))[current_day-moving_average_window_days:current_day]
+
+         # Compute the moving averages.
+         moving_average[day] = compute_average_for_current_day_from_past_x_days(previous_days_content_list)
+
+     return moving_average
+
+
+ def compute_average_for_current_day_from_past_x_days(previous_days_content_list: list) -> dict:
+     """
+     This function computes the average for the current day from the past x days.
+
+     :param previous_days_content_list: list, the list of the previous days content.
+     :return: dict, the average dictionary.
+     """
+
+     moving_average: dict = {}
+     for entry in previous_days_content_list:
+         statistics_daily = entry['statistics_daily']
+         for host, host_dict in statistics_daily.items():
+             if host not in moving_average:
+                 moving_average[host] = {
+                     'counts': [],
+                     'avg_request_sizes': [],
+                     'avg_response_sizes': [],
+                 }
+
+             moving_average[host]['counts'].append(int(host_dict['count']))
+             moving_average[host]['avg_request_sizes'].append(float(host_dict['avg_request_size']))
+             moving_average[host]['avg_response_sizes'].append(float(host_dict['avg_response_size']))
+
+     # Compute the moving average.
+     moving_average_results: dict = {}
+     for host, host_dict in moving_average.items():
+         ma_count = statistics.mean(host_dict['counts'])
+         ma_request_size = statistics.mean(host_dict['avg_request_sizes'])
+         ma_response_size = statistics.mean(host_dict['avg_response_sizes'])
+
+         moving_average_results[host] = {
+             'ma_count': ma_count,
+             'ma_request_size': ma_request_size,
+             'ma_response_size': ma_response_size,
+             'counts': host_dict['counts'],
+             'avg_request_sizes': host_dict['avg_request_sizes'],
+             'avg_response_sizes': host_dict['avg_response_sizes']
+         }
+
+     return moving_average_results
+
+
+ def find_deviation_from_moving_average(
+         statistics_content: dict,
+         top_bottom_deviation_percentage: float
+ ) -> list:
+     """
+     This function finds the deviation from the moving average to the bottom or top by specified percentage.
+
+     :param statistics_content: dict, the statistics content dictionary.
+     :param top_bottom_deviation_percentage: float, the percentage of deviation from the moving average to the top or
+         bottom.
+     :return: list, the deviation list.
+     """
+
+     def _check_deviation(
+             check_type: Literal['count', 'avg_request_size', 'avg_response_size'],
+             ma_check_type: Literal['ma_count', 'ma_request_size', 'ma_response_size'],
+             day_statistics_content_dict: dict,
+             moving_averages_dict: dict
+     ):
+         """
+         This function checks the deviation for the host.
+         """
+
+         nonlocal message
+
+         host_moving_average_by_type = moving_averages_dict[host][ma_check_type]
+         check_type_moving_by_percent = (
+                 host_moving_average_by_type * top_bottom_deviation_percentage)
+         check_type_moving_above = host_moving_average_by_type + check_type_moving_by_percent
+         check_type_moving_below = host_moving_average_by_type - check_type_moving_by_percent
+
+         deviation_type = None
+         if day_statistics_content_dict[check_type] > check_type_moving_above:
+             deviation_type = 'above'
+         elif day_statistics_content_dict[check_type] < check_type_moving_below:
+             deviation_type = 'below'
+
+         if deviation_type:
+             message = f'[{check_type}] is [{deviation_type}] the moving average.'
+             deviation_list.append({
+                 'day': day,
+                 'host': host,
+                 'message': message,
+                 'value': day_statistics_content_dict[check_type],
+                 'ma_value': host_moving_average_by_type,
+                 'check_type': check_type,
+                 'percentage': top_bottom_deviation_percentage,
+                 'ma_value_checked': check_type_moving_above,
+                 'deviation_type': deviation_type,
+                 'data': day_statistics_content_dict,
+                 'ma_data': moving_averages_dict[host]
+             })
+
+     deviation_list: list = []
+     for day_index, (day, day_dict) in enumerate(statistics_content.items()):
+         # If it's the first day, there is no previous day moving average.
+         if day_index == 0:
+             previous_day_moving_average_dict = {}
+         else:
+             previous_day_moving_average_dict = list(statistics_content.values())[day_index-1].get('moving_average', {})
+
+         # If there is no moving average for previous day continue to the next day.
+         if not previous_day_moving_average_dict:
+             continue
+
+         for host, host_dict in day_dict['statistics_daily'].items():
+             # If the host is not in the moving averages, then this is clear deviation.
+             # It means that in the current day, there were no requests for this host.
+             if host not in previous_day_moving_average_dict:
+                 message = f'Host not in the moving averages: {host}'
+                 deviation_list.append({
+                     'day': day,
+                     'host': host,
+                     'data': host_dict,
+                     'message': message,
+                     'type': 'clear'
+                 })
+                 continue
+
+             _check_deviation(
+                 'count', 'ma_count', host_dict, previous_day_moving_average_dict)
+             _check_deviation(
+                 'avg_request_size', 'ma_request_size', host_dict, previous_day_moving_average_dict)
+             _check_deviation(
+                 'avg_response_size', 'ma_response_size', host_dict, previous_day_moving_average_dict)
+
+     return deviation_list
+
+
+ def moving_average_calculator_main(
+         statistics_file_path: str,
+         output_directory: str,
+         moving_average_window_days: int,
+         top_bottom_deviation_percentage: float
+ ) -> int:
+     """
+     This function is the main function for the moving average calculator.
+
+     :param statistics_file_path: string, the statistics file path.
+     :param output_directory: string, the output directory.
+     :param moving_average_window_days: integer, the moving average window days.
+     :param top_bottom_deviation_percentage: float, the top bottom deviation percentage. Example: 0.1 for 10%.
+     :return: integer, the return code.
+     -----------------------------
+
+     Example:
+         import sys
+         from atomicshop.mitm import statistic_analyzer
+
+
+         def main():
+             return statistic_analyzer.moving_average_calculator_main(
+                 statistics_file_path='statistics.csv',
+                 output_directory='output',
+                 moving_average_window_days=7,
+                 top_bottom_deviation_percentage=0.1
+             )
+
+
+         if __name__ == '__main__':
+             sys.exit(main())
+     """
+
+     def convert_data_value_to_string(value_key: str, list_index: int) -> None:
+         deviation_list[list_index]['data'][value_key] = json.dumps(deviation_list[list_index]['data'][value_key])
+
+     def convert_value_to_string(value_key: str, list_index: int) -> None:
+         if value_key in deviation_list[list_index]:
+             deviation_list[list_index][value_key] = json.dumps(deviation_list[list_index][value_key])
+
+     deviation_list = calculate_moving_average(
+         statistics_file_path,
+         moving_average_window_days,
+         top_bottom_deviation_percentage
+     )
+
+     if deviation_list:
+         for deviation_list_index, deviation in enumerate(deviation_list):
+             convert_data_value_to_string('request_sizes', deviation_list_index)
+             convert_data_value_to_string('response_sizes', deviation_list_index)
+             convert_value_to_string('ma_data', deviation_list_index)
+
+         file_path = output_directory + os.sep + 'deviation.json'
+         print_api(f'Deviation Found, saving to file: {file_path}', color='blue')
+         jsons.write_json_file(deviation_list, file_path, use_default_indent=True)
+
+     return 0
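
The deviation check added above boils down to a windowed mean plus a symmetric percentage band around it: a host's daily value is flagged when it falls outside moving_average * (1 ± top_bottom_deviation_percentage). A minimal standalone sketch of that arithmetic (illustrative only, not part of the package; names are made up):

    import statistics

    def is_deviation(value: float, window_values: list, deviation_percentage: float) -> bool:
        # Moving average over the previous window, e.g. the last 7 daily values for one host.
        moving_average = statistics.mean(window_values)
        band = moving_average * deviation_percentage  # 0.1 -> +/- 10%
        return value > moving_average + band or value < moving_average - band

    # 7-day window of daily request counts for one host: mean = 100, band = +/- 10.
    print(is_deviation(150, [100, 98, 103, 99, 101, 97, 102], 0.1))  # True, above the band
    print(is_deviation(105, [100, 98, 103, 99, 101, 97, 102], 0.1))  # False, inside the band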
atomicshop/monitor/change_monitor.py CHANGED
@@ -4,6 +4,7 @@ from .checks import dns, network, file, url, process_running
 
 
  DNS__DEFAULT_SETTINGS = {
+     'skip_record_list': [],  # List of DNS Records to skip emitting. Example: ['PTR', 'SRV']
      'learning_mode_create_unique_entries_list': True,
      'learning_hours': 24,  # 0 - the learning will never stop.
      'alert_about_missing_entries_after_learning': False,
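
The new 'skip_record_list' key defaults to an empty list, so existing configurations keep emitting every DNS record type. A caller that wants to drop noisy reverse-lookup and service records could override just that key when building its settings dict (a sketch based only on the defaults shown above; how the monitor merges user settings is not part of this diff):

    dns_settings = {
        'skip_record_list': ['PTR', 'SRV'],  # DNS record types to skip emitting.
        'learning_mode_create_unique_entries_list': True,
        'learning_hours': 24,  # 0 - the learning will never stop.
        'alert_about_missing_entries_after_learning': False,
    }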
atomicshop/monitor/checks/dns.py CHANGED
@@ -27,7 +27,8 @@ class DnsCheck:
              trace_dns.DnsRequestResponseTrace(
                  attrs=['name', 'cmdline', 'domain', 'query_type'],
                  session_name=self.etw_session_name,
-                 close_existing_session_name=True
+                 close_existing_session_name=True,
+                 skip_record_list=self.settings['skip_record_list'],
              )
          )
 
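
The matching change inside trace_dns.DnsRequestResponseTrace is not part of this diff, but the intent is evidently a record-type filter on emitted events. Conceptually it amounts to something like the following (an assumption about the implementation, keyed on the 'query_type' attribute the check already requests):

    def should_emit(event: dict, skip_record_list: list) -> bool:
        # Drop events whose DNS record type (e.g. 'PTR', 'SRV') is in the skip list.
        return event.get('query_type') not in (skip_record_list or [])

    print(should_emit({'query_type': 'A'}, ['PTR', 'SRV']))    # True - emitted
    print(should_emit({'query_type': 'PTR'}, ['PTR', 'SRV']))  # False - skipped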
atomicshop/process.py CHANGED
@@ -11,7 +11,7 @@ from .basics import strings
  from .wrappers import ubuntu_terminal
 
  if os.name == 'nt':
-     from . import process_poller
+     from . import get_process_list
 
 
  def is_command_exists(cmd: str) -> bool:
@@ -265,8 +265,8 @@ def match_pattern_against_running_processes_cmdlines(
      """
 
      # Get the list of all the currently running processes.
-     get_process_list = process_poller.GetProcessList(get_method='pywin32', connect_on_init=True)
-     processes = get_process_list.get_processes(as_dict=False)
+     get_process_list_instance = get_process_list.GetProcessList(get_method='pywin32', connect_on_init=True)
+     processes = get_process_list_instance.get_processes(as_dict=False)
 
      # Iterate through all the current process, while fetching executable file 'name' and the command line.
      # Name is always populated, while command line is not.
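
With atomicshop/process_poller.py removed, process.py now imports the new atomicshop/get_process_list.py module directly. A minimal usage sketch that mirrors the two calls visible in this hunk (the rest of the GetProcessList API is not shown here):

    from atomicshop import get_process_list

    # Same pattern as the updated match_pattern_against_running_processes_cmdlines():
    poller = get_process_list.GetProcessList(get_method='pywin32', connect_on_init=True)
    processes = poller.get_processes(as_dict=False)

    for process in processes:
        # Each entry is expected to carry at least the executable name and command line.
        print(process)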
atomicshop/process_poller/pollers/psutil_pywin32wmi_dll.py ADDED
@@ -0,0 +1,95 @@
+ from typing import Union, Literal
+ import threading
+ import time
+
+ from ... import get_process_list
+
+
+ class PollerPsutilPywin32Dll:
+     """
+     The class is responsible for getting the list of opened processes by using mentioned libraries.
+     """
+     def __init__(
+             self,
+             interval_seconds: Union[int, float] = 0,
+             process_get_method: Literal[
+                 'poll_psutil',
+                 'poll_pywin32',
+                 'poll_process_dll'
+             ] = 'poll_process_dll',
+             process_queue=None
+     ):
+         """
+         :param interval_seconds: works only for pollers, float, how many seconds to wait between each cycle.
+             Default is 0, which means that the polling will be as fast as possible.
+
+             Basically, you want it to be '0' if you want to get the most recent processes.
+             Any polling by itself takes time, so if you want to get the most recent processes, you want to do it as fast
+             as possible.
+         :param process_get_method: str. Default is 'process_dll'. Available:
+             'poll_psutil': Poller, Get the list of processes by 'psutil' library. Resource intensive and slow.
+             'poll_pywin32': Poller, processes by 'pywin32' library, using WMI. Not resource intensive, but slow.
+             'poll_process_dll': Poller, Not resource intensive and fast. Probably works only in Windows 10 x64.
+             'trace_sysmon_etw': Tracer, Get the list of processes with running SysMon by ETW - Event Tracing.
+                 In this case 'interval_seconds' is irrelevant, since the ETW is real-time.
+                 Steps we take:
+                 1. Check if SysMon is Running. If not, check if the executable exists in specified
+                     location and start it as a service.
+                 2. Start the "Microsoft-Windows-Sysmon" ETW session.
+                 3. Take a snapshot of current processes and their CMDs with psutil and store it in a dict.
+                 4. Each new process creation from ETW updates the dict.
+             'trace_event_log': Get the list of processes by subscribing to the Windows Event Log.
+                 Log Channel: Security, Event ID: 4688.
+                 We enable the necessary prerequisites in registry and subscribe to the event.
+         :param process_queue: Queue. The queue to put the processes in. If None, the processes will not be put in the
+             queue.
+         """
+
+         self.interval_seconds: Union[int, float] = interval_seconds
+         self.process_get_method = process_get_method.replace('poll_', '')
+         self.process_queue = process_queue
+
+         # noinspection PyTypeChecker
+         self.poller_instance = get_process_list.GetProcessList(get_method=self.process_get_method)
+
+         self._processes = {}
+
+     def start(self):
+         """
+         Start the poller.
+         """
+
+         thread = threading.Thread(target=self.emit_loop)
+         thread.daemon = True
+         thread.start()
+
+     def emit_loop(self):
+         """
+         Get the list of processes.
+         """
+
+         self.poller_instance.connect()
+
+         while True:
+             current_processes = self.poller_instance.get_processes(as_dict=True)
+
+             # Remove Command lines that contains only numbers, since they are useless.
+             for pid, process_info in current_processes.items():
+                 if process_info['cmdline'].isnumeric():
+                     current_processes[pid]['cmdline'] = str()
+                 elif process_info['cmdline'] == 'Error':
+                     current_processes[pid]['cmdline'] = str()
+
+             # This loop is essential for keeping the command lines.
+             # When the process unloads from memory, the last polling will have only pid and executable name, but not
+             # the command line. This loop will keep the command line from the previous polling if this happens.
+             for pid, process_info in current_processes.items():
+                 if pid in self._processes:
+                     if self._processes[pid]['name'] == current_processes[pid]['name']:
+                         if current_processes[pid]['cmdline'] == '':
+                             current_processes[pid]['cmdline'] = self._processes[pid]['cmdline']
+             self._processes.update(current_processes)
+
+             self.process_queue.put(self._processes)
+
+             time.sleep(self.interval_seconds)
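
Judging by the constructor and emit_loop() above, a consumer wires the poller to a queue and reads accumulated process snapshots from it. A hedged usage sketch (the process_pool.py orchestration that normally drives this class is not shown in this diff):

    import queue

    from atomicshop.process_poller.pollers.psutil_pywin32wmi_dll import PollerPsutilPywin32Dll

    process_queue = queue.Queue()
    poller = PollerPsutilPywin32Dll(
        interval_seconds=0,
        process_get_method='poll_process_dll',
        process_queue=process_queue
    )
    poller.start()  # Runs emit_loop() in a daemon thread.

    # Each queue item is the accumulated {pid: {'name': ..., 'cmdline': ...}} dict.
    latest_processes = process_queue.get()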